kernel: Make SMP support default (and non-optional).
[dragonfly.git] / sys / cpu / i386 / include / cpufunc.h
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/cpufunc.h,v 1.96.2.3 2002/04/28 22:50:54 dwmalone Exp $
 */

/*
 * Functions to provide access to special i386 instructions.
 */

#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_CDEFS_H_
#include <sys/cdefs.h>
#endif

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))

#ifdef __GNUC__

#include <machine/lock.h>	/* XXX */

#ifdef SWTCH_OPTIM_STATS
extern int tlb_flush_count;	/* XXX */
#endif

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause");
}

/*
 * Find the first 1 in mask, starting with bit 0 and return the
 * bit number.  If mask is 0 the result is undefined.
 */
static __inline u_int
bsfl(u_int mask)
{
	u_int result;

	__asm __volatile("bsfl %0,%0" : "=r" (result) : "0" (mask));
	return (result);
}

static __inline u_int
bsflong(u_long mask)
{
	u_long result;

	__asm __volatile("bsfl %0,%0" : "=r" (result) : "0" (mask));
	return (result);
}

/*
 * Find the last 1 in mask, starting with bit 31 and return the
 * bit number.  If mask is 0 the result is undefined.
 */
static __inline u_int
bsrl(u_int mask)
{
	u_int result;

	__asm __volatile("bsrl %0,%0" : "=r" (result) : "0" (mask));
	return (result);
}

/*
 * Test and set the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).
 */
static __inline int
btsl(u_int *mask, int bit)
{
	int result;

	__asm __volatile("btsl %2,%1; movl $0,%0; adcl $0,%0" :
		    "=r"(result), "=m"(*mask) : "r" (bit));
	return(result);
}

/*
 * Test and clear the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).
 */
static __inline int
btrl(u_int *mask, int bit)
{
	int result;

	__asm __volatile("btrl %2,%1; movl $0,%0; adcl $0,%0" :
		    "=r"(result), "=m"(*mask) : "r" (bit));
	return(result);
}
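
/*
 * Usage sketch (illustrative only, not part of this header): claiming a
 * slot in a hypothetical allocation bitmap with btsl(); a return value
 * of 1 means the bit was already set and the slot is taken:
 *
 *	if (btsl(&bitmap, slot) == 0) {
 *		... slot was free and is now marked in use ...
 *	}
 *	btrl(&bitmap, slot);	(release it again)
 *
 * Note that these inlines are not bus-locked, so a locked/atomic variant
 * is needed if the bitmap is shared between cpus.
 */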

static __inline void
clflush(u_long addr)
{
	__asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			: "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			: "0" (ax), "c" (cx));
}
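
/*
 * Usage sketch (illustrative only, not part of this header): reading the
 * 12-byte cpu vendor string via leaf 0.  do_cpuid() fills regs[] in
 * eax/ebx/ecx/edx order and the vendor id is returned in ebx, edx, ecx:
 *
 *	u_int regs[4];
 *	char vendor[13];
 *
 *	do_cpuid(0, regs);
 *	bcopy(&regs[1], vendor + 0, 4);	(ebx)
 *	bcopy(&regs[3], vendor + 4, 4);	(edx)
 *	bcopy(&regs[2], vendor + 8, 4);	(ecx)
 *	vendor[12] = '\0';
 */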

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
#ifdef CPU_HAS_SSE2
	__asm __volatile("mfence" : : : "memory");
#else
	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
#endif
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
#ifdef CPU_HAS_SSE2
	__asm __volatile("lfence" : : : "memory");
#else
	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
#endif
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
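
/*
 * Usage sketch (illustrative only, not part of this header): a minimal
 * producer/consumer pairing of the fences above, assuming hypothetical
 * shared variables 'payload' and 'ready':
 *
 *	payload = compute();	(producer)
 *	cpu_sfence();		(payload visible before the flag)
 *	ready = 1;
 *
 *	while (ready == 0)	(consumer)
 *		cpu_pause();
 *	cpu_lfence();		(flag read ordered before payload reads)
 *	consume(payload);
 *
 * cpu_ccfence() alone only constrains the compiler, not the cpu.
 */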

/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case by case basis), just before they
 * return, to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well
 * as a 48-core system built around an Opteron 6168 (Id = 0x100f91
 * Stepping = 1).  The problem does not appear to occur w/Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the AMD cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
	__asm __volatile("nop" : : : "memory");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int) bsrl((u_int)mask) + 1);
}
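
/*
 * Worked examples (illustrative): ffs(0) == 0, ffs(0x18) == 4 and
 * fls(0x18) == 5; bit numbering is 1-based, with 0 reserved for an
 * empty mask.
 */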

#endif /* _KERNEL */

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define inb(port) __extension__ ({ \
	u_char _data; \
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100 \
	    && (port) < 0x10000) \
		_data = inbc(port); \
	else \
		_data = inbv(port); \
	_data; })

#define outb(port, data) ( \
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100 \
	&& (port) < 0x10000 \
	? outbc(port, data) : outbv(port, data))
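
/*
 * Usage note (illustrative): a compile-time constant port such as
 * inb(0x61) selects the constant-port form inbc(), while a run-time
 * value such as inb(sc->port) (hypothetical softc field) falls back to
 * inbv().  Both behave identically; only the generated code differs.
 */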

static __inline u_char
inbc(u_int port)
{
	u_char data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "=D" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "=D" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "=D" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}
#if defined(_KERNEL)

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
void smp_invltlb(void);
void smp_invltlb_intr(void);

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	u_int temp;
	/*
	 * This should be implemented as load_cr3(rcr3()) when load_cr3()
	 * is inlined.
	 */
	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (temp)
			 : : "memory");
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			 : "=&r" (result) : "m" (*addr));
	return (result);
}
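
/*
 * Usage sketch (illustrative only, not part of this header): atomically
 * fetching and clearing a pending-work mask shared with an interrupt
 * handler, assuming a hypothetical 'pending' word:
 *
 *	u_int work;
 *
 *	while ((work = loadandclear(&pending)) != 0)
 *		process_work(work);
 *
 * The xchgl is implicitly bus-locked, so no explicit lock prefix is
 * needed.
 */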

static __inline void
outbv(u_int port, u_char data)
{
	u_char al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "=S" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "=S" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "=S" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline u_int
rcr2(void)
{
	u_int data;

	__asm __volatile("movl %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline u_int
read_eflags(void)
{
	u_int ef;

	__asm __volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int64_t rv;

	__asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
	return (rv);
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int64_t rv;

	__asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc));
	return (rv);
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
	u_int64_t rv;

	__asm __volatile("rdtsc" : "=A" (rv));
	return (rv);
}
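
/*
 * Usage sketch (illustrative only, not part of this header): rough
 * cycle-counting of a code section.  Note the raw TSC read is not
 * serializing and is not necessarily synchronized between cpus:
 *
 *	u_int64_t t0, delta;
 *
 *	t0 = rdtsc();
 *	do_work();
 *	delta = rdtsc() - t0;
 */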

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_eflags(u_int ef)
{
	__asm __volatile("pushl %0; popfl" : : "r" (ef));
}
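
/*
 * Usage sketch (illustrative only, not part of this header): the classic
 * save/disable/restore interrupt pattern built from these primitives:
 *
 *	u_int ef;
 *
 *	ef = read_eflags();
 *	cpu_disable_intr();
 *	... short critical section ...
 *	write_eflags(ef);	(restores the previous IF state)
 */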

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
}

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}

static __inline u_int
rdr0(void)
{
	u_int data;
	__asm __volatile("movl %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int sel)
{
	__asm __volatile("movl %0,%%dr0" : : "r" (sel));
}

static __inline u_int
rdr1(void)
{
	u_int data;
	__asm __volatile("movl %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int sel)
{
	__asm __volatile("movl %0,%%dr1" : : "r" (sel));
}

static __inline u_int
rdr2(void)
{
	u_int data;
	__asm __volatile("movl %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int sel)
{
	__asm __volatile("movl %0,%%dr2" : : "r" (sel));
}

static __inline u_int
rdr3(void)
{
	u_int data;
	__asm __volatile("movl %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int sel)
{
	__asm __volatile("movl %0,%%dr3" : : "r" (sel));
}

static __inline u_int
rdr4(void)
{
	u_int data;
	__asm __volatile("movl %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int sel)
{
	__asm __volatile("movl %0,%%dr4" : : "r" (sel));
}

static __inline u_int
rdr5(void)
{
	u_int data;
	__asm __volatile("movl %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int sel)
{
	__asm __volatile("movl %0,%%dr5" : : "r" (sel));
}

static __inline u_int
rdr6(void)
{
	u_int data;
	__asm __volatile("movl %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int sel)
{
	__asm __volatile("movl %0,%%dr6" : : "r" (sel));
}

static __inline u_int
rdr7(void)
{
	u_int data;
	__asm __volatile("movl %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int sel)
{
	__asm __volatile("movl %0,%%dr7" : : "r" (sel));
}

#else /* !__GNUC__ */

int	breakpoint (void);
void	cpu_pause (void);
u_int	bsfl (u_int mask);
u_int	bsrl (u_int mask);
void	cpu_disable_intr (void);
void	do_cpuid (u_int ax, u_int *p);
void	cpu_enable_intr (void);
u_char	inb (u_int port);
u_int	inl (u_int port);
void	insb (u_int port, void *addr, size_t cnt);
void	insl (u_int port, void *addr, size_t cnt);
void	insw (u_int port, void *addr, size_t cnt);
void	invd (void);
u_short	inw (u_int port);
u_int	loadandclear (u_int *addr);
void	outb (u_int port, u_char data);
void	outl (u_int port, u_int data);
void	outsb (u_int port, void *addr, size_t cnt);
void	outsl (u_int port, void *addr, size_t cnt);
void	outsw (u_int port, void *addr, size_t cnt);
void	outw (u_int port, u_short data);
u_int	rcr2 (void);
u_int64_t rdmsr (u_int msr);
u_int64_t rdpmc (u_int pmc);
u_int64_t rdtsc (void);
u_int	read_eflags (void);
void	wbinvd (void);
void	write_eflags (u_int ef);
void	wrmsr (u_int msr, u_int64_t newval);
u_short	rfs (void);
u_short	rgs (void);
void	load_fs (u_short sel);
void	load_gs (u_short sel);

#endif /* __GNUC__ */

void	load_cr0 (u_int cr0);
void	load_cr3 (u_int cr3);
void	load_cr4 (u_int cr4);
void	ltr (u_short sel);
u_int	rcr0 (void);
u_int	rcr3 (void);
u_int	rcr4 (void);
int	rdmsr_safe (u_int msr, uint64_t *val);
void	reset_dbregs (void);
__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */