sys/cpu/x86_64: Expose CPUMASK macros to userspace without _KERNEL_STRUCTURES
[dragonfly.git] / sys / cpu / x86_64 / include / types.h
1 /*-
2  * Copyright (c) 1990, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  * Copyright (c) 2008 The DragonFly Project.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of the University nor the names of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *      @(#)types.h     8.3 (Berkeley) 1/5/94
31  * $FreeBSD: src/sys/i386/include/types.h,v 1.19.2.1 2001/03/21 10:50:58 peter Exp $
32  */
33
#ifndef _CPU_TYPES_H_
#define _CPU_TYPES_H_

#include <machine/stdint.h>

/*
 * Register-sized integer types.  On x86_64 a machine register is 64 bits;
 * the __i386__ branch is presumably for 32-bit builds that pull in this
 * header -- TODO confirm which consumers still need it.
 */
#if defined(__x86_64__)
typedef __int64_t       __segsz_t;      /* segment size */
typedef __int64_t       register_t;
typedef __uint64_t      u_register_t;
#elif defined(__i386__)
typedef __int32_t       __segsz_t;      /* segment size */
typedef __int32_t       register_t;
typedef __uint32_t      u_register_t;
#endif
48
/*
 * Virtual-memory related types.  vm_offset_t/vm_size_t are machine-word
 * sized; the physical-address types are explicitly 64 bits wide.
 */
typedef unsigned long   vm_offset_t;    /* address space bounded offset */
typedef unsigned long   vm_size_t;      /* address space bounded size */

typedef __uint64_t      vm_pindex_t;    /* physical page index */
typedef __int64_t       vm_ooffset_t;   /* VM object bounded offset */
typedef __uint64_t      vm_poff_t;      /* physical offset */
typedef __uint64_t      vm_paddr_t;     /* physical addr (same as vm_poff_t) */

#ifdef _KERNEL
/* Kernel-only integer types wide enough to hold a function pointer. */
typedef __int64_t       intfptr_t;
typedef __uint64_t      uintfptr_t;
#endif
61
/*
 * MMU page tables.  x86_64 4-level paging: one 64-bit entry type per
 * level, PML4 down to the page table proper.
 */
typedef __uint64_t      pml4_entry_t;
typedef __uint64_t      pdp_entry_t;
typedef __uint64_t      pd_entry_t;
typedef __uint64_t      pt_entry_t;
/* Combined counter + exclusive-lock word; bit layout is CPULOCK_* below. */
typedef __uint32_t      cpulock_t;      /* count and exclusive lock */
70
/*
 * cpumask_t - a mask representing a set of cpus and supporting routines.
 *
 * The mask is a fixed array of four 64-bit words, covering up to 256
 * cpus.  Use CPUMASK_ELEMENTS for the element count so the C struct and
 * the assembly consumers stay in agreement (previously the array bound
 * was a hard-coded 4 duplicating the constant).
 *
 * WARNING! It is recommended that this mask NOT be made variably-sized
 *          because it affects a huge number of system structures.  However,
 *          kernel code (non-module) can be optimized to not operate on the
 *          whole mask.
 */

#define CPUMASK_ELEMENTS        4       /* tested by assembly for #error */

typedef struct {
        __uint64_t      ary[CPUMASK_ELEMENTS];
} cpumask_t;

/* Static initializers: every cpu set / only cpu 0 set. */
#define CPUMASK_INITIALIZER_ALLONES     { .ary = { (__uint64_t)-1, \
                                          (__uint64_t)-1, \
                                          (__uint64_t)-1, \
                                          (__uint64_t)-1 } }
#define CPUMASK_INITIALIZER_ONLYONE     { .ary = { 1, 0, 0, 0 } }
91
/*
 * 64-bit value with only bit (cpu) set.  Callers pass a bit index within
 * a single word (i.e. already reduced with "& 63" where needed).
 */
#define CPUMASK_SIMPLE(cpu)     ((__uint64_t)1 << (cpu))

/*
 * Address of the mask word holding the bit for the given cpu.  cpus >= 192
 * clamp to the last word rather than wrapping.
 */
#define CPUMASK_ADDR(mask, cpu)                                         \
                (((cpu) < 64) ? &(mask).ary[0] :                        \
                (((cpu) < 128) ? &(mask).ary[1] :                       \
                (((cpu) < 192) ? &(mask).ary[2] : &(mask).ary[3])))
98
/*
 * Bit-scan across the whole mask: BSRCPUMASK returns the index of the
 * highest cpu bit set, BSFCPUMASK the lowest.  bsrq()/bsfq() are the
 * 64-bit scan primitives declared elsewhere (not visible in this header);
 * presumably the result is undefined for an all-zero mask -- confirm
 * against cpufunc.h.
 */
#define BSRCPUMASK(val)         ((val).ary[3] ? 192 + bsrq((val).ary[3]) : \
                                ((val).ary[2] ? 128 + bsrq((val).ary[2]) : \
                                ((val).ary[1] ? 64 + bsrq((val).ary[1]) : \
                                                bsrq((val).ary[0]))))

#define BSFCPUMASK(val)         ((val).ary[0] ? bsfq((val).ary[0]) : \
                                ((val).ary[1] ? 64 + bsfq((val).ary[1]) : \
                                ((val).ary[2] ? 128 + bsfq((val).ary[2]) : \
                                                192 + bsfq((val).ary[3]))))
108
/* True if the two masks are bit-for-bit identical. */
#define CPUMASK_CMPMASKEQ(val1, val2)   ((val1).ary[0] == (val2).ary[0] && \
                                         (val1).ary[1] == (val2).ary[1] && \
                                         (val1).ary[2] == (val2).ary[2] && \
                                         (val1).ary[3] == (val2).ary[3])

/* True if the two masks differ in at least one bit. */
#define CPUMASK_CMPMASKNEQ(val1, val2)  ((val1).ary[0] != (val2).ary[0] || \
                                         (val1).ary[1] != (val2).ary[1] || \
                                         (val1).ary[2] != (val2).ary[2] || \
                                         (val1).ary[3] != (val2).ary[3])

/* True if exactly cpu 0 is set (uniprocessor-style mask). */
#define CPUMASK_ISUP(val)               ((val).ary[0] == 1 && \
                                         (val).ary[1] == 0 && \
                                         (val).ary[2] == 0 && \
                                         (val).ary[3] == 0)
123
/* True if no cpu is set in the mask. */
#define CPUMASK_TESTZERO(val)           ((val).ary[0] == 0 && \
                                         (val).ary[1] == 0 && \
                                         (val).ary[2] == 0 && \
                                         (val).ary[3] == 0)

/* True if at least one cpu is set in the mask. */
#define CPUMASK_TESTNZERO(val)          ((val).ary[0] != 0 || \
                                         (val).ary[1] != 0 || \
                                         (val).ary[2] != 0 || \
                                         (val).ary[3] != 0)

/* Non-zero if cpu i is in the mask; i is reduced mod 256 by the masking. */
#define CPUMASK_TESTBIT(val, i)         ((val).ary[((i) >> 6) & 3] & \
                                         CPUMASK_SIMPLE((i) & 63))
136
/*
 * Boolean: non-zero if the two masks intersect (share any set bit).
 * Both arguments are parenthesized before member access so expressions
 * such as *maskp work as arguments (val2 previously lacked the
 * parentheses that every sibling macro in this file applies).
 */
#define CPUMASK_TESTMASK(val1, val2)    (((val1).ary[0] & (val2).ary[0]) || \
                                         ((val1).ary[1] & (val2).ary[1]) || \
                                         ((val1).ary[2] & (val2).ary[2]) || \
                                         ((val1).ary[3] & (val2).ary[3]))

/* The low 64 cpus of the mask as a plain scalar. */
#define CPUMASK_LOWMASK(val)            ((val).ary[0])
143
/* Set bit i in the mask (i reduced mod 256). */
#define CPUMASK_ORBIT(mask, i)          ((mask).ary[((i) >> 6) & 3] |= \
                                         CPUMASK_SIMPLE((i) & 63))

/*
 * AND the word containing bit i against that single bit: every other bit
 * in that word is cleared, the remaining three words are left untouched.
 */
#define CPUMASK_ANDBIT(mask, i)         ((mask).ary[((i) >> 6) & 3] &= \
                                         CPUMASK_SIMPLE((i) & 63))

/* Clear bit i in the mask (i reduced mod 256). */
#define CPUMASK_NANDBIT(mask, i)        ((mask).ary[((i) >> 6) & 3] &= \
                                         ~CPUMASK_SIMPLE((i) & 63))
152
/* Assign the empty mask. */
#define CPUMASK_ASSZERO(mask)           do {                            \
                                        (mask).ary[0] = 0;              \
                                        (mask).ary[1] = 0;              \
                                        (mask).ary[2] = 0;              \
                                        (mask).ary[3] = 0;              \
                                        } while(0)

/* Assign the mask with every cpu set. */
#define CPUMASK_ASSALLONES(mask)        do {                            \
                                        (mask).ary[0] = (__uint64_t)-1; \
                                        (mask).ary[1] = (__uint64_t)-1; \
                                        (mask).ary[2] = (__uint64_t)-1; \
                                        (mask).ary[3] = (__uint64_t)-1; \
                                        } while(0)

/* Assign a mask containing only cpu i. */
#define CPUMASK_ASSBIT(mask, i)         do {                            \
                                                CPUMASK_ASSZERO(mask);  \
                                                CPUMASK_ORBIT(mask, i); \
                                        } while(0)
171
/*
 * Assign mask = the set of cpus below i (the low i bits), for i in
 * [0, 256); i == 0 yields an empty mask.
 *
 * NOTE: (i) is evaluated several times -- avoid side effects.  Every use
 * of the argument is parenthesized (the comparisons previously used a
 * bare i, which mis-parses for e.g. ternary-expression arguments).
 */
#define CPUMASK_ASSBMASK(mask, i)       do {                            \
                if ((i) < 64) {                                         \
                        (mask).ary[0] = CPUMASK_SIMPLE(i) - 1;          \
                        (mask).ary[1] = 0;                              \
                        (mask).ary[2] = 0;                              \
                        (mask).ary[3] = 0;                              \
                } else if ((i) < 128) {                                 \
                        (mask).ary[0] = (__uint64_t)-1;                 \
                        (mask).ary[1] = CPUMASK_SIMPLE((i) - 64) - 1;   \
                        (mask).ary[2] = 0;                              \
                        (mask).ary[3] = 0;                              \
                } else if ((i) < 192) {                                 \
                        (mask).ary[0] = (__uint64_t)-1;                 \
                        (mask).ary[1] = (__uint64_t)-1;                 \
                        (mask).ary[2] = CPUMASK_SIMPLE((i) - 128) - 1;  \
                        (mask).ary[3] = 0;                              \
                } else {                                                \
                        (mask).ary[0] = (__uint64_t)-1;                 \
                        (mask).ary[1] = (__uint64_t)-1;                 \
                        (mask).ary[2] = (__uint64_t)-1;                 \
                        (mask).ary[3] = CPUMASK_SIMPLE((i) - 192) - 1;  \
                }                                                       \
                                        } while(0)
195
/*
 * Assign mask = the complement of CPUMASK_ASSBMASK: all cpus at or above
 * i, for i in [0, 256).
 *
 * NOTE: (i) is evaluated several times -- avoid side effects.  Every use
 * of the argument is parenthesized (the comparisons previously used a
 * bare i, which mis-parses for e.g. ternary-expression arguments).
 */
#define CPUMASK_ASSNBMASK(mask, i)      do {                            \
                if ((i) < 64) {                                         \
                        (mask).ary[0] = ~(CPUMASK_SIMPLE(i) - 1);       \
                        (mask).ary[1] = (__uint64_t)-1;                 \
                        (mask).ary[2] = (__uint64_t)-1;                 \
                        (mask).ary[3] = (__uint64_t)-1;                 \
                } else if ((i) < 128) {                                 \
                        (mask).ary[0] = 0;                              \
                        (mask).ary[1] = ~(CPUMASK_SIMPLE((i) - 64) - 1);\
                        (mask).ary[2] = (__uint64_t)-1;                 \
                        (mask).ary[3] = (__uint64_t)-1;                 \
                } else if ((i) < 192) {                                 \
                        (mask).ary[0] = 0;                              \
                        (mask).ary[1] = 0;                              \
                        (mask).ary[2] = ~(CPUMASK_SIMPLE((i) - 128) - 1);\
                        (mask).ary[3] = (__uint64_t)-1;                 \
                } else {                                                \
                        (mask).ary[0] = 0;                              \
                        (mask).ary[1] = 0;                              \
                        (mask).ary[2] = 0;                              \
                        (mask).ary[3] = ~(CPUMASK_SIMPLE((i) - 192) - 1);\
                }                                                       \
                                        } while(0)
219
/* mask &= val (intersection). */
#define CPUMASK_ANDMASK(mask, val)      do {                            \
                                        (mask).ary[0] &= (val).ary[0];  \
                                        (mask).ary[1] &= (val).ary[1];  \
                                        (mask).ary[2] &= (val).ary[2];  \
                                        (mask).ary[3] &= (val).ary[3];  \
                                        } while(0)

/* mask &= ~val (remove val's cpus from mask). */
#define CPUMASK_NANDMASK(mask, val)     do {                            \
                                        (mask).ary[0] &= ~(val).ary[0]; \
                                        (mask).ary[1] &= ~(val).ary[1]; \
                                        (mask).ary[2] &= ~(val).ary[2]; \
                                        (mask).ary[3] &= ~(val).ary[3]; \
                                        } while(0)

/* mask |= val (union). */
#define CPUMASK_ORMASK(mask, val)       do {                            \
                                        (mask).ary[0] |= (val).ary[0];  \
                                        (mask).ary[1] |= (val).ary[1];  \
                                        (mask).ary[2] |= (val).ary[2];  \
                                        (mask).ary[3] |= (val).ary[3];  \
                                        } while(0)

/* mask ^= val (symmetric difference). */
#define CPUMASK_XORMASK(mask, val)      do {                            \
                                        (mask).ary[0] ^= (val).ary[0];  \
                                        (mask).ary[1] ^= (val).ary[1];  \
                                        (mask).ary[2] ^= (val).ary[2];  \
                                        (mask).ary[3] ^= (val).ary[3];  \
                                        } while(0)
247
/*
 * Atomic single-bit operations on a cpumask.  The atomic_set_cpumask(),
 * atomic_clear_cpumask() and testandset/testandclear primitives are
 * declared elsewhere (not visible in this header).  Only the word
 * containing the bit is updated atomically.
 */

/* Atomically set bit i. */
#define ATOMIC_CPUMASK_ORBIT(mask, i)                                     \
                        atomic_set_cpumask(&(mask).ary[((i) >> 6) & 3],   \
                                           CPUMASK_SIMPLE((i) & 63))

/* Atomically clear bit i. */
#define ATOMIC_CPUMASK_NANDBIT(mask, i)                                   \
                        atomic_clear_cpumask(&(mask).ary[((i) >> 6) & 3], \
                                           CPUMASK_SIMPLE((i) & 63))

/*
 * Atomic test-and-set / test-and-clear of bit i.  NOTE(review): (i) is
 * passed unmasked; presumably atomic_testandset_long() reduces it mod 64
 * -- confirm against the atomic header.
 */
#define ATOMIC_CPUMASK_TESTANDSET(mask, i)                                \
                atomic_testandset_long(&(mask).ary[((i) >> 6) & 3], (i))

#define ATOMIC_CPUMASK_TESTANDCLR(mask, i)                                \
                atomic_testandclear_long(&(mask).ary[((i) >> 6) & 3], (i))
261
/*
 * Atomic whole-mask operations.  Each 64-bit word is updated with its own
 * atomic op, so individual words are atomic but the mask as a whole is
 * not updated in a single atomic step.
 */

/* Atomically OR val's cpus into mask, word by word. */
#define ATOMIC_CPUMASK_ORMASK(mask, val) do {                             \
                        atomic_set_cpumask(&(mask).ary[0], (val).ary[0]); \
                        atomic_set_cpumask(&(mask).ary[1], (val).ary[1]); \
                        atomic_set_cpumask(&(mask).ary[2], (val).ary[2]); \
                        atomic_set_cpumask(&(mask).ary[3], (val).ary[3]); \
                                         } while(0)

/* Atomically clear val's cpus from mask, word by word. */
#define ATOMIC_CPUMASK_NANDMASK(mask, val) do {                             \
                        atomic_clear_cpumask(&(mask).ary[0], (val).ary[0]); \
                        atomic_clear_cpumask(&(mask).ary[1], (val).ary[1]); \
                        atomic_clear_cpumask(&(mask).ary[2], (val).ary[2]); \
                        atomic_clear_cpumask(&(mask).ary[3], (val).ary[3]); \
                                         } while(0)

/* Copy val to mask using release-ordered stores, word by word. */
#define ATOMIC_CPUMASK_COPY(mask, val) do {                                 \
                        atomic_store_rel_cpumask(&(mask).ary[0], (val).ary[0]);\
                        atomic_store_rel_cpumask(&(mask).ary[1], (val).ary[1]);\
                        atomic_store_rel_cpumask(&(mask).ary[2], (val).ary[2]);\
                        atomic_store_rel_cpumask(&(mask).ary[3], (val).ary[3]);\
                                         } while(0)
282
/*
 * cpulock_t bit layout: bit 0 is the exclusive lock, bits 1-31 form an
 * auxiliary counter incremented/decremented in units of CPULOCK_INCR.
 */
#define CPULOCK_EXCLBIT 0               /* exclusive lock bit number */
#define CPULOCK_EXCL    0x00000001      /* exclusive lock */
#define CPULOCK_INCR    0x00000002      /* auxiliary counter add/sub */
#define CPULOCK_CNTMASK 0x7FFFFFFE      /* auxiliary counter mask */

/* Page-table entry sizes as plain constants, usable from assembly. */
#define PML4SIZE        sizeof(pml4_entry_t) /* for assembly files */
#define PDPSIZE         sizeof(pdp_entry_t) /* for assembly files */
#define PDESIZE         sizeof(pd_entry_t) /* for assembly files */
#define PTESIZE         sizeof(pt_entry_t) /* for assembly files */

#endif /* !_CPU_TYPES_H_ */