2 * Copyright (c) 1990, 1993
3 * The Regents of the University of California. All rights reserved.
4 * Copyright (c) 2008 The DragonFly Project.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by the University of
17 * California, Berkeley and its contributors.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)types.h 8.3 (Berkeley) 1/5/94
35 * $FreeBSD: src/sys/i386/include/types.h,v 1.19.2.1 2001/03/21 10:50:58 peter Exp $
41 #include <machine/stdint.h>
/*
 * Machine-word-sized types.  register_t/u_register_t are the width of a
 * general purpose CPU register; __segsz_t is the signed segment size type.
 * NOTE(review): the matching #endif for this conditional lies outside this
 * excerpt — verify against the upstream header.
 */
#if defined(__x86_64__)
typedef __int64_t __segsz_t; /* segment size */
typedef __int64_t register_t;
typedef __uint64_t u_register_t;
#elif defined(__i386__)
typedef __int32_t __segsz_t; /* segment size */
typedef __int32_t register_t;
typedef __uint32_t u_register_t;
/*
 * Virtual memory types.  vm_offset_t/vm_size_t are pointer-width
 * (unsigned long), while the page-index and physical-address types are
 * always 64 bits so physical addresses above 4G remain representable
 * on 32-bit configurations.
 */
typedef unsigned long vm_offset_t; /* address space bounded offset */
typedef unsigned long vm_size_t; /* address space bounded size */
typedef __uint64_t vm_pindex_t; /* physical page index */
typedef __int64_t vm_ooffset_t; /* VM object bounded offset */
typedef __uint64_t vm_poff_t; /* physical offset */
typedef __uint64_t vm_paddr_t; /* physical addr (same as vm_poff_t) */
/* 64-bit integer types, presumably wide enough to round-trip a function
 * pointer (intfptr = "int function pointer") — TODO confirm usage. */
typedef __int64_t intfptr_t;
typedef __uint64_t uintfptr_t;
/*
 * x86_64 page table entry types, one per level of the 4-level paging
 * hierarchy (PML4 -> PDP -> PD -> PT).  Each entry is 64 bits.
 */
typedef __uint64_t pml4_entry_t;
typedef __uint64_t pdp_entry_t;
typedef __uint64_t pd_entry_t;
typedef __uint64_t pt_entry_t;
/* Combined exclusive-lock bit plus counter; see the CPULOCK_* constants
 * below for the bit layout. */
typedef __uint32_t cpulock_t; /* count and exclusive lock */
76 * cpumask_t - a mask representing a set of cpus and supporting routines.
78 * WARNING! It is recommended that this mask NOT be made variably-sized
79 * because it affects a huge number of system structures. However,
80 * kernel code (non-module) can be optimized to not operate on the
84 #define CPUMASK_ELEMENTS 4 /* tested by assembly for #error */
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

/* Static initializer with every cpu bit set.  NOTE(review): the
 * remaining (__uint64_t)-1 continuation lines are truncated in this
 * copy — verify against the upstream header. */
#define CPUMASK_INITIALIZER_ALLONES { .ary = { (__uint64_t)-1, \
#define CPUMASK_INITIALIZER_ONLYONE { .ary = { 1, 0, 0, 0 } } /* cpu 0 only */
98 #define CPUMASK_SIMPLE(cpu) ((__uint64_t)1 << (cpu))
/*
 * BSRCPUMASK(val) - index of the highest set bit in the mask, scanning
 * from the top 64-bit word down; bsrq performs bit-scan-reverse on one
 * word.  NOTE(review): the result for an all-zero mask depends on
 * bsrq(0) — confirm that callers never pass an empty mask.
 */
#define BSRCPUMASK(val) ((val).ary[3] ? 192 + bsrq((val).ary[3]) : \
			 ((val).ary[2] ? 128 + bsrq((val).ary[2]) : \
			  ((val).ary[1] ? 64 + bsrq((val).ary[1]) : \
			   bsrq((val).ary[0]))))

/*
 * BSFCPUMASK(val) - index of the lowest set bit in the mask, scanning
 * from word 0 upward; bsfq performs bit-scan-forward on one word.
 * Same all-zero caveat as BSRCPUMASK.
 */
#define BSFCPUMASK(val) ((val).ary[0] ? bsfq((val).ary[0]) : \
			 ((val).ary[1] ? 64 + bsfq((val).ary[1]) : \
			  ((val).ary[2] ? 128 + bsfq((val).ary[2]) : \
			   192 + bsfq((val).ary[3]))))
/*
 * CPUMASK_CMPMASKEQ(val1, val2) - true when the two cpu masks are
 * bit-for-bit identical across all four 64-bit words.
 */
#define CPUMASK_CMPMASKEQ(val1, val2)		\
		((val1).ary[3] == (val2).ary[3] && \
		 (val1).ary[2] == (val2).ary[2] && \
		 (val1).ary[1] == (val2).ary[1] && \
		 (val1).ary[0] == (val2).ary[0])
/*
 * CPUMASK_CMPMASKNEQ(val1, val2) - true when the two cpu masks differ
 * in at least one of the four 64-bit words (logical negation of
 * CPUMASK_CMPMASKEQ).
 */
#define CPUMASK_CMPMASKNEQ(val1, val2)		\
		((val1).ary[3] != (val2).ary[3] || \
		 (val1).ary[2] != (val2).ary[2] || \
		 (val1).ary[1] != (val2).ary[1] || \
		 (val1).ary[0] != (val2).ary[0])
/*
 * Mask predicate macros:
 *  CPUMASK_ISUP(val)	    - true when only cpu 0's bit is set.
 *  CPUMASK_TESTZERO(val)   - true when the mask is empty.
 *  CPUMASK_TESTNZERO(val)  - true when at least one bit is set.
 *  CPUMASK_TESTBIT(val, i) - non-zero when bit i (0..255) is set; i is
 *			      split into word (i >> 6) and bit (i & 63).
 * NOTE(review): the ary[3] term and closing parenthesis of the first
 * three predicates appear truncated in this copy — verify against the
 * upstream header before relying on these bodies.
 */
#define CPUMASK_ISUP(val) ((val).ary[0] == 1 && \
			 (val).ary[1] == 0 && \
			 (val).ary[2] == 0 && \
#define CPUMASK_TESTZERO(val) ((val).ary[0] == 0 && \
			 (val).ary[1] == 0 && \
			 (val).ary[2] == 0 && \
#define CPUMASK_TESTNZERO(val) ((val).ary[0] != 0 || \
			 (val).ary[1] != 0 || \
			 (val).ary[2] != 0 || \
#define CPUMASK_TESTBIT(val, i) ((val).ary[((i) >> 6) & 3] & \
				 CPUMASK_SIMPLE((i) & 63))
/*
 * CPUMASK_TESTMASK(val1, val2) - true when the two masks share at least
 * one set bit (non-empty intersection), tested word by word.
 *
 * Fix: parenthesize the val2 macro argument before member access, the
 * same way val1 is, so that an expression argument (e.g. *maskp)
 * expands with the intended precedence instead of mis-parsing.
 */
#define CPUMASK_TESTMASK(val1, val2)	(((val1).ary[0] & (val2).ary[0]) || \
					 ((val1).ary[1] & (val2).ary[1]) || \
					 ((val1).ary[2] & (val2).ary[2]) || \
					 ((val1).ary[3] & (val2).ary[3]))
/* Low 64 bits of the mask (cpus 0-63). */
#define CPUMASK_LOWMASK(val) ((val).ary[0])

/* Set bit i (0..255) in the mask. */
#define CPUMASK_ORBIT(mask, i) ((mask).ary[((i) >> 6) & 3] |= \
				 CPUMASK_SIMPLE((i) & 63))

/* AND the word containing bit i against just that bit: clears every
 * other bit in that 64-bit word; the other three words are untouched. */
#define CPUMASK_ANDBIT(mask, i) ((mask).ary[((i) >> 6) & 3] &= \
				 CPUMASK_SIMPLE((i) & 63))

/* Clear bit i (0..255) in the mask. */
#define CPUMASK_NANDBIT(mask, i) ((mask).ary[((i) >> 6) & 3] &= \
				 ~CPUMASK_SIMPLE((i) & 63))
/*
 * Whole-mask assignment and bitwise-op macros (multi-statement
 * do { } while(0) bodies):
 *  CPUMASK_ASSZERO(mask)	- clear all four words.
 *  CPUMASK_ASSALLONES(mask)	- set all four words to all ones.
 *  CPUMASK_ASSBIT(mask, i)	- zero the mask, then set bit i.
 *  CPUMASK_ASSBMASK(mask, i)	- set bits 0..i-1 (lowest i bits).
 *  CPUMASK_ASSNBMASK(mask, i)	- set bits i..255 (complement range).
 *  CPUMASK_{AND,NAND,OR,XOR}MASK(mask, val) - word-wise mask op.
 *  ATOMIC_CPUMASK_ORBIT(mask, i) - atomically set bit i via
 *				  atomic_set_cpumask on the word
 *				  containing it.
 * NOTE(review): several "} while(0)" terminators and if/else lines are
 * missing from this copy — verify these bodies against the upstream
 * header before editing.
 */
#define CPUMASK_ASSZERO(mask) do { \
#define CPUMASK_ASSALLONES(mask) do { \
 (mask).ary[0] = (__uint64_t)-1; \
 (mask).ary[1] = (__uint64_t)-1; \
 (mask).ary[2] = (__uint64_t)-1; \
 (mask).ary[3] = (__uint64_t)-1; \
#define CPUMASK_ASSBIT(mask, i) do { \
 CPUMASK_ASSZERO(mask); \
 CPUMASK_ORBIT(mask, i); \
#define CPUMASK_ASSBMASK(mask, i) do { \
 (mask).ary[0] = CPUMASK_SIMPLE(i) - 1; \
 } else if (i < 128) { \
 (mask).ary[0] = (__uint64_t)-1; \
 (mask).ary[1] = CPUMASK_SIMPLE((i) - 64) - 1; \
 } else if (i < 192) { \
 (mask).ary[0] = (__uint64_t)-1; \
 (mask).ary[1] = (__uint64_t)-1; \
 (mask).ary[2] = CPUMASK_SIMPLE((i) - 128) - 1; \
 (mask).ary[0] = (__uint64_t)-1; \
 (mask).ary[1] = (__uint64_t)-1; \
 (mask).ary[2] = (__uint64_t)-1; \
 (mask).ary[3] = CPUMASK_SIMPLE((i) - 192) - 1; \
#define CPUMASK_ASSNBMASK(mask, i) do { \
 (mask).ary[0] = ~(CPUMASK_SIMPLE(i) - 1); \
 (mask).ary[1] = (__uint64_t)-1; \
 (mask).ary[2] = (__uint64_t)-1; \
 (mask).ary[3] = (__uint64_t)-1; \
 } else if (i < 128) { \
 (mask).ary[1] = ~(CPUMASK_SIMPLE((i) - 64) - 1);\
 (mask).ary[2] = (__uint64_t)-1; \
 (mask).ary[3] = (__uint64_t)-1; \
 } else if (i < 192) { \
 (mask).ary[2] = ~(CPUMASK_SIMPLE((i) - 128) - 1);\
 (mask).ary[3] = (__uint64_t)-1; \
 (mask).ary[3] = ~(CPUMASK_SIMPLE((i) - 192) - 1);\
#define CPUMASK_ANDMASK(mask, val) do { \
 (mask).ary[0] &= (val).ary[0]; \
 (mask).ary[1] &= (val).ary[1]; \
 (mask).ary[2] &= (val).ary[2]; \
 (mask).ary[3] &= (val).ary[3]; \
#define CPUMASK_NANDMASK(mask, val) do { \
 (mask).ary[0] &= ~(val).ary[0]; \
 (mask).ary[1] &= ~(val).ary[1]; \
 (mask).ary[2] &= ~(val).ary[2]; \
 (mask).ary[3] &= ~(val).ary[3]; \
#define CPUMASK_ORMASK(mask, val) do { \
 (mask).ary[0] |= (val).ary[0]; \
 (mask).ary[1] |= (val).ary[1]; \
 (mask).ary[2] |= (val).ary[2]; \
 (mask).ary[3] |= (val).ary[3]; \
#define CPUMASK_XORMASK(mask, val) do { \
 (mask).ary[0] ^= (val).ary[0]; \
 (mask).ary[1] ^= (val).ary[1]; \
 (mask).ary[2] ^= (val).ary[2]; \
 (mask).ary[3] ^= (val).ary[3]; \
#define ATOMIC_CPUMASK_ORBIT(mask, i) \
 atomic_set_cpumask(&(mask).ary[((i) >> 6) & 3], \
 CPUMASK_SIMPLE((i) & 63))
/* Atomically clear bit i (0..255) by applying atomic_clear_cpumask to
 * the 64-bit word containing it. */
#define ATOMIC_CPUMASK_NANDBIT(mask, i) \
 atomic_clear_cpumask(&(mask).ary[((i) >> 6) & 3], \
 CPUMASK_SIMPLE((i) & 63))
/*
 * Atomic word-wise mask OR / NAND.  Each of the four 64-bit words is
 * updated with a separate atomic operation, so individual words are
 * updated atomically but the mask as a whole is not a single atomic
 * update.  NOTE(review): the "} while(0)" terminators appear truncated
 * in this copy — verify against the upstream header.
 */
#define ATOMIC_CPUMASK_ORMASK(mask, val) do { \
 atomic_set_cpumask(&(mask).ary[0], (val).ary[0]); \
 atomic_set_cpumask(&(mask).ary[1], (val).ary[1]); \
 atomic_set_cpumask(&(mask).ary[2], (val).ary[2]); \
 atomic_set_cpumask(&(mask).ary[3], (val).ary[3]); \
#define ATOMIC_CPUMASK_NANDMASK(mask, val) do { \
 atomic_clear_cpumask(&(mask).ary[0], (val).ary[0]); \
 atomic_clear_cpumask(&(mask).ary[1], (val).ary[1]); \
 atomic_clear_cpumask(&(mask).ary[2], (val).ary[2]); \
 atomic_clear_cpumask(&(mask).ary[3], (val).ary[3]); \
/*
 * cpulock_t bit layout: bit 0 is the exclusive lock; CPULOCK_CNTMASK
 * (bits 1-30) selects the counter field, which is adjusted in units of
 * CPULOCK_INCR.
 */
#define CPULOCK_EXCLBIT 0 /* exclusive lock bit number */
#define CPULOCK_EXCL 0x00000001 /* exclusive lock */
#define CPULOCK_INCR 0x00000002 /* auxiliary counter add/sub */
#define CPULOCK_CNTMASK 0x7FFFFFFE

/* Page-table entry sizes, exported for use by assembly files. */
#define PML4SIZE sizeof(pml4_entry_t) /* for assembly files */
#define PDPSIZE sizeof(pdp_entry_t) /* for assembly files */
#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
#define PTESIZE sizeof(pt_entry_t) /* for assembly files */
283 #endif /* !_CPU_TYPES_H_ */