/*
 * Copyright (c) 2008-2016 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _CPU_CPUMASK_H_
#define _CPU_CPUMASK_H_

#include <cpu/types.h>
#include <cpu/atomic.h>
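
/*
 * cpumask_t is an array of CPUMASK_ELEMENTS (4) 64-bit words, .ary[0..3],
 * providing one bit per cpu for up to 256 cpus: cpu N lives in bit (N & 63)
 * of word (N >> 6).  The macros below assume exactly that layout.
 */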
#if CPUMASK_ELEMENTS != 4
#error "CPUMASK macros incompatible with cpumask_t"
#endif

#define CPUMASK_INITIALIZER_ALLONES	{ .ary = { (__uint64_t)-1, (__uint64_t)-1, \
					   (__uint64_t)-1, (__uint64_t)-1 } }
#define CPUMASK_INITIALIZER_ONLYONE	{ .ary = { 1, 0, 0, 0 } }
#define CPUMASK_SIMPLE(cpu)	((__uint64_t)1 << (cpu))

#define CPUMASK_ADDR(mask, cpu)						\
		(((cpu) < 64) ? &(mask).ary[0] :			\
		(((cpu) < 128) ? &(mask).ary[1] :			\
		(((cpu) < 192) ? &(mask).ary[2] : &(mask).ary[3])))
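
/*
 * BSRCPUMASK()/BSFCPUMASK() return the highest/lowest cpu number set in
 * the mask.  The underlying bsrq()/bsfq() results are undefined for a
 * zero operand, so callers are expected to verify the mask is non-empty
 * (e.g. with CPUMASK_TESTNZERO()) first.
 */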
#define BSRCPUMASK(val)		((val).ary[3] ? 192 + bsrq((val).ary[3]) : \
				((val).ary[2] ? 128 + bsrq((val).ary[2]) : \
				((val).ary[1] ? 64 + bsrq((val).ary[1]) : \
						bsrq((val).ary[0]))))

#define BSFCPUMASK(val)		((val).ary[0] ? bsfq((val).ary[0]) : \
				((val).ary[1] ? 64 + bsfq((val).ary[1]) : \
				((val).ary[2] ? 128 + bsfq((val).ary[2]) : \
						192 + bsfq((val).ary[3]))))
#define CPUMASK_CMPMASKEQ(val1, val2)	((val1).ary[0] == (val2).ary[0] && \
					 (val1).ary[1] == (val2).ary[1] && \
					 (val1).ary[2] == (val2).ary[2] && \
					 (val1).ary[3] == (val2).ary[3])

#define CPUMASK_CMPMASKNEQ(val1, val2)	((val1).ary[0] != (val2).ary[0] || \
					 (val1).ary[1] != (val2).ary[1] || \
					 (val1).ary[2] != (val2).ary[2] || \
					 (val1).ary[3] != (val2).ary[3])
#define CPUMASK_ISUP(val)		((val).ary[0] == 1 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTZERO(val)		((val).ary[0] == 0 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTNZERO(val)		((val).ary[0] != 0 || \
					 (val).ary[1] != 0 || \
					 (val).ary[2] != 0 || \
					 (val).ary[3] != 0)
#define CPUMASK_TESTBIT(val, i)		((val).ary[((i) >> 6) & 3] & \
					 CPUMASK_SIMPLE((i) & 63))
#define CPUMASK_TESTMASK(val1, val2)	(((val1).ary[0] & (val2).ary[0]) || \
					 ((val1).ary[1] & (val2).ary[1]) || \
					 ((val1).ary[2] & (val2).ary[2]) || \
					 ((val1).ary[3] & (val2).ary[3]))
#define CPUMASK_LOWMASK(val)		((val).ary[0])

#define CPUMASK_ORBIT(mask, i)		((mask).ary[((i) >> 6) & 3] |= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_ANDBIT(mask, i)		((mask).ary[((i) >> 6) & 3] &= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_NANDBIT(mask, i)	((mask).ary[((i) >> 6) & 3] &= \
					 ~CPUMASK_SIMPLE((i) & 63))
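
/*
 * Typical consumption pattern (illustrative sketch only, not part of this
 * header): pull the lowest set cpu out of a scratch copy of a mask until
 * the copy is empty.  "some_mask" below is a placeholder for any cpumask_t.
 *
 *	cpumask_t scan = some_mask;		scratch copy
 *	while (CPUMASK_TESTNZERO(scan)) {
 *		int cpu = BSFCPUMASK(scan);	lowest set cpu
 *		CPUMASK_NANDBIT(scan, cpu);	clear it, then process cpu
 *	}
 */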
#define CPUMASK_ASSZERO(mask)		do {				\
					(mask).ary[0] = 0;		\
					(mask).ary[1] = 0;		\
					(mask).ary[2] = 0;		\
					(mask).ary[3] = 0;		\
					} while(0)

#define CPUMASK_ASSALLONES(mask)	do {				\
					(mask).ary[0] = (__uint64_t)-1;	\
					(mask).ary[1] = (__uint64_t)-1;	\
					(mask).ary[2] = (__uint64_t)-1;	\
					(mask).ary[3] = (__uint64_t)-1;	\
					} while(0)

#define CPUMASK_ASSBIT(mask, i)		do {				\
					CPUMASK_ASSZERO(mask);		\
					CPUMASK_ORBIT(mask, i);		\
					} while(0)

#define CPUMASK_ASSBMASK(mask, i)	do {				\
		if (i < 64) {						\
			(mask).ary[0] = CPUMASK_SIMPLE(i) - 1;		\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if (i < 128) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = CPUMASK_SIMPLE((i) - 64) - 1;	\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if (i < 192) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = CPUMASK_SIMPLE((i) - 128) - 1;	\
			(mask).ary[3] = 0;				\
		} else {						\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = CPUMASK_SIMPLE((i) - 192) - 1;	\
		}							\
		} while(0)

#define CPUMASK_ASSNBMASK(mask, i)	do {				\
		if (i < 64) {						\
			(mask).ary[0] = ~(CPUMASK_SIMPLE(i) - 1);	\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if (i < 128) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = ~(CPUMASK_SIMPLE((i) - 64) - 1);\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if (i < 192) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = ~(CPUMASK_SIMPLE((i) - 128) - 1);\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else {						\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = ~(CPUMASK_SIMPLE((i) - 192) - 1);\
		}							\
		} while(0)
#define CPUMASK_ANDMASK(mask, val)	do {				\
					(mask).ary[0] &= (val).ary[0];	\
					(mask).ary[1] &= (val).ary[1];	\
					(mask).ary[2] &= (val).ary[2];	\
					(mask).ary[3] &= (val).ary[3];	\
					} while(0)

#define CPUMASK_NANDMASK(mask, val)	do {				\
					(mask).ary[0] &= ~(val).ary[0];	\
					(mask).ary[1] &= ~(val).ary[1];	\
					(mask).ary[2] &= ~(val).ary[2];	\
					(mask).ary[3] &= ~(val).ary[3];	\
					} while(0)

#define CPUMASK_ORMASK(mask, val)	do {				\
					(mask).ary[0] |= (val).ary[0];	\
					(mask).ary[1] |= (val).ary[1];	\
					(mask).ary[2] |= (val).ary[2];	\
					(mask).ary[3] |= (val).ary[3];	\
					} while(0)

#define CPUMASK_XORMASK(mask, val)	do {				\
					(mask).ary[0] ^= (val).ary[0];	\
					(mask).ary[1] ^= (val).ary[1];	\
					(mask).ary[2] ^= (val).ary[2];	\
					(mask).ary[3] ^= (val).ary[3];	\
					} while(0)
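
/*
 * The ATOMIC_CPUMASK_* variants below use the locked primitives from
 * <cpu/atomic.h>, so concurrent updates of the same mask from multiple
 * cpus are safe on a per-64-bit-word basis.  The plain CPUMASK_* macros
 * above provide no such guarantee.
 */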
#define ATOMIC_CPUMASK_ORBIT(mask, i)					\
		atomic_set_cpumask(&(mask).ary[((i) >> 6) & 3],		\
				   CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_NANDBIT(mask, i)					\
		atomic_clear_cpumask(&(mask).ary[((i) >> 6) & 3],	\
				     CPUMASK_SIMPLE((i) & 63))
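
/*
 * The TESTANDSET/TESTANDCLR forms return the previous state of the bit
 * (non-zero if it was already set), allowing atomic "claim"-style
 * operations on a single cpu bit.
 */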
#define ATOMIC_CPUMASK_TESTANDSET(mask, i)				\
		atomic_testandset_long(&(mask).ary[((i) >> 6) & 3], (i))

#define ATOMIC_CPUMASK_TESTANDCLR(mask, i)				\
		atomic_testandclear_long(&(mask).ary[((i) >> 6) & 3], (i))
#define ATOMIC_CPUMASK_ORMASK(mask, val) do {				\
		atomic_set_cpumask(&(mask).ary[0], (val).ary[0]);	\
		atomic_set_cpumask(&(mask).ary[1], (val).ary[1]);	\
		atomic_set_cpumask(&(mask).ary[2], (val).ary[2]);	\
		atomic_set_cpumask(&(mask).ary[3], (val).ary[3]);	\
		} while(0)

#define ATOMIC_CPUMASK_NANDMASK(mask, val) do {				\
		atomic_clear_cpumask(&(mask).ary[0], (val).ary[0]);	\
		atomic_clear_cpumask(&(mask).ary[1], (val).ary[1]);	\
		atomic_clear_cpumask(&(mask).ary[2], (val).ary[2]);	\
		atomic_clear_cpumask(&(mask).ary[3], (val).ary[3]);	\
		} while(0)

#define ATOMIC_CPUMASK_COPY(mask, val) do {				\
		atomic_store_rel_cpumask(&(mask).ary[0], (val).ary[0]);\
		atomic_store_rel_cpumask(&(mask).ary[1], (val).ary[1]);\
		atomic_store_rel_cpumask(&(mask).ary[2], (val).ary[2]);\
		atomic_store_rel_cpumask(&(mask).ary[3], (val).ary[3]);\
		} while(0)

#endif	/* !_CPU_CPUMASK_H_ */