Bring in the remainder of the post-SoC amd64 enchilada.
[dragonfly.git] / sys/cpu/amd64/include/atomic.h
/*-
 * Copyright (c) 1998 Doug Rabson.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/atomic.h,v 1.32 2003/11/21 03:02:00 peter Exp $
 * $DragonFly: src/sys/cpu/amd64/include/atomic.h,v 1.3 2008/08/29 17:07:06 dillon Exp $
 */
#ifndef _CPU_ATOMIC_H_
#define _CPU_ATOMIC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

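/*
 * Example (illustrative sketch only, not part of the original header):
 * typical use of the int-sized operations on a shared flags word.  The
 * variable and flag values here are hypothetical.
 */
#if 0
static volatile u_int example_flags;

static void
example_usage(void)
{
	atomic_set_int(&example_flags, 0x01);		/* *p |= V atomically */
	atomic_clear_int(&example_flags, 0x01);		/* *p &= ~V atomically */
	atomic_add_int(&example_flags, 1);		/* *p += V atomically */
	atomic_subtract_int(&example_flags, 1);		/* *p -= V atomically */
}
#endif
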
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
int atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);
u_int atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long atomic_fetchadd_long(volatile u_long *p, u_long v);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE */

#ifdef __GNUC__

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED	"lock ; "
#else
#define MPLOCKED
#endif

/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
			 : "+m" (*p)			\
			 : CONS (V));			\
}							\
static __inline void					\
atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(OP				\
			 : "+m" (*p)			\
			 : CONS (V));			\
}

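/*
 * For reference, a mechanical sketch of what the macro expands to (not
 * part of the original header): ATOMIC_ASM(add, int, "addl %1,%0", "ir", v)
 * produces roughly
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile(MPLOCKED "addl %1,%0"
 *				 : "+m" (*p)
 *				 : "ir" (v));
 *	}
 *
 * plus an atomic_add_int_nonlocked() variant that omits the lock prefix.
 */
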
#else /* !__GNUC__ */

#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)				\
extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);

#endif /* __GNUC__ */

/*
 * These functions operate on a 32 bit interrupt interlock which is defined
 * as follows:
 *
 *	bit 0-29	interlock request counter
 *	bit 30		interrupt handler disabled bit
 *	bit 31		interrupt handler currently running bit (1 = run)
 *
 *	atomic_intr_cond_test(P)	Determine if the interlock is in an
 *					acquired state.  Returns 0 if it is
 *					not acquired, non-zero if it is.
 *
 *	atomic_intr_cond_try(P)
 *					Increment the request counter and attempt to
 *					set bit 31 to acquire the interlock.  If
 *					we are unable to set bit 31 the request
 *					counter is decremented and we return 1,
 *					otherwise we return 0.
 *
 *	atomic_intr_cond_enter(P, func, arg)
 *					Increment the request counter and attempt to
 *					set bit 31 to acquire the interlock.  If
 *					we are unable to set bit 31 func(arg) is
 *					called in a loop until we are able to set
 *					bit 31.
 *
 *	atomic_intr_cond_exit(P, func, arg)
 *					Decrement the request counter and clear bit
 *					31.  If the request counter is still non-zero
 *					call func(arg) once.
 *
 *	atomic_intr_handler_disable(P)
 *					Set bit 30, indicating that the interrupt
 *					handler has been disabled.  Must be called
 *					after the hardware is disabled.
 *
 *					Returns bit 31 indicating whether a serialized
 *					accessor is active (typically the interrupt
 *					handler is running).  0 == not active,
 *					non-zero == active.
 *
 *	atomic_intr_handler_enable(P)
 *					Clear bit 30, indicating that the interrupt
 *					handler has been enabled.  Must be called
 *					before the hardware is actually enabled.
 *
 *	atomic_intr_handler_is_enabled(P)
 *					Returns bit 30, 0 indicates that the handler
 *					is enabled, non-zero indicates that it is
 *					disabled.  The request counter portion of
 *					the field is ignored.
 */

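/*
 * Illustrative sketch (not part of the original header): how a driver
 * might serialize against its interrupt handler using the interlock.
 * The helper names below are hypothetical; func is typically a spin or
 * reschedule helper for cond_enter and a wakeup helper for cond_exit.
 */
#if 0
static atomic_intr_t example_interlock;

static void
example_spin(void *arg)		/* called repeatedly while bit 31 is held */
{
	/* e.g. pause briefly or yield the cpu */
}

static void
example_wakeup(void *arg)	/* called once if requesters remain on exit */
{
	/* e.g. wake up a blocked requester */
}

static void
example_serialized_op(void)
{
	atomic_intr_cond_enter(&example_interlock, example_spin, NULL);
	/* ... work serialized against the interrupt handler ... */
	atomic_intr_cond_exit(&example_interlock, example_wakeup, NULL);
}
#endif
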
#ifndef __ATOMIC_INTR_T
#define __ATOMIC_INTR_T
typedef volatile int atomic_intr_t;
#endif

#if defined(KLD_MODULE)

void atomic_intr_init(atomic_intr_t *p);
int atomic_intr_handler_disable(atomic_intr_t *p);
void atomic_intr_handler_enable(atomic_intr_t *p);
int atomic_intr_handler_is_enabled(atomic_intr_t *p);
int atomic_intr_cond_test(atomic_intr_t *p);
int atomic_intr_cond_try(atomic_intr_t *p);
void atomic_intr_cond_enter(atomic_intr_t *p, void (*func)(void *), void *arg);
void atomic_intr_cond_exit(atomic_intr_t *p, void (*func)(void *), void *arg);

#else /* !KLD_MODULE */

static __inline
void
atomic_intr_init(atomic_intr_t *p)
{
	*p = 0;
}

static __inline
int
atomic_intr_handler_disable(atomic_intr_t *p)
{
	int data;

	__asm __volatile(MPLOCKED "orl $0x40000000,%1; movl %1,%%eax; " \
			 "andl $0x80000000,%%eax" \
			 : "=a"(data) , "+m"(*p));
	return(data);
}

static __inline
void
atomic_intr_handler_enable(atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "andl $0xBFFFFFFF,%0" : "+m" (*p));
}

static __inline
int
atomic_intr_handler_is_enabled(atomic_intr_t *p)
{
	int data;

	__asm __volatile("movl %1,%%eax; andl $0x40000000,%%eax" \
			 : "=a"(data) : "m"(*p));
	return(data);
}

static __inline
void
atomic_intr_cond_enter(atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "incl %0; " \
			 "1: ;" \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "jmp 1b; " \
			 "2: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
	/* YYY the function call may clobber even more registers? */
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0, %1 ;	"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}
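
/*
 * Illustrative sketch (not part of the original header): because
 * atomic_fetchadd_int() returns the value *before* the addition, each
 * concurrent caller of a counter bump receives a distinct value, the
 * classic ticket-number idiom.  The names here are hypothetical.
 */
#if 0
static volatile u_int example_ticket;

static u_int
example_next_ticket(void)
{
	return (atomic_fetchadd_int(&example_ticket, 1));
}
#endif
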
/*
 * Attempt to enter the interrupt condition variable.  Returns zero on
 * success, 1 on failure.
 */
static __inline
int
atomic_intr_cond_try(atomic_intr_t *p)
{
	int ret;

	__asm __volatile(MPLOCKED "incl %0; " \
			 "1: ;" \
			 "subl %%eax,%%eax; " \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 MPLOCKED "decl %0; " \
			 "movl $1,%%eax;" \
			 "2: ;" \
			 : "+m" (*p), "=a"(ret) \
			 : : "cx", "dx");
	return (ret);
}

static __inline
int
atomic_intr_cond_test(atomic_intr_t *p)
{
	return((int)(*p & 0x80000000));
}

static __inline
void
atomic_intr_cond_exit(atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "decl %0; " \
			 MPLOCKED "btrl $31,%0; " \
			 "testl $0x3FFFFFFF,%0; jz 1f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "1: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "di");	/* XXX clobbers more regs */
}

#endif

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (32 bit and 64 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#if defined(__GNUC__)

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile (
	MPLOCKED
	"	cmpxchgl %1,%2 ;	"
	"	setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"1:				"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{
	long res = exp;

	__asm __volatile (
	MPLOCKED
	"	cmpxchgq %1,%2 ;	"
	"	setz	%%al ;		"
	"	movzbq	%%al,%0 ;	"
	"1:				"
	"# atomic_cmpset_long"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}
#endif /* defined(__GNUC__) */
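
/*
 * Illustrative sketch (not part of the original header): a minimal
 * compare-and-set spin loop of the kind the mutex code builds on.
 * Real lock code adds backoff and contention handling; the names here
 * are hypothetical.
 */
#if 0
static volatile u_int example_lock;	/* 0 = free, 1 = held */

static void
example_acquire(void)
{
	/* loop until we are the caller that transitions 0 -> 1 */
	while (atomic_cmpset_int(&example_lock, 0, 1) == 0)
		;
}

static void
example_release(void)
{
	atomic_store_rel_int(&example_lock, 0);
}
#endif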

#if defined(__GNUC__)

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
			 : "=a" (res),	/* 0 (result) */\
			   "+m" (*p)	/* 1 */		\
			 : : "memory");			\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
			 : "+m" (*p),	/* 0 */		\
			   "+r" (v)	/* 1 */		\
			 : : "memory");			\
}							\
struct __hack

#else /* !defined(__GNUC__) */

extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);
extern int atomic_cmpset_long(volatile u_long *, u_long, u_long);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)				\
extern u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* defined(__GNUC__) */

#endif /* !KLD_MODULE */

ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v);
ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v);
ATOMIC_ASM(subtract, char, "subb %b1,%0", "iq", v);

ATOMIC_ASM(set, short, "orw %w1,%0", "ir", v);
ATOMIC_ASM(clear, short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add, short, "addw %w1,%0", "ir", v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);

ATOMIC_ASM(set, int, "orl %1,%0", "ir", v);
ATOMIC_ASM(clear, int, "andl %1,%0", "ir", ~v);
ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v);

ATOMIC_ASM(set, long, "orq %1,%0", "ir", v);
ATOMIC_ASM(clear, long, "andq %1,%0", "ir", ~v);
ATOMIC_ASM(add, long, "addq %1,%0", "ir", v);
ATOMIC_ASM(subtract, long, "subq %1,%0", "ir", v);

ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short, "cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int, "cmpxchgl %0,%1", "xchgl %1,%0");
ATOMIC_STORE_LOAD(long, "cmpxchgq %0,%1", "xchgq %1,%0");
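
/*
 * Illustrative sketch (not part of the original header): a load_acq/
 * store_rel pair used as a flag handoff.  Data written before the
 * releasing store is visible to a reader that observes the flag via
 * the acquiring load.  The names here are hypothetical.
 */
#if 0
static volatile u_int example_data;
static volatile u_int example_ready;

static void
example_publish(u_int v)
{
	example_data = v;
	atomic_store_rel_int(&example_ready, 1);
}

static u_int
example_consume(void)
{
	while (atomic_load_acq_int(&example_ready) == 0)
		;
	return (example_data);
}
#endif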
#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#define atomic_set_acq_char atomic_set_char
#define atomic_set_rel_char atomic_set_char
#define atomic_clear_acq_char atomic_clear_char
#define atomic_clear_rel_char atomic_clear_char
#define atomic_add_acq_char atomic_add_char
#define atomic_add_rel_char atomic_add_char
#define atomic_subtract_acq_char atomic_subtract_char
#define atomic_subtract_rel_char atomic_subtract_char

#define atomic_set_acq_short atomic_set_short
#define atomic_set_rel_short atomic_set_short
#define atomic_clear_acq_short atomic_clear_short
#define atomic_clear_rel_short atomic_clear_short
#define atomic_add_acq_short atomic_add_short
#define atomic_add_rel_short atomic_add_short
#define atomic_subtract_acq_short atomic_subtract_short
#define atomic_subtract_rel_short atomic_subtract_short

#define atomic_set_acq_int atomic_set_int
#define atomic_set_rel_int atomic_set_int
#define atomic_clear_acq_int atomic_clear_int
#define atomic_clear_rel_int atomic_clear_int
#define atomic_add_acq_int atomic_add_int
#define atomic_add_rel_int atomic_add_int
#define atomic_subtract_acq_int atomic_subtract_int
#define atomic_subtract_rel_int atomic_subtract_int
#define atomic_cmpset_acq_int atomic_cmpset_int
#define atomic_cmpset_rel_int atomic_cmpset_int

#define atomic_set_acq_long atomic_set_long
#define atomic_set_rel_long atomic_set_long
#define atomic_clear_acq_long atomic_clear_long
#define atomic_clear_rel_long atomic_clear_long
#define atomic_add_acq_long atomic_add_long
#define atomic_add_rel_long atomic_add_long
#define atomic_subtract_acq_long atomic_subtract_long
#define atomic_subtract_rel_long atomic_subtract_long

/* Operations on 8-bit bytes. */
#define atomic_set_8 atomic_set_char
#define atomic_set_acq_8 atomic_set_acq_char
#define atomic_set_rel_8 atomic_set_rel_char
#define atomic_clear_8 atomic_clear_char
#define atomic_clear_acq_8 atomic_clear_acq_char
#define atomic_clear_rel_8 atomic_clear_rel_char
#define atomic_add_8 atomic_add_char
#define atomic_add_acq_8 atomic_add_acq_char
#define atomic_add_rel_8 atomic_add_rel_char
#define atomic_subtract_8 atomic_subtract_char
#define atomic_subtract_acq_8 atomic_subtract_acq_char
#define atomic_subtract_rel_8 atomic_subtract_rel_char
#define atomic_load_acq_8 atomic_load_acq_char
#define atomic_store_rel_8 atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16 atomic_set_short
#define atomic_set_acq_16 atomic_set_acq_short
#define atomic_set_rel_16 atomic_set_rel_short
#define atomic_clear_16 atomic_clear_short
#define atomic_clear_acq_16 atomic_clear_acq_short
#define atomic_clear_rel_16 atomic_clear_rel_short
#define atomic_add_16 atomic_add_short
#define atomic_add_acq_16 atomic_add_acq_short
#define atomic_add_rel_16 atomic_add_rel_short
#define atomic_subtract_16 atomic_subtract_short
#define atomic_subtract_acq_16 atomic_subtract_acq_short
#define atomic_subtract_rel_16 atomic_subtract_rel_short
#define atomic_load_acq_16 atomic_load_acq_short
#define atomic_store_rel_16 atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32 atomic_set_int
#define atomic_set_acq_32 atomic_set_acq_int
#define atomic_set_rel_32 atomic_set_rel_int
#define atomic_clear_32 atomic_clear_int
#define atomic_clear_acq_32 atomic_clear_acq_int
#define atomic_clear_rel_32 atomic_clear_rel_int
#define atomic_add_32 atomic_add_int
#define atomic_add_acq_32 atomic_add_acq_int
#define atomic_add_rel_32 atomic_add_rel_int
#define atomic_subtract_32 atomic_subtract_int
#define atomic_subtract_acq_32 atomic_subtract_acq_int
#define atomic_subtract_rel_32 atomic_subtract_rel_int
#define atomic_load_acq_32 atomic_load_acq_int
#define atomic_store_rel_32 atomic_store_rel_int
#define atomic_cmpset_32 atomic_cmpset_int
#define atomic_cmpset_acq_32 atomic_cmpset_acq_int
#define atomic_cmpset_rel_32 atomic_cmpset_rel_int
#define atomic_readandclear_32 atomic_readandclear_int
#define atomic_fetchadd_32 atomic_fetchadd_int

/* Operations on pointers. */
#define atomic_set_ptr atomic_set_long
#define atomic_set_acq_ptr atomic_set_acq_long
#define atomic_set_rel_ptr atomic_set_rel_long
#define atomic_clear_ptr atomic_clear_long
#define atomic_clear_acq_ptr atomic_clear_acq_long
#define atomic_clear_rel_ptr atomic_clear_rel_long
#define atomic_add_ptr atomic_add_long
#define atomic_add_acq_ptr atomic_add_acq_long
#define atomic_add_rel_ptr atomic_add_rel_long
#define atomic_subtract_ptr atomic_subtract_long
#define atomic_subtract_acq_ptr atomic_subtract_acq_long
#define atomic_subtract_rel_ptr atomic_subtract_rel_long
#define atomic_load_acq_ptr atomic_load_acq_long
#define atomic_store_rel_ptr atomic_store_rel_long
#define atomic_cmpset_ptr atomic_cmpset_long
#define atomic_cmpset_acq_ptr atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr atomic_cmpset_rel_long
#define atomic_readandclear_ptr atomic_readandclear_long

#if defined(__GNUC__)

#if defined(KLD_MODULE)
extern u_int atomic_readandclear_int(volatile u_int *addr);
extern u_long atomic_readandclear_long(volatile u_long *addr);
#else /* !KLD_MODULE */
static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	"	xorq	%0,%0 ;		"
	"	xchgq	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}
#endif /* KLD_MODULE */

#else /* !defined(__GNUC__) */

extern u_long atomic_readandclear_long(volatile u_long *);
extern u_int atomic_readandclear_int(volatile u_int *);

#endif /* defined(__GNUC__) */

#endif /* ! _CPU_ATOMIC_H_ */