/*
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/atomic.h,v 1.9.2.1 2000/07/07 00:38:47 obrien Exp $
 */
#ifndef _CPU_ATOMIC_H_
#define _CPU_ATOMIC_H_

#include <sys/types.h>
/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long*)(P)); *(u_long*)(P) = 0;)
 * atomic_readandclear_int(P)	(return (*(u_int*)(P)); *(u_int*)(P) = 0;)
 */
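/*
 * Usage sketch (illustrative only; "my_flags" and MYF_BUSY are hypothetical
 * names, not part of this header): marking a shared word busy with the
 * locked int variants.
 *
 *	static volatile u_int my_flags;
 *	#define MYF_BUSY	0x00000001
 *
 *	atomic_set_int(&my_flags, MYF_BUSY);	(set bit, SMP safe)
 *	...critical work...
 *	atomic_clear_int(&my_flags, MYF_BUSY);	(clear bit, SMP safe)
 */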
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built, or if user code is using these functions.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);
#else /* !KLD_MODULE */
#define MPLOCKED	"lock ; "
/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.  The primary
 * atomic instructions are MP safe, the nonlocked instructions are
 * local-interrupt-safe (so we don't depend on C 'X |= Y' generating an
 * atomic instruction).
 *
 * +m - memory is read and written (=m - memory is only written)
 * iq - integer constant or %ax/%bx/%cx/%dx (ir = int constant or any reg)
 *	(Note: byte instructions only work on %ax,%bx,%cx, or %dx).  iq
 *	is good enough for our needs so don't get fancy.
 *
 * NOTE: 64-bit immediate values are not supported for most x86-64
 *	 instructions so we have to use "r".
 */
/* egcs 1.1.2+ version */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
			 : "+m" (*p)			\
			 : CONS (V));			\
}							\
static __inline void					\
atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(OP				\
			 : "+m" (*p)			\
			 : CONS (V));			\
}

#endif /* KLD_MODULE */
/* egcs 1.1.2+ version */
ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v)
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v)
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v)
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v)

ATOMIC_ASM(set,      short, "orw %w1,%0",  "iq",  v)
ATOMIC_ASM(clear,    short, "andw %w1,%0", "iq", ~v)
ATOMIC_ASM(add,      short, "addw %w1,%0", "iq",  v)
ATOMIC_ASM(subtract, short, "subw %w1,%0", "iq",  v)

ATOMIC_ASM(set,      int,   "orl %1,%0",   "iq",  v)
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "iq", ~v)
ATOMIC_ASM(add,      int,   "addl %1,%0",  "iq",  v)
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "iq",  v)

ATOMIC_ASM(set,      long,  "orq %1,%0",   "r",   v)
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "r",  ~v)
ATOMIC_ASM(add,      long,  "addq %1,%0",  "r",   v)
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "r",   v)
#if defined(KLD_MODULE)

u_long	atomic_readandclear_long(volatile u_long *addr);
u_int	atomic_readandclear_int(volatile u_int *addr);

#else /* !KLD_MODULE */
static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile("xchgq %1,%0 ; "
			 "# atomic_readandclear_long"
			 : "+r" (res),		/* 0 */
			   "=m" (*addr)		/* 1 */
			 : "m" (*addr));
	return (res);
}
static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile("xchgl %1,%0 ; "
			 "# atomic_readandclear_int"
			 : "+r" (res),		/* 0 */
			   "=m" (*addr)		/* 1 */
			 : "m" (*addr));
	return (res);
}

#endif /* KLD_MODULE */
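/*
 * Usage sketch (illustrative only; "ipiq_count" is a hypothetical
 * statistics counter): the counter is sampled and zeroed in a single
 * atomic exchange, so increments racing the harvest are never lost.
 *
 *	static volatile u_long ipiq_count;
 *
 *	u_long snap = atomic_readandclear_long(&ipiq_count);
 */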
/*
 * atomic_poll_acquire_int(P)	Returns non-zero on success, 0 if the lock
 *				has already been acquired.
 * atomic_poll_release_int(P)
 *
 * These support the NDIS driver and are also used for IPIQ interlocks
 * between cpus.  Both the acquisition and release must be
 * cache-synchronizing instructions.
 */
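/*
 * Usage sketch (illustrative only; "intrlock" is a hypothetical lock
 * word).  atomic_poll_acquire_int() does not spin; the caller decides
 * what to do when the lock is already held:
 *
 *	static volatile u_int intrlock;
 *
 *	if (atomic_poll_acquire_int(&intrlock)) {
 *		...critical section...
 *		atomic_poll_release_int(&intrlock);
 *	} else {
 *		...already held, retry later...
 *	}
 */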
#if defined(KLD_MODULE)

extern int atomic_swap_int(volatile int *addr, int value);
extern void *atomic_swap_ptr(volatile void **addr, void *value);
extern int atomic_poll_acquire_int(volatile u_int *p);
extern void atomic_poll_release_int(volatile u_int *p);

#else /* !KLD_MODULE */
static __inline int
atomic_swap_int(volatile int *addr, int value)
{
	__asm __volatile("xchgl %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}
static __inline void *
atomic_swap_ptr(volatile void **addr, void *value)
{
	__asm __volatile("xchgq %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}
static __inline int
atomic_poll_acquire_int(volatile u_int *p)
{
	u_int data;

	__asm __volatile(MPLOCKED "btsl $0,%0; setnc %%al; andl $255,%%eax" : "+m" (*p), "=a" (data));
	return (data);
}
static __inline void
atomic_poll_release_int(volatile u_int *p)
{
	__asm __volatile(MPLOCKED "btrl $0,%0" : "+m" (*p));
}

#endif /* KLD_MODULE */
/*
 * These functions operate on a 32 bit interrupt interlock which is defined
 * as follows:
 *
 *	bit 0-29	interrupt handler request counter
 *	bit 30		interrupt handler disabled bit
 *	bit 31		interrupt handler currently running bit (1 = run)
 *
 *	atomic_intr_cond_test(P)	Determine if the interlock is in an
 *					acquired state.  Returns 0 if it is
 *					not acquired, non-zero if it is.
 *
 *	atomic_intr_cond_try(P)
 *					Increment the request counter and attempt to
 *					set bit 31 to acquire the interlock.  If
 *					we are unable to set bit 31 the request
 *					counter is decremented and we return 1,
 *					otherwise we return 0.
 *
 *	atomic_intr_cond_enter(P, func, arg)
 *					Increment the request counter and attempt to
 *					set bit 31 to acquire the interlock.  If
 *					we are unable to set bit 31 func(arg) is
 *					called in a loop until we are able to set
 *					bit 31.
 *
 *	atomic_intr_cond_exit(P, func, arg)
 *					Decrement the request counter and clear bit
 *					31.  If the request counter is still non-zero
 *					call func(arg) once.
 *
 *	atomic_intr_handler_disable(P)
 *					Set bit 30, indicating that the interrupt
 *					handler has been disabled.  Must be called
 *					after the hardware is disabled.
 *
 *					Returns bit 31 indicating whether a serialized
 *					accessor is active (typically the interrupt
 *					handler is running).  0 == not active,
 *					non-zero == active.
 *
 *	atomic_intr_handler_enable(P)
 *					Clear bit 30, indicating that the interrupt
 *					handler has been enabled.  Must be called
 *					before the hardware is actually enabled.
 *
 *	atomic_intr_handler_is_enabled(P)
 *					Returns bit 30, 0 indicates that the handler
 *					is enabled, non-zero indicates that it is
 *					disabled.  The request counter portion of
 *					the field is ignored.
 */
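/*
 * Usage sketch (illustrative only; "slz", my_block() and my_wakeup()
 * are hypothetical).  A serializer built on this interlock typically
 * passes a blocking helper to contend for bit 31 and a wakeup helper
 * to kick waiters on release:
 *
 *	static __atomic_intr_t slz;
 *
 *	atomic_intr_cond_enter(&slz, my_block, &slz);
 *	...serialized work...
 *	atomic_intr_cond_exit(&slz, my_wakeup, &slz);
 */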
#if defined(KLD_MODULE)

void atomic_intr_init(__atomic_intr_t *p);
int atomic_intr_handler_disable(__atomic_intr_t *p);
void atomic_intr_handler_enable(__atomic_intr_t *p);
int atomic_intr_handler_is_enabled(__atomic_intr_t *p);
int atomic_intr_cond_test(__atomic_intr_t *p);
int atomic_intr_cond_try(__atomic_intr_t *p);
void atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg);
void atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg);

#else /* !KLD_MODULE */
static __inline void
atomic_intr_init(__atomic_intr_t *p)
{
	*p = 0;
}
static __inline int
atomic_intr_handler_disable(__atomic_intr_t *p)
{
	int data;

	__asm __volatile(MPLOCKED "orl $0x40000000,%1; movl %1,%%eax; "
			 "andl $0x80000000,%%eax"
			 : "=a"(data) , "+m"(*p));
	return (data);
}
static __inline void
atomic_intr_handler_enable(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "andl $0xBFFFFFFF,%0" : "+m" (*p));
}
static __inline int
atomic_intr_handler_is_enabled(__atomic_intr_t *p)
{
	int data;

	__asm __volatile("movl %1,%%eax; andl $0x40000000,%%eax"
			 : "=a"(data) : "m"(*p));
	return (data);
}
static __inline void
atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "incl %0; "
			 "1: ;"
			 MPLOCKED "btsl $31,%0; jnc 2f; "
			 "movq %2,%%rdi; call *%1; "
			 "jmp 1b; "
			 "2: ;"
			 : "+m" (*p)
			 : "r"(func), "m"(arg)
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
	/* YYY the function call may clobber even more registers? */
}
/*
 * Attempt to enter the interrupt condition variable.  Returns zero on
 * success, 1 on failure.
 */
static __inline int
atomic_intr_cond_try(__atomic_intr_t *p)
{
	int ret;

	__asm __volatile(MPLOCKED "incl %0; "
			 "1: ;"
			 "subl %%eax,%%eax; "
			 MPLOCKED "btsl $31,%0; jnc 2f; "
			 MPLOCKED "decl %0; "
			 "movl $1,%%eax; "
			 "2: ;"
			 : "+m" (*p), "=&a"(ret)
			 : : "cx", "dx");
	return (ret);
}
static __inline int
atomic_intr_cond_test(__atomic_intr_t *p)
{
	return ((int)(*p & 0x80000000));
}
static __inline void
atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "decl %0; "
			 MPLOCKED "btrl $31,%0; "
			 "testl $0x3FFFFFFF,%0; jz 1f; "
			 "movq %2,%%rdi; call *%1; "
			 "1: ;"
			 : "+m" (*p)
			 : "r"(func), "m"(arg)
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
	/* YYY the function call may clobber even more registers? */
}

#endif /* KLD_MODULE */
/*
 * Atomic compare and set
 *
 * if (*_dst == _old) *_dst = _new (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.  The inline is designed to
 * allow the compiler to optimize the common case where the caller calls
 * these functions from inside a conditional.
 */
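/*
 * Usage sketch (illustrative only; "w" is a hypothetical word): the
 * classic compare-and-set loop, retrying until no other cpu changed
 * the value between the read and the cmpxchg:
 *
 *	static volatile u_int w;
 *	u_int old;
 *
 *	do {
 *		old = w;
 *	} while (atomic_cmpset_int(&w, old, old | 0x80000000) == 0);
 */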
#if defined(KLD_MODULE)

extern int atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new);
extern long atomic_cmpset_long(volatile u_long *_dst, u_long _exp, u_long _src);
extern u_int atomic_fetchadd_int(volatile u_int *_p, u_int _v);
extern u_long atomic_fetchadd_long(volatile u_long *_p, u_long _v);

#else /* !KLD_MODULE */
static __inline int
atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; "
			 : "+a" (res), "=m" (*_dst)
			 : "r" (_new), "m" (*_dst)
			 : "memory");
	return (res == _old);
}
static __inline long
atomic_cmpset_long(volatile u_long *_dst, u_long _old, u_long _new)
{
	u_long res = _old;

	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; "
			 : "+a" (res), "=m" (*_dst)
			 : "r" (_new), "m" (*_dst)
			 : "memory");
	return (res == _old);
}
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *_p, u_int _v)
{
	__asm __volatile(MPLOCKED "xaddl %0,%1; "
			 : "+r" (_v), "=m" (*_p)
			 : "m" (*_p));
	return (_v);
}
static __inline u_long
atomic_fetchadd_long(volatile u_long *_p, u_long _v)
{
	__asm __volatile(MPLOCKED "xaddq %0,%1; "
			 : "+r" (_v), "=m" (*_p)
			 : "m" (*_p));
	return (_v);
}

#endif /* KLD_MODULE */
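/*
 * Usage sketch (illustrative only; "ticket" is a hypothetical counter):
 * xadd returns the pre-increment value, so concurrent callers each draw
 * a unique ticket.
 *
 *	static volatile u_int ticket;
 *
 *	u_int mine = atomic_fetchadd_int(&ticket, 1);
 */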
#if defined(KLD_MODULE)

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
extern u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p); \
extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
#else /* !KLD_MODULE */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
			 : "=a" (res),	/* 0 */		\
			   "=m" (*p)	/* 1 */		\
			 : "m" (*p)	/* 2 */		\
			 : "memory");			\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
			 : "=m" (*p),	/* 0 */		\
			   "+r" (v)	/* 1 */		\
			 : "m" (*p));	/* 2 */		\
}

#endif /* !KLD_MODULE */
ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,  "cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long, "cmpxchgq %0,%1",  "xchgq %1,%0");

#undef ATOMIC_STORE_LOAD
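/*
 * Usage sketch (illustrative only; "payload" and "ready" are
 * hypothetical).  The release store orders the payload write before the
 * flag write; the acquire load orders the flag read before the payload
 * read:
 *
 *	producer:	payload = compute();
 *			atomic_store_rel_int(&ready, 1);
 *
 *	consumer:	if (atomic_load_acq_int(&ready))
 *				consume(payload);
 */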
/* Acquire and release variants are identical to the normal ones. */
#define atomic_set_acq_char		atomic_set_char
#define atomic_set_rel_char		atomic_set_char
#define atomic_clear_acq_char		atomic_clear_char
#define atomic_clear_rel_char		atomic_clear_char
#define atomic_add_acq_char		atomic_add_char
#define atomic_add_rel_char		atomic_add_char
#define atomic_subtract_acq_char	atomic_subtract_char
#define atomic_subtract_rel_char	atomic_subtract_char

#define atomic_set_acq_short		atomic_set_short
#define atomic_set_rel_short		atomic_set_short
#define atomic_clear_acq_short		atomic_clear_short
#define atomic_clear_rel_short		atomic_clear_short
#define atomic_add_acq_short		atomic_add_short
#define atomic_add_rel_short		atomic_add_short
#define atomic_subtract_acq_short	atomic_subtract_short
#define atomic_subtract_rel_short	atomic_subtract_short

#define atomic_set_acq_int		atomic_set_int
#define atomic_set_rel_int		atomic_set_int
#define atomic_clear_acq_int		atomic_clear_int
#define atomic_clear_rel_int		atomic_clear_int
#define atomic_add_acq_int		atomic_add_int
#define atomic_add_rel_int		atomic_add_int
#define atomic_subtract_acq_int		atomic_subtract_int
#define atomic_subtract_rel_int		atomic_subtract_int
#define atomic_cmpset_acq_int		atomic_cmpset_int
#define atomic_cmpset_rel_int		atomic_cmpset_int

#define atomic_set_acq_long		atomic_set_long
#define atomic_set_rel_long		atomic_set_long
#define atomic_clear_acq_long		atomic_clear_long
#define atomic_clear_rel_long		atomic_clear_long
#define atomic_add_acq_long		atomic_add_long
#define atomic_add_rel_long		atomic_add_long
#define atomic_subtract_acq_long	atomic_subtract_long
#define atomic_subtract_rel_long	atomic_subtract_long
#define atomic_cmpset_acq_long		atomic_cmpset_long
#define atomic_cmpset_rel_long		atomic_cmpset_long

/* cpumask_t is 64-bits on x86-64 */
#define atomic_set_cpumask		atomic_set_long
#define atomic_clear_cpumask		atomic_clear_long
#define atomic_cmpset_cpumask		atomic_cmpset_long
/* Operations on 8-bit bytes. */
#define atomic_set_8			atomic_set_char
#define atomic_set_acq_8		atomic_set_acq_char
#define atomic_set_rel_8		atomic_set_rel_char
#define atomic_clear_8			atomic_clear_char
#define atomic_clear_acq_8		atomic_clear_acq_char
#define atomic_clear_rel_8		atomic_clear_rel_char
#define atomic_add_8			atomic_add_char
#define atomic_add_acq_8		atomic_add_acq_char
#define atomic_add_rel_8		atomic_add_rel_char
#define atomic_subtract_8		atomic_subtract_char
#define atomic_subtract_acq_8		atomic_subtract_acq_char
#define atomic_subtract_rel_8		atomic_subtract_rel_char
#define atomic_load_acq_8		atomic_load_acq_char
#define atomic_store_rel_8		atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16			atomic_set_short
#define atomic_set_acq_16		atomic_set_acq_short
#define atomic_set_rel_16		atomic_set_rel_short
#define atomic_clear_16			atomic_clear_short
#define atomic_clear_acq_16		atomic_clear_acq_short
#define atomic_clear_rel_16		atomic_clear_rel_short
#define atomic_add_16			atomic_add_short
#define atomic_add_acq_16		atomic_add_acq_short
#define atomic_add_rel_16		atomic_add_rel_short
#define atomic_subtract_16		atomic_subtract_short
#define atomic_subtract_acq_16		atomic_subtract_acq_short
#define atomic_subtract_rel_16		atomic_subtract_rel_short
#define atomic_load_acq_16		atomic_load_acq_short
#define atomic_store_rel_16		atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32			atomic_set_int
#define atomic_set_acq_32		atomic_set_acq_int
#define atomic_set_rel_32		atomic_set_rel_int
#define atomic_clear_32			atomic_clear_int
#define atomic_clear_acq_32		atomic_clear_acq_int
#define atomic_clear_rel_32		atomic_clear_rel_int
#define atomic_add_32			atomic_add_int
#define atomic_add_acq_32		atomic_add_acq_int
#define atomic_add_rel_32		atomic_add_rel_int
#define atomic_subtract_32		atomic_subtract_int
#define atomic_subtract_acq_32		atomic_subtract_acq_int
#define atomic_subtract_rel_32		atomic_subtract_rel_int
#define atomic_load_acq_32		atomic_load_acq_int
#define atomic_store_rel_32		atomic_store_rel_int
#define atomic_cmpset_32		atomic_cmpset_int
#define atomic_cmpset_acq_32		atomic_cmpset_acq_int
#define atomic_cmpset_rel_32		atomic_cmpset_rel_int
#define atomic_readandclear_32		atomic_readandclear_int
#define atomic_fetchadd_32		atomic_fetchadd_int
/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
	atomic_set_long((volatile u_long *)(p), (u_long)(v))
#define atomic_set_acq_ptr(p, v) \
	atomic_set_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_set_rel_ptr(p, v) \
	atomic_set_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_clear_ptr(p, v) \
	atomic_clear_long((volatile u_long *)(p), (u_long)(v))
#define atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_add_ptr(p, v) \
	atomic_add_long((volatile u_long *)(p), (u_long)(v))
#define atomic_add_acq_ptr(p, v) \
	atomic_add_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_add_rel_ptr(p, v) \
	atomic_add_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_subtract_ptr(p, v) \
	atomic_subtract_long((volatile u_long *)(p), (u_long)(v))
#define atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_load_acq_ptr(p) \
	atomic_load_acq_long((volatile u_long *)(p))
#define atomic_store_rel_ptr(p, v) \
	atomic_store_rel_long((volatile u_long *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define atomic_readandclear_ptr(p) \
	atomic_readandclear_long((volatile u_long *)(p))
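/*
 * Usage sketch (illustrative only; "struct node" and "head" are
 * hypothetical): lock-free push onto a singly-linked stack using the
 * pointer flavor of compare-and-set.
 *
 *	struct node { struct node *next; };
 *	static struct node *head;
 *
 *	do {
 *		n->next = head;
 *	} while (atomic_cmpset_ptr(&head, n->next, n) == 0);
 */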
#endif /* ! _CPU_ATOMIC_H_ */