/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/atomic.h,v 1.32 2003/11/21 03:02:00 peter Exp $
 * $DragonFly: src/sys/cpu/amd64/include/atomic.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
 */
#ifndef _CPU_ATOMIC_H_
#define _CPU_ATOMIC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif

/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)        (*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int*)(P) -= (V))
 * atomic_readandclear_int(P)   (return  *(u_int*)P; *(u_int*)P = 0;)
 *
 * atomic_set_long(P, V)        (*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)  (return  *(u_long*)P; *(u_long*)P = 0;)
 */
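/*
 * Illustrative usage sketch (not part of this header); the variable
 * names below are hypothetical.  Each call performs its read-modify-write
 * as a single atomic operation with respect to interrupts and other CPUs:
 *
 *      static volatile u_int   pending_mask;
 *      static volatile u_int   event_count;
 *
 *      atomic_set_int(&pending_mask, 1 << irq);        set a flag bit
 *      atomic_clear_int(&pending_mask, 1 << irq);      clear it again
 *      atomic_add_int(&event_count, 1);                bump a counter
 */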

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);  \
void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
int atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)                       \
u_##TYPE        atomic_load_acq_##TYPE(volatile u_##TYPE *p);   \
void            atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE */

#ifdef __GNUC__

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif

/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
                         : "+m" (*p)                    \
                         : CONS (V));                   \
}                                                       \
static __inline void                                    \
atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(OP                             \
                         : "+m" (*p)                    \
                         : CONS (V));                   \
}
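
/*
 * For illustration only: with the instantiations near the end of this
 * file, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands to roughly
 * the following inline (the locked variant prepends MPLOCKED):
 *
 *      static __inline void
 *      atomic_add_int(volatile u_int *p, u_int v)
 *      {
 *              __asm __volatile("lock ; addl %1,%0" : "+m" (*p) : "ir" (v));
 *      }
 */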

#else /* !__GNUC__ */

#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                             \
extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);   \
extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);

#endif /* __GNUC__ */

/*
 * These functions operate on a 32 bit interrupt interlock which is defined
 * as follows:
 *
 *      bit 0-30        interrupt handler disabled bits (counter)
 *      bit 31          interrupt handler currently running bit (1 = run)
 *
 * atomic_intr_cond_test(P)     Determine if the interlock is in an
 *                              acquired state.  Returns 0 if it is not
 *                              acquired, non-zero if it is.
 *
 * atomic_intr_cond_try(P)
 *                              Increment the request counter and attempt to
 *                              set bit 31 to acquire the interlock.  If
 *                              we are unable to set bit 31 the request
 *                              counter is decremented and we return non-zero,
 *                              otherwise we return 0.
 *
 * atomic_intr_cond_enter(P, func, arg)
 *                              Increment the request counter and attempt to
 *                              set bit 31 to acquire the interlock.  If
 *                              we are unable to set bit 31 func(arg) is
 *                              called in a loop until we are able to set
 *                              bit 31.
 *
 * atomic_intr_cond_exit(P, func, arg)
 *                              Decrement the request counter and clear bit
 *                              31.  If the request counter is still non-zero
 *                              call func(arg) once.
 *
 * atomic_intr_handler_disable(P)
 *                              Set bit 30, indicating that the interrupt
 *                              handler has been disabled.  Must be called
 *                              after the hardware is disabled.
 *
 *                              Returns bit 31 indicating whether a serialized
 *                              accessor is active (typically the interrupt
 *                              handler is running).  0 == not active,
 *                              non-zero == active.
 *
 * atomic_intr_handler_enable(P)
 *                              Clear bit 30, indicating that the interrupt
 *                              handler has been enabled.  Must be called
 *                              before the hardware is actually enabled.
 *
 * atomic_intr_handler_is_enabled(P)
 *                              Returns bit 30, 0 indicates that the handler
 *                              is enabled, non-zero indicates that it is
 *                              disabled.  The request counter portion of
 *                              the field is ignored.
 */
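
/*
 * Illustrative call sequence (not part of this header); 'slz' and the
 * helper functions are hypothetical names.  The enter/exit pair brackets
 * code which must be serialized against the interrupt handler:
 *
 *      atomic_intr_init(&slz->interlock);
 *      ...
 *      atomic_intr_cond_enter(&slz->interlock, slz_block, slz);
 *      ... code serialized against the interrupt handler ...
 *      atomic_intr_cond_exit(&slz->interlock, slz_wakeup, slz);
 */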

#ifndef __ATOMIC_INTR_T
#define __ATOMIC_INTR_T
typedef volatile int atomic_intr_t;
#endif

#if defined(KLD_MODULE)

void atomic_intr_init(atomic_intr_t *p);
int atomic_intr_handler_disable(atomic_intr_t *p);
void atomic_intr_handler_enable(atomic_intr_t *p);
int atomic_intr_handler_is_enabled(atomic_intr_t *p);
int atomic_intr_cond_test(atomic_intr_t *p);
int atomic_intr_cond_try(atomic_intr_t *p);
void atomic_intr_cond_enter(atomic_intr_t *p, void (*func)(void *), void *arg);
void atomic_intr_cond_exit(atomic_intr_t *p, void (*func)(void *), void *arg);

#else /* !KLD_MODULE */

static __inline
void
atomic_intr_init(atomic_intr_t *p)
{
        *p = 0;
}

static __inline
int
atomic_intr_handler_disable(atomic_intr_t *p)
{
        int data;

        __asm __volatile(MPLOCKED "orl $0x40000000,%1; movl %1,%%eax; " \
                                  "andl $0x80000000,%%eax" \
                                  : "=a"(data), "+m"(*p));
        return(data);
}

static __inline
void
atomic_intr_handler_enable(atomic_intr_t *p)
{
        __asm __volatile(MPLOCKED "andl $0xBFFFFFFF,%0" : "+m" (*p));
}

static __inline
int
atomic_intr_handler_is_enabled(atomic_intr_t *p)
{
        int data;

        __asm __volatile("movl %1,%%eax; andl $0x40000000,%%eax" \
                         : "=a"(data) : "m"(*p));
        return(data);
}

static __inline
void
atomic_intr_cond_enter(atomic_intr_t *p, void (*func)(void *), void *arg)
{
        __asm __volatile(MPLOCKED "incl %0; " \
                         "1: ;" \
                         MPLOCKED "btsl $31,%0; jnc 2f; " \
                         "movq %2,%%rdi; call *%1; " \
                         "jmp 1b; " \
                         "2: ;" \
                         : "+m" (*p) \
                         : "r"(func), "m"(arg) \
                         : "ax", "cx", "dx", "di");     /* XXX clobbers more regs */
}

/*
 * Attempt to enter the interrupt condition variable.  Returns zero on
 * success, 1 on failure.
 */
static __inline
int
atomic_intr_cond_try(atomic_intr_t *p)
{
        int ret;

        __asm __volatile(MPLOCKED "incl %0; " \
                         "1: ;" \
                         "subl %%eax,%%eax; " \
                         MPLOCKED "btsl $31,%0; jnc 2f; " \
                         MPLOCKED "decl %0; " \
                         "movl $1,%%eax;" \
                         "2: ;" \
                         : "+m" (*p), "=a"(ret) \
                         : : "cx", "dx");
        return (ret);
}


static __inline
int
atomic_intr_cond_test(atomic_intr_t *p)
{
        return((int)(*p & 0x80000000));
}

static __inline
void
atomic_intr_cond_exit(atomic_intr_t *p, void (*func)(void *), void *arg)
{
        __asm __volatile(MPLOCKED "decl %0; " \
                         MPLOCKED "btrl $31,%0; " \
                         "testl $0x3FFFFFFF,%0; jz 1f; " \
                         "movq %2,%%rdi; call *%1; " \
                         "1: ;" \
                         : "+m" (*p) \
                         : "r"(func), "m"(arg) \
                         : "ax", "cx", "dx", "di");     /* XXX clobbers more regs */
}

#endif

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */
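
/*
 * Illustrative retry loop (not part of this header); the function name
 * is hypothetical.  cmpset only succeeds when the word still holds the
 * expected value, so the update is race-free against other CPUs:
 *
 *      static __inline void
 *      example_atomic_add(volatile u_int *p, u_int n)
 *      {
 *              u_int old;
 *
 *              do {
 *                      old = *p;
 *              } while (atomic_cmpset_int(p, old, old + n) == 0);
 *      }
 */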

#if defined(__GNUC__)

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
        int res = exp;

        __asm __volatile (
                MPLOCKED
        "       cmpxchgl %1,%2 ;        "
        "       setz    %%al ;          "
        "       movzbl  %%al,%0 ;       "
        "1:                             "
        "# atomic_cmpset_int"
        : "+a" (res)                    /* 0 (result) */
        : "r" (src),                    /* 1 */
          "m" (*(dst))                  /* 2 */
        : "memory");

        return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{
        long res = exp;

        __asm __volatile (
                MPLOCKED
        "       cmpxchgq %1,%2 ;        "
        "       setz    %%al ;          "
        "       movzbq  %%al,%0 ;       "
        "1:                             "
        "# atomic_cmpset_long"
        : "+a" (res)                    /* 0 (result) */
        : "r" (src),                    /* 1 */
          "m" (*(dst))                  /* 2 */
        : "memory");

        return (res);
}
#endif /* defined(__GNUC__) */

#if defined(__GNUC__)

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)               \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE res;                                   \
                                                        \
        __asm __volatile(MPLOCKED LOP                   \
        : "=a" (res),                   /* 0 (result) */\
          "+m" (*p)                     /* 1 */         \
        : : "memory");                                  \
                                                        \
        return (res);                                   \
}                                                       \
                                                        \
/*                                                      \
 * The XCHG instruction asserts LOCK automagically.     \
 */                                                     \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(SOP                            \
        : "+m" (*p),                    /* 0 */         \
          "+r" (v)                      /* 1 */         \
        : : "memory");                                  \
}                                                       \
struct __hack
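
/*
 * Illustrative acquire/release pairing (not part of this header); the
 * variable names are hypothetical.  The release store publishes prior
 * writes before the flag becomes visible; the acquire load makes those
 * writes visible to the reader before it proceeds:
 *
 *      producer:
 *              shared_data = value;
 *              atomic_store_rel_int(&ready, 1);
 *
 *      consumer:
 *              while (atomic_load_acq_int(&ready) == 0)
 *                      ;
 *              use(shared_data);
 */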

#else /* !defined(__GNUC__) */

extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);
extern int atomic_cmpset_long(volatile u_long *, u_long, u_long);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)                               \
extern u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);           \
extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* defined(__GNUC__) */

#endif /* !KLD_MODULE */

ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,  "cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long, "cmpxchgq %0,%1",  "xchgq %1,%0");

#define atomic_cmpset_32        atomic_cmpset_int

#if 0

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#define atomic_set_acq_char             atomic_set_char
#define atomic_set_rel_char             atomic_set_char
#define atomic_clear_acq_char           atomic_clear_char
#define atomic_clear_rel_char           atomic_clear_char
#define atomic_add_acq_char             atomic_add_char
#define atomic_add_rel_char             atomic_add_char
#define atomic_subtract_acq_char        atomic_subtract_char
#define atomic_subtract_rel_char        atomic_subtract_char

#define atomic_set_acq_short            atomic_set_short
#define atomic_set_rel_short            atomic_set_short
#define atomic_clear_acq_short          atomic_clear_short
#define atomic_clear_rel_short          atomic_clear_short
#define atomic_add_acq_short            atomic_add_short
#define atomic_add_rel_short            atomic_add_short
#define atomic_subtract_acq_short       atomic_subtract_short
#define atomic_subtract_rel_short       atomic_subtract_short

#define atomic_set_acq_int              atomic_set_int
#define atomic_set_rel_int              atomic_set_int
#define atomic_clear_acq_int            atomic_clear_int
#define atomic_clear_rel_int            atomic_clear_int
#define atomic_add_acq_int              atomic_add_int
#define atomic_add_rel_int              atomic_add_int
#define atomic_subtract_acq_int         atomic_subtract_int
#define atomic_subtract_rel_int         atomic_subtract_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int

#define atomic_set_acq_long             atomic_set_long
#define atomic_set_rel_long             atomic_set_long
#define atomic_clear_acq_long           atomic_clear_long
#define atomic_clear_rel_long           atomic_clear_long
#define atomic_add_acq_long             atomic_add_long
#define atomic_add_rel_long             atomic_add_long
#define atomic_subtract_acq_long        atomic_subtract_long
#define atomic_subtract_rel_long        atomic_subtract_long

#define atomic_cmpset_acq_ptr           atomic_cmpset_ptr
#define atomic_cmpset_rel_ptr           atomic_cmpset_ptr

#define atomic_set_8            atomic_set_char
#define atomic_set_acq_8        atomic_set_acq_char
#define atomic_set_rel_8        atomic_set_rel_char
#define atomic_clear_8          atomic_clear_char
#define atomic_clear_acq_8      atomic_clear_acq_char
#define atomic_clear_rel_8      atomic_clear_rel_char
#define atomic_add_8            atomic_add_char
#define atomic_add_acq_8        atomic_add_acq_char
#define atomic_add_rel_8        atomic_add_rel_char
#define atomic_subtract_8       atomic_subtract_char
#define atomic_subtract_acq_8   atomic_subtract_acq_char
#define atomic_subtract_rel_8   atomic_subtract_rel_char
#define atomic_load_acq_8       atomic_load_acq_char
#define atomic_store_rel_8      atomic_store_rel_char

#define atomic_set_16           atomic_set_short
#define atomic_set_acq_16       atomic_set_acq_short
#define atomic_set_rel_16       atomic_set_rel_short
#define atomic_clear_16         atomic_clear_short
#define atomic_clear_acq_16     atomic_clear_acq_short
#define atomic_clear_rel_16     atomic_clear_rel_short
#define atomic_add_16           atomic_add_short
#define atomic_add_acq_16       atomic_add_acq_short
#define atomic_add_rel_16       atomic_add_rel_short
#define atomic_subtract_16      atomic_subtract_short
#define atomic_subtract_acq_16  atomic_subtract_acq_short
#define atomic_subtract_rel_16  atomic_subtract_rel_short
#define atomic_load_acq_16      atomic_load_acq_short
#define atomic_store_rel_16     atomic_store_rel_short

#define atomic_set_32           atomic_set_int
#define atomic_set_acq_32       atomic_set_acq_int
#define atomic_set_rel_32       atomic_set_rel_int
#define atomic_clear_32         atomic_clear_int
#define atomic_clear_acq_32     atomic_clear_acq_int
#define atomic_clear_rel_32     atomic_clear_rel_int
#define atomic_add_32           atomic_add_int
#define atomic_add_acq_32       atomic_add_acq_int
#define atomic_add_rel_32       atomic_add_rel_int
#define atomic_subtract_32      atomic_subtract_int
#define atomic_subtract_acq_32  atomic_subtract_acq_int
#define atomic_subtract_rel_32  atomic_subtract_rel_int
#define atomic_load_acq_32      atomic_load_acq_int
#define atomic_store_rel_32     atomic_store_rel_int
#define atomic_cmpset_acq_32    atomic_cmpset_acq_int
#define atomic_cmpset_rel_32    atomic_cmpset_rel_int
#define atomic_readandclear_32  atomic_readandclear_int

#if !defined(WANT_FUNCTIONS)
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{

        return (atomic_cmpset_long((volatile u_long *)dst,
            (u_long)exp, (u_long)src));
}

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
        /*
         * The apparently-bogus cast to intptr_t in the following is to
         * avoid a warning from "gcc -Wbad-function-cast".
         */
        return ((void *)(intptr_t)atomic_load_acq_long((volatile u_long *)p));
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
        atomic_store_rel_long((volatile u_long *)p, (u_long)v);
}

#define ATOMIC_PTR(NAME)                                \
static __inline void                                    \
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)      \
{                                                       \
        atomic_##NAME##_long((volatile u_long *)p, v);  \
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)  \
{                                                       \
        atomic_##NAME##_acq_long((volatile u_long *)p, v);\
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)  \
{                                                       \
        atomic_##NAME##_rel_long((volatile u_long *)p, v);\
}

ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR

#if defined(__GNUC__)

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
        u_int result;

        __asm __volatile (
        "       xorl    %0,%0 ;         "
        "       xchgl   %1,%0 ;         "
        "# atomic_readandclear_int"
        : "=&r" (result)                /* 0 (result) */
        : "m" (*addr));                 /* 1 (addr) */

        return (result);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
        u_long result;

        __asm __volatile (
        "       xorq    %0,%0 ;         "
        "       xchgq   %1,%0 ;         "
        "# atomic_readandclear_long"
        : "=&r" (result)                /* 0 (result) */
        : "m" (*addr));                 /* 1 (addr) */

        return (result);
}

#else /* !defined(__GNUC__) */

extern u_long   atomic_readandclear_long(volatile u_long *);
extern u_int    atomic_readandclear_int(volatile u_int *);

#endif /* defined(__GNUC__) */

#endif  /* !defined(WANT_FUNCTIONS) */
#endif /* 0 */

#endif /* ! _CPU_ATOMIC_H_ */