/* Internal macros for atomic operations for GNU C Library.
   Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_H
#define _ATOMIC_H 1

/* This header defines three types of macros:

   - atomic arithmetic and logic operations on memory.  They all
     have the prefix "atomic_".

   - conditionally atomic operations of the same kinds.  These
     always behave identically but can be faster when atomicity
     is not really needed because only one thread has access to
     the memory location.  The flip side is that they are slower
     than the plain atomic operations in the multi-thread case.
     The interfaces have the prefix "catomic_".

   - support functions like barriers.  They also have the prefix
     "atomic_".

   Architectures must provide a few low-level macros (the compare
   and exchange definitions).  All others are optional.  They
   should only be provided if the architecture has specific
   support for the operation.

   As <atomic.h> macros are usually heavily nested and often use local
   variables to make sure side-effects are evaluated properly, give
   macro-local variables a per-macro unique prefix.  This file uses
   the __atgN_ prefix, where N is different in each macro.  */
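
/* Illustrative sketch (an assumed example, not part of this header's
   interface): a counter that only one thread can currently touch,
   e.g. inside a fork handler, may use the "catomic_" variants, while a
   counter shared between running threads must use the "atomic_" ones.
   The variable names below are hypothetical.

     static unsigned long int single_thread_count;
     static unsigned long int shared_count;

     catomic_increment (&single_thread_count);
     atomic_increment (&shared_count);  */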

#include <stdlib.h>

#include <atomic-machine.h>

/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
   bit width of *MEM.  The calling macro puts parens around MEM
   and following args.  */
#define __atomic_val_bysize(pre, post, mem, ...) \
  ({ \
    __typeof (*mem) __atg1_result; \
    if (sizeof (*mem) == 1) \
      __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg1_result; \
  })
#define __atomic_bool_bysize(pre, post, mem, ...) \
  ({ \
    int __atg2_result; \
    if (sizeof (*mem) == 1) \
      __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg2_result; \
  })
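
/* For example (illustration only): on an architecture that defines
   __arch_compare_and_exchange_val_32_acq, the generic definition of
   atomic_compare_and_exchange_val_acq below expands, for a 4-byte
   object, roughly into

     __arch_compare_and_exchange_val_32_acq (mem, newval, oldval)

   via the pre##_32_##post token pasting above; the size branches that
   do not apply are discarded by the compiler as dead code.  */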


/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#if !defined atomic_compare_and_exchange_val_acq \
    && defined __arch_compare_and_exchange_val_32_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_compare_and_exchange_val,acq, \
                       mem, newval, oldval)
#endif


#ifndef catomic_compare_and_exchange_val_acq
# ifdef __arch_c_compare_and_exchange_val_32_acq
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq, \
                       mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
# endif
#endif


#ifndef catomic_compare_and_exchange_val_rel
# ifndef atomic_compare_and_exchange_val_rel
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
# endif
#endif


#ifndef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
#endif
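
/* A minimal usage sketch (assumed example, not part of this header):
   install a pointer exactly once and keep whichever value won the race.
   The identifiers global_ptr, compute_ptr and discard_ptr below are
   hypothetical.

     void *newptr = compute_ptr ();
     void *old = atomic_compare_and_exchange_val_acq (&global_ptr,
                                                      newptr, NULL);
     if (old != NULL)
       discard_ptr (newptr);

   OLD being NULL means NEWPTR was stored; any other value means another
   thread installed its pointer first.  */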


/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#ifndef atomic_compare_and_exchange_bool_acq
# ifdef __arch_compare_and_exchange_bool_32_acq
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool,acq, \
                        mem, newval, oldval)
# else
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
        call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg3_old = (oldval); \
     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old) \
       != __atg3_old; \
  })
# endif
#endif


#ifndef catomic_compare_and_exchange_bool_acq
# ifdef __arch_c_compare_and_exchange_bool_32_acq
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq, \
                        mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
        call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg4_old = (oldval); \
     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old) \
       != __atg4_old; \
  })
# endif
#endif
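
/* Usage sketch (illustrative assumption): atomically move a state word
   from IDLE to BUSY; a zero return value means this thread performed
   the transition.  The identifiers state, IDLE, BUSY and do_the_work
   are hypothetical.

     if (atomic_compare_and_exchange_bool_acq (&state, BUSY, IDLE) == 0)
       do_the_work ();  */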


/* Store NEWVALUE in *MEM and return the old value.  */
#ifndef atomic_exchange_acq
# define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*(mem)) __atg5_oldval; \
     __typeof (mem) __atg5_memp = (mem); \
     __typeof (*(mem)) __atg5_value = (newvalue); \
\
     do \
       __atg5_oldval = *__atg5_memp; \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
                                                   __atg5_oldval), 0)); \
\
     __atg5_oldval; })
#endif

#ifndef atomic_exchange_rel
# define atomic_exchange_rel(mem, newvalue) atomic_exchange_acq (mem, newvalue)
#endif
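
/* Usage sketch (illustrative assumption, not part of this header): a
   test-and-set style spin lock built on atomic_exchange_acq, released
   with atomic_exchange_rel; atomic_spin_nop is defined near the end of
   this file.  The identifiers lock_word, lock and unlock are
   hypothetical.

     static int lock_word;

     static void lock (void)
     {
       while (atomic_exchange_acq (&lock_word, 1) != 0)
         atomic_spin_nop ();
     }

     static void unlock (void)
     {
       atomic_exchange_rel (&lock_word, 0);
     }  */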


/* Add VALUE to *MEM and return the old value of *MEM.  */
#ifndef atomic_exchange_and_add_acq
# ifdef atomic_exchange_and_add
#  define atomic_exchange_and_add_acq(mem, value) \
  atomic_exchange_and_add (mem, value)
# else
#  define atomic_exchange_and_add_acq(mem, value) \
  ({ __typeof (*(mem)) __atg6_oldval; \
     __typeof (mem) __atg6_memp = (mem); \
     __typeof (*(mem)) __atg6_value = (value); \
\
     do \
       __atg6_oldval = *__atg6_memp; \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg6_memp, \
                                                   __atg6_oldval \
                                                   + __atg6_value, \
                                                   __atg6_oldval), 0)); \
\
     __atg6_oldval; })
# endif
#endif

#ifndef atomic_exchange_and_add_rel
# define atomic_exchange_and_add_rel(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

#ifndef atomic_exchange_and_add
# define atomic_exchange_and_add(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

#ifndef catomic_exchange_and_add
# define catomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __atg7_oldv; \
     __typeof (mem) __atg7_memp = (mem); \
     __typeof (*(mem)) __atg7_value = (value); \
\
     do \
       __atg7_oldv = *__atg7_memp; \
     while (__builtin_expect \
            (catomic_compare_and_exchange_bool_acq (__atg7_memp, \
                                                    __atg7_oldv \
                                                    + __atg7_value, \
                                                    __atg7_oldv), 0)); \
\
     __atg7_oldv; })
#endif
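
/* Usage sketch (illustrative assumption): reserving a slot index in a
   shared array by fetching-and-adding a write cursor; each thread gets
   a distinct old value.  The identifiers cursor, slots and produce_item
   are hypothetical.

     size_t idx = atomic_exchange_and_add (&cursor, 1);
     slots[idx] = produce_item ();  */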


#ifndef atomic_max
# define atomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg8_oldval; \
    __typeof (mem) __atg8_memp = (mem); \
    __typeof (*(mem)) __atg8_value = (value); \
    do { \
      __atg8_oldval = *__atg8_memp; \
      if (__atg8_oldval >= __atg8_value) \
        break; \
    } while (__builtin_expect \
             (atomic_compare_and_exchange_bool_acq (__atg8_memp, __atg8_value, \
                                                    __atg8_oldval), 0)); \
  } while (0)
#endif


#ifndef catomic_max
# define catomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg9_oldv; \
    __typeof (mem) __atg9_memp = (mem); \
    __typeof (*(mem)) __atg9_value = (value); \
    do { \
      __atg9_oldv = *__atg9_memp; \
      if (__atg9_oldv >= __atg9_value) \
        break; \
    } while (__builtin_expect \
             (catomic_compare_and_exchange_bool_acq (__atg9_memp, \
                                                     __atg9_value, \
                                                     __atg9_oldv), 0)); \
  } while (0)
#endif


#ifndef atomic_min
# define atomic_min(mem, value) \
  do { \
    __typeof (*(mem)) __atg10_oldval; \
    __typeof (mem) __atg10_memp = (mem); \
    __typeof (*(mem)) __atg10_value = (value); \
    do { \
      __atg10_oldval = *__atg10_memp; \
      if (__atg10_oldval <= __atg10_value) \
        break; \
    } while (__builtin_expect \
             (atomic_compare_and_exchange_bool_acq (__atg10_memp, \
                                                    __atg10_value, \
                                                    __atg10_oldval), 0)); \
  } while (0)
#endif


#ifndef atomic_add
# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
#endif


#ifndef catomic_add
# define catomic_add(mem, value) \
  (void) catomic_exchange_and_add ((mem), (value))
#endif


#ifndef atomic_increment
# define atomic_increment(mem) atomic_add ((mem), 1)
#endif


#ifndef catomic_increment
# define catomic_increment(mem) catomic_add ((mem), 1)
#endif


#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif


#ifndef catomic_increment_val
# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
#endif


/* Add one to *MEM and return true iff it's now zero.  */
#ifndef atomic_increment_and_test
# define atomic_increment_and_test(mem) \
  (atomic_exchange_and_add ((mem), 1) + 1 == 0)
#endif


#ifndef atomic_decrement
# define atomic_decrement(mem) atomic_add ((mem), -1)
#endif


#ifndef catomic_decrement
# define catomic_decrement(mem) catomic_add ((mem), -1)
#endif


#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif


#ifndef catomic_decrement_val
# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
#endif


/* Subtract 1 from *MEM and return true iff it's now zero.  */
#ifndef atomic_decrement_and_test
# define atomic_decrement_and_test(mem) \
  (atomic_exchange_and_add ((mem), -1) == 1)
#endif
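
/* Usage sketch (illustrative assumption): a reference count where the
   object is destroyed by whichever thread drops the last reference.
   The identifiers obj, refcount and destroy_obj are hypothetical.

     atomic_increment (&obj->refcount);
     ...
     if (atomic_decrement_and_test (&obj->refcount))
       destroy_obj (obj);  */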


/* Decrement *MEM if it is > 0, and return the old value.  */
#ifndef atomic_decrement_if_positive
# define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __atg11_oldval; \
     __typeof (mem) __atg11_memp = (mem); \
\
     do \
       { \
         __atg11_oldval = *__atg11_memp; \
         if (__glibc_unlikely (__atg11_oldval <= 0)) \
           break; \
       } \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg11_memp, \
                                                   __atg11_oldval - 1, \
                                                   __atg11_oldval), 0)); \
     __atg11_oldval; })
#endif
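
/* Usage sketch (illustrative assumption): a semaphore-like "try to take
   a token" operation; a non-positive return value means no token was
   available and nothing was decremented.  The identifiers tokens,
   consume_token and wait_for_token are hypothetical.

     if (atomic_decrement_if_positive (&tokens) > 0)
       consume_token ();
     else
       wait_for_token ();  */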


#ifndef atomic_add_negative
# define atomic_add_negative(mem, value) \
  ({ __typeof (value) __atg12_value = (value); \
     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
#endif


#ifndef atomic_add_zero
# define atomic_add_zero(mem, value) \
  ({ __typeof (value) __atg13_value = (value); \
     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
#endif


#ifndef atomic_bit_set
# define atomic_bit_set(mem, bit) \
  (void) atomic_bit_test_set(mem, bit)
#endif


#ifndef atomic_bit_test_set
# define atomic_bit_test_set(mem, bit) \
  ({ __typeof (*(mem)) __atg14_old; \
     __typeof (mem) __atg14_memp = (mem); \
     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit)); \
\
     do \
       __atg14_old = (*__atg14_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg14_memp, \
                                                   __atg14_old | __atg14_mask, \
                                                   __atg14_old), 0)); \
\
     __atg14_old & __atg14_mask; })
#endif
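
/* Usage sketch (illustrative assumption): claiming bit 0 of a flags
   word; a zero result means the bit was previously clear and this
   thread set it.  The identifiers flags and perform_one_time_setup are
   hypothetical.

     if (atomic_bit_test_set (&flags, 0) == 0)
       perform_one_time_setup ();  */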

/* Atomically *mem &= mask.  */
#ifndef atomic_and
# define atomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg15_old; \
    __typeof (mem) __atg15_memp = (mem); \
    __typeof (*(mem)) __atg15_mask = (mask); \
\
    do \
      __atg15_old = (*__atg15_memp); \
    while (__builtin_expect \
           (atomic_compare_and_exchange_bool_acq (__atg15_memp, \
                                                  __atg15_old & __atg15_mask, \
                                                  __atg15_old), 0)); \
  } while (0)
#endif

#ifndef catomic_and
# define catomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg20_old; \
    __typeof (mem) __atg20_memp = (mem); \
    __typeof (*(mem)) __atg20_mask = (mask); \
\
    do \
      __atg20_old = (*__atg20_memp); \
    while (__builtin_expect \
           (catomic_compare_and_exchange_bool_acq (__atg20_memp, \
                                                   __atg20_old & __atg20_mask, \
                                                   __atg20_old), 0)); \
  } while (0)
#endif

/* Atomically *mem &= mask and return the old value of *mem.  */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
  ({ __typeof (*(mem)) __atg16_old; \
     __typeof (mem) __atg16_memp = (mem); \
     __typeof (*(mem)) __atg16_mask = (mask); \
\
     do \
       __atg16_old = (*__atg16_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg16_memp, \
                                                   __atg16_old & __atg16_mask, \
                                                   __atg16_old), 0)); \
\
     __atg16_old; })
#endif

/* Atomically *mem |= mask.  */
#ifndef atomic_or
# define atomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg17_old; \
    __typeof (mem) __atg17_memp = (mem); \
    __typeof (*(mem)) __atg17_mask = (mask); \
\
    do \
      __atg17_old = (*__atg17_memp); \
    while (__builtin_expect \
           (atomic_compare_and_exchange_bool_acq (__atg17_memp, \
                                                  __atg17_old | __atg17_mask, \
                                                  __atg17_old), 0)); \
  } while (0)
#endif

#ifndef catomic_or
# define catomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg18_old; \
    __typeof (mem) __atg18_memp = (mem); \
    __typeof (*(mem)) __atg18_mask = (mask); \
\
    do \
      __atg18_old = (*__atg18_memp); \
    while (__builtin_expect \
           (catomic_compare_and_exchange_bool_acq (__atg18_memp, \
                                                   __atg18_old | __atg18_mask, \
                                                   __atg18_old), 0)); \
  } while (0)
#endif

/* Atomically *mem |= mask and return the old value of *mem.  */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
  ({ __typeof (*(mem)) __atg19_old; \
     __typeof (mem) __atg19_memp = (mem); \
     __typeof (*(mem)) __atg19_mask = (mask); \
\
     do \
       __atg19_old = (*__atg19_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg19_memp, \
                                                   __atg19_old | __atg19_mask, \
                                                   __atg19_old), 0)); \
\
     __atg19_old; })
#endif

#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
#endif


#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_forced_read
# define atomic_forced_read(x) \
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
#endif

/* This is equal to 1 iff the architecture supports 64b atomic operations.  */
#ifndef __HAVE_64B_ATOMICS
#error Unable to determine if 64-bit atomics are present.
#endif

/* The following functions are a subset of the atomic operations provided by
   C11.  Usually, a function named atomic_OP_MO(args) is equivalent to C11's
   atomic_OP_explicit(args, memory_order_MO); exceptions noted below.  */
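
/* Usage sketch of this C11-like subset (illustrative assumption): a
   producer publishes a payload and then sets a flag with release
   semantics; a consumer that observes the flag with acquire semantics
   is guaranteed to see the payload.  The identifiers payload, ready,
   compute and use are hypothetical.

     producer:  payload = compute ();
                atomic_store_release (&ready, 1);

     consumer:  if (atomic_load_acquire (&ready) != 0)
                  use (payload);  */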

/* Each arch can request to use compiler built-ins for C11 atomics.  If it
   does, all atomics will be based on these.  */
#if USE_ATOMIC_COMPILER_BUILTINS

/* We require 32b atomic operations; some archs also support 64b atomic
   operations.  */
void __atomic_link_error (void);
# if __HAVE_64B_ATOMICS == 1
#  define __atomic_check_size(mem) \
   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
     __atomic_link_error ();
# else
#  define __atomic_check_size(mem) \
   if (sizeof (*mem) != 4) \
     __atomic_link_error ();
# endif

# define atomic_thread_fence_acquire() \
  __atomic_thread_fence (__ATOMIC_ACQUIRE)
# define atomic_thread_fence_release() \
  __atomic_thread_fence (__ATOMIC_RELEASE)
# define atomic_thread_fence_seq_cst() \
  __atomic_thread_fence (__ATOMIC_SEQ_CST)

# define atomic_load_relaxed(mem) \
  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_RELAXED); })
# define atomic_load_acquire(mem) \
  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })

# define atomic_store_relaxed(mem, val) \
  do { \
    __atomic_check_size((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
  } while (0)
# define atomic_store_release(mem, val) \
  do { \
    __atomic_check_size((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
  } while (0)

/* On failure, this CAS has memory_order_relaxed semantics.  */
# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_release(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_RELEASE, __ATOMIC_RELAXED); })

# define atomic_exchange_acquire(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
# define atomic_exchange_release(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })

# define atomic_fetch_add_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_add_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_add_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
# define atomic_fetch_add_acq_rel(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })

# define atomic_fetch_and_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })

# define atomic_fetch_or_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_or_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })

#else /* !USE_ATOMIC_COMPILER_BUILTINS */

/* By default, we assume that read, write, and full barriers are equivalent
   to acquire, release, and seq_cst barriers.  Archs for which this does not
   hold have to provide custom definitions of the fences.  */
# ifndef atomic_thread_fence_acquire
#  define atomic_thread_fence_acquire() atomic_read_barrier ()
# endif
# ifndef atomic_thread_fence_release
#  define atomic_thread_fence_release() atomic_write_barrier ()
# endif
# ifndef atomic_thread_fence_seq_cst
#  define atomic_thread_fence_seq_cst() atomic_full_barrier ()
# endif

# ifndef atomic_load_relaxed
#  define atomic_load_relaxed(mem) \
   ({ __typeof (*(mem)) __atg100_val; \
      __asm ("" : "=r" (__atg100_val) : "0" (*(mem))); \
      __atg100_val; })
# endif
# ifndef atomic_load_acquire
#  define atomic_load_acquire(mem) \
   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
      atomic_thread_fence_acquire (); \
      __atg101_val; })
# endif

# ifndef atomic_store_relaxed
/* XXX Use inline asm here?  */
#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
# endif
# ifndef atomic_store_release
#  define atomic_store_release(mem, val) \
   do { \
     atomic_thread_fence_release (); \
     atomic_store_relaxed ((mem), (val)); \
   } while (0)
# endif

/* On failure, this CAS has memory_order_relaxed semantics.  */
/* XXX This potentially has one branch more than necessary, but archs
   currently do not define a CAS that returns both the previous value and
   the success flag.  */
# ifndef atomic_compare_exchange_weak_acquire
#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
   ({ typeof (*(expected)) __atg102_expected = *(expected); \
      *(expected) = \
        atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
      *(expected) == __atg102_expected; })
# endif
# ifndef atomic_compare_exchange_weak_relaxed
/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
   CAS.  */
#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
# endif
# ifndef atomic_compare_exchange_weak_release
#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
   ({ typeof (*(expected)) __atg103_expected = *(expected); \
      *(expected) = \
        atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
      *(expected) == __atg103_expected; })
# endif

# ifndef atomic_exchange_acquire
#  define atomic_exchange_acquire(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_release
#  define atomic_exchange_release(mem, val) \
   atomic_exchange_rel ((mem), (val))
# endif

# ifndef atomic_fetch_add_acquire
#  define atomic_fetch_add_acquire(mem, operand) \
   atomic_exchange_and_add_acq ((mem), (operand))
# endif
# ifndef atomic_fetch_add_relaxed
/* XXX Fall back to acquire MO because the MO semantics of
   atomic_exchange_and_add are not documented; the generic version falls back
   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
   and vice versa.  */
#  define atomic_fetch_add_relaxed(mem, operand) \
   atomic_fetch_add_acquire ((mem), (operand))
# endif
# ifndef atomic_fetch_add_release
#  define atomic_fetch_add_release(mem, operand) \
   atomic_exchange_and_add_rel ((mem), (operand))
# endif
# ifndef atomic_fetch_add_acq_rel
#  define atomic_fetch_add_acq_rel(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_exchange_and_add_acq ((mem), (operand)); })
# endif

/* XXX The default for atomic_and_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_and_acquire
#  define atomic_fetch_and_acquire(mem, operand) \
   atomic_and_val ((mem), (operand))
# endif

/* XXX The default for atomic_or_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_or_acquire
#  define atomic_fetch_or_acquire(mem, operand) \
   atomic_or_val ((mem), (operand))
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_relaxed
#  define atomic_fetch_or_relaxed(mem, operand) \
   atomic_fetch_or_acquire ((mem), (operand))
# endif

#endif /* !USE_ATOMIC_COMPILER_BUILTINS */

/* This operation does not affect synchronization semantics but can be used
   in the body of a spin loop to potentially improve its efficiency.  */
#ifndef atomic_spin_nop
# define atomic_spin_nop() do { /* nothing */ } while (0)
#endif
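
/* Usage sketch (illustrative assumption): spinning until a flag becomes
   nonzero, with atomic_spin_nop in the loop body.  The identifier ready
   is hypothetical.

     while (atomic_load_acquire (&ready) == 0)
       atomic_spin_nop ();  */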

#endif /* atomic.h */