/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _X86_64_ATOMIC_MACHINE_H
#define _X86_64_ATOMIC_MACHINE_H 1

#include <stdint.h>
#include <tls.h>                  /* For tcbhead_t.  */
#include <libc-pointer-arith.h>   /* For cast_to_integer.  */

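/* Fixed-width and "fast" atomic integer types expected by the
   machine-independent atomic operations.  */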
typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;


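/* On uniprocessor builds (UP) the "lock" prefix is unnecessary and
   expands to nothing; otherwise LOCK_PREFIX expands to "lock;" so the
   read-modify-write instructions it guards are atomic across CPUs.  */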
#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX  /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 1
#define ATOMIC_EXCHANGE_USES_CAS 0

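/* The plain compare-and-exchange operations map directly to the
   compiler's __sync builtins.  The _bool variant yields zero when the
   exchange succeeded, hence the negation of
   __sync_bool_compare_and_swap, which returns nonzero on success.  */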
#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))


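/* The __arch_c_* ("conditional lock") variants back the catomic_*
   compare-and-exchange operations.  They test the multiple_threads
   flag in the TCB (reached through %fs) and branch over the "lock"
   prefix while the process is single-threaded, avoiding the cost of a
   locked instruction.  The %P operand modifier prints the offsetof
   constant without the leading '$' so it can form the %fs-relative
   address.  */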
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgb %b2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgw %w2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgl %2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgq %q2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" ((atomic64_t) cast_to_integer (newval)), \
                         "m" (*mem), \
                         "0" ((atomic64_t) cast_to_integer (oldval)), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })


/* Note that we need no lock prefix: xchg with a memory operand is
   implicitly locked.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile ("xchgb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile ("xchgw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile ("xchgl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else \
       __asm __volatile ("xchgq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (newvalue)), \
                           "m" (*mem)); \
     result; })


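/* Fetch-and-add.  atomic_exchange_and_add uses the __sync builtin;
   catomic_exchange_and_add uses the xadd body below with the
   conditional lock prefix.  xadd leaves the previous value in the
   source register.  */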
#define __arch_exchange_and_add_body(lock, mem, value) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (lock "xaddb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (lock "xaddw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (lock "xaddl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (value), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else \
       __asm __volatile (lock "xaddq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     result; })

#define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)

#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)


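/* Atomic add.  Constant +1/-1 additions are folded into the cheaper
   increment/decrement operations; everything else uses a (possibly
   conditionally) locked add.  */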
#define __arch_add_body(lock, pfx, mem, value) \
  do { \
    if (__builtin_constant_p (value) && (value) == 1) \
      pfx##_increment (mem); \
    else if (__builtin_constant_p (value) && (value) == -1) \
      pfx##_decrement (mem); \
    else if (sizeof (*mem) == 1) \
      __asm __volatile (lock "addb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "addw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "addl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "addq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" ((atomic64_t) cast_to_integer (value)), \
                          "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, atomic, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, catomic, mem, value)


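/* Add VALUE to *MEM and return nonzero if the result is negative
   (the "sets" instruction captures the sign flag).  */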
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     __result; })


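/* Add VALUE to *MEM and return nonzero if the result is zero
   ("setz" captures the zero flag).  */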
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     __result; })


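/* Atomic increment, again parameterized over the lock prefix for the
   catomic_ variant.  */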
#define __arch_increment_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "incb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "incw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "incl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "incq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, mem)


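/* Increment *MEM and return nonzero if the new value is zero.  */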
#define atomic_increment_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })


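/* Atomic decrement, mirroring the increment body above.  */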
#define __arch_decrement_body(lock, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "decb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "decw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "decl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "decq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, mem)


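/* Decrement *MEM and return nonzero if the new value is zero.  */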
#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })


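/* Set bit BIT in *MEM.  For 64-bit objects a constant bit below 32 can
   be encoded as an immediate; otherwise the mask is materialized in a
   register.  */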
#define atomic_bit_set(mem, bit) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "iq" (1L << (bit))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (__builtin_constant_p (bit) && (bit) < 32) \
      __asm __volatile (LOCK_PREFIX "orq %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "i" (1L << (bit))); \
    else \
      __asm __volatile (LOCK_PREFIX "orq %q2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "r" (1UL << (bit))); \
  } while (0)


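/* Atomically set bit BIT in *MEM and return its previous value
   (bts copies the old bit into the carry flag, captured with setc).  */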
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "iq" (bit)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else \
       __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     __result; })


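/* "rep; nop" is the encoding of the PAUSE instruction, a hint that the
   CPU is in a spin-wait loop.  */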
#define atomic_spin_nop() asm ("rep; nop")


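/* Atomic bitwise AND and OR.  As with the other bodies, the lock
   argument is either LOCK_PREFIX or the conditional __arch_cprefix
   used by the catomic_ variants.  */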
#define __arch_and_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "andb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "andw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "andl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "andq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define __arch_cprefix \
  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"

#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)

#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)


#define __arch_or_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "orb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "orw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "orl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __asm __volatile (lock "orq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)

/* We don't use mfence because it is supposedly slower due to having to
   provide stronger guarantees (e.g., regarding self-modifying code).  */
#define atomic_full_barrier() \
  __asm __volatile (LOCK_PREFIX "orl $0, (%%rsp)" ::: "memory")
#define atomic_read_barrier() __asm ("" ::: "memory")
#define atomic_write_barrier() __asm ("" ::: "memory")

#endif  /* atomic-machine.h */