/* Atomic operations.  X86 version.
   Copyright (C) 2018-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _X86_ATOMIC_MACHINE_H
#define _X86_ATOMIC_MACHINE_H 1

#include <stdint.h>
#include <tls.h>                  /* For tcbhead_t.  */
#include <libc-pointer-arith.h>   /* For cast_to_integer.  */

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;


#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif

#define USE_ATOMIC_COMPILER_BUILTINS 1

#ifdef __x86_64__
# define __HAVE_64B_ATOMICS   1
# define SP_REG               "rsp"
# define SEG_REG              "fs"
# define BR_CONSTRAINT        "q"
# define IBR_CONSTRAINT       "iq"
#else
# define __HAVE_64B_ATOMICS   0
# define SP_REG               "esp"
# define SEG_REG              "gs"
# define BR_CONSTRAINT        "r"
# define IBR_CONSTRAINT       "ir"
#endif
#define ATOMIC_EXCHANGE_USES_CAS 0

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))
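
/* The _val_ macro evaluates to the value *MEM held before the operation;
   the _bool_ macro evaluates to 0 (false) when the exchange took place and
   to nonzero when it did not.  A minimal usage sketch (the variable names
   below are illustrative only and not part of this header):

     static int slot;                        hypothetical slot word
     int expected = 0;
     if (atomic_compare_and_exchange_bool_acq (&slot, 1, expected) == 0)
       ... we installed 1, the slot is ours ...
*/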


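/* The __arch_c_* compare-and-exchange variants below are the
   conditional-lock flavours used for the catomic_* (single-thread
   optimized) operations: they test the multiple_threads field of the TCB
   through the %fs/%gs segment register and jump over the lock prefix while
   the process is known to be single-threaded.  */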
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%" SEG_REG ":%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgb %b2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : BR_CONSTRAINT (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%" SEG_REG ":%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgw %w2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : BR_CONSTRAINT (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%" SEG_REG ":%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgl %2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : BR_CONSTRAINT (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#ifdef __x86_64__
# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgq %q2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" ((atomic64_t) cast_to_integer (newval)), \
                         "m" (*mem), \
                         "0" ((atomic64_t) cast_to_integer (oldval)), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })
# define do_exchange_and_add_val_64_acq(pfx, mem, value) 0
# define do_add_val_64_acq(pfx, mem, value) do { } while (0)
#else
/* XXX We do not really need 64-bit compare-and-exchange, at least not at
   the moment.  Using it would cause portability problems, since few other
   32-bit architectures support such an operation.  So no real
   implementation is defined here for now.  If it is ever needed, a
   cmpxchg8b-based version (a sketch follows the stubs below) would work on
   Intel Pentium and later, but NOT on i486.  */
# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); \
     __atomic_link_error (); \
     ret = (newval); \
     ret = (oldval); \
     ret; })

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); \
     __atomic_link_error (); \
     ret = (newval); \
     ret = (oldval); \
     ret; })
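
/* An actual 64-bit CAS for Pentium-class i386 CPUs could be written with
   cmpxchg8b, which compares edx:eax against the 8-byte memory operand and,
   on a match, stores ecx:ebx into it.  The following is only an
   illustrative sketch of such an implementation; it is NOT defined or used
   by this header:

     # define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval)   \
       ({ __typeof (*mem) ret;                                              \
          __asm __volatile (LOCK_PREFIX "cmpxchg8b %1"                      \
                            : "=A" (ret), "=m" (*mem)                       \
                            : "b" ((uint32_t) (uint64_t) (newval)),         \
                              "c" ((uint32_t) ((uint64_t) (newval) >> 32)), \
                              "m" (*mem),                                   \
                              "a" ((uint32_t) (uint64_t) (oldval)),         \
                              "d" ((uint32_t) ((uint64_t) (oldval) >> 32))); \
          ret; })  */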

# define do_exchange_and_add_val_64_acq(pfx, mem, value) \
  ({ __typeof (value) __addval = (value); \
     __typeof (*mem) __result; \
     __typeof (mem) __memp = (mem); \
     __typeof (*mem) __tmpval; \
     __result = *__memp; \
     do \
       __tmpval = __result; \
     while ((__result = pfx##_compare_and_exchange_val_64_acq \
             (__memp, __result + __addval, __result)) != __tmpval); \
     __result; })

# define do_add_val_64_acq(pfx, mem, value) \
  { \
    __typeof (value) __addval = (value); \
    __typeof (mem) __memp = (mem); \
    __typeof (*mem) __oldval = *__memp; \
    __typeof (*mem) __tmpval; \
    do \
      __tmpval = __oldval; \
    while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
            (__memp, __oldval + __addval, __oldval)) != __tmpval); \
  }
#endif


/* Note that we need no lock prefix here: xchg with a memory operand is
   always implicitly locked.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile ("xchgb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile ("xchgw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile ("xchgl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (__HAVE_64B_ATOMICS) \
       __asm __volatile ("xchgq %q0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (newvalue)), \
                           "m" (*mem)); \
     else \
       { \
         result = 0; \
         __atomic_link_error (); \
       } \
     result; })
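
/* Illustrative use of atomic_exchange_acq (a sketch only; the names below
   are hypothetical and not part of this header): a trivial test-and-set
   spin lock.  The implicitly locked xchg already provides the acquire
   ordering; on release, a compiler barrier plus a plain store suffices
   because x86 stores are not reordered with earlier loads or stores.

     static int example_lock;                  0 = free, 1 = held

     while (atomic_exchange_acq (&example_lock, 1) != 0)
       atomic_spin_nop ();                     spin politely (pause)

     ... critical section ...

     atomic_write_barrier ();
     example_lock = 0;
*/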


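/* Fetch-and-add body: LOCK is the literal string pasted in front of the
   xadd instruction, and PFX selects the compare-and-exchange family used
   for the 64-bit fallback when __HAVE_64B_ATOMICS is 0.
   catomic_exchange_and_add below instantiates it with the conditional-lock
   prefix.  */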
#define __arch_exchange_and_add_body(lock, pfx, mem, value) \
  ({ __typeof (*mem) __result; \
     __typeof (value) __addval = (value); \
     if (sizeof (*mem) == 1) \
       __asm __volatile (lock "xaddb %b0, %1" \
                         : "=q" (__result), "=m" (*mem) \
                         : "0" (__addval), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (lock "xaddw %w0, %1" \
                         : "=r" (__result), "=m" (*mem) \
                         : "0" (__addval), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (lock "xaddl %0, %1" \
                         : "=r" (__result), "=m" (*mem) \
                         : "0" (__addval), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (__HAVE_64B_ATOMICS) \
       __asm __volatile (lock "xaddq %q0, %1" \
                         : "=r" (__result), "=m" (*mem) \
                         : "0" ((atomic64_t) cast_to_integer (__addval)), \
                           "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else \
       __result = do_exchange_and_add_val_64_acq (pfx, (mem), __addval); \
     __result; })

#define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)

#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%" SEG_REG ":%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c, \
                                mem, value)


#define __arch_add_body(lock, pfx, apfx, mem, value) \
  do { \
    if (__builtin_constant_p (value) && (value) == 1) \
      pfx##_increment (mem); \
    else if (__builtin_constant_p (value) && (value) == -1) \
      pfx##_decrement (mem); \
    else if (sizeof (*mem) == 1) \
      __asm __volatile (lock "addb %b1, %0" \
                        : "=m" (*mem) \
                        : IBR_CONSTRAINT (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "addw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "addl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (__HAVE_64B_ATOMICS) \
      __asm __volatile (lock "addq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" ((atomic64_t) cast_to_integer (value)), \
                          "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      do_add_val_64_acq (apfx, (mem), (value)); \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, atomic, __arch, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%" SEG_REG ":%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, catomic, __arch_c, mem, value)


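/* atomic_add_negative evaluates to nonzero iff the result of the addition
   is negative (tested via the sign flag); atomic_add_zero evaluates to
   nonzero iff the result is zero (tested via the zero flag).  */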
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : IBR_CONSTRAINT (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (__HAVE_64B_ATOMICS) \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     else \
       __atomic_link_error (); \
     __result; })


#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : IBR_CONSTRAINT (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (__HAVE_64B_ATOMICS) \
       __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" ((atomic64_t) cast_to_integer (value)), \
                           "m" (*mem)); \
     else \
       __atomic_link_error (); \
     __result; })


#define __arch_increment_body(lock, pfx, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "incb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "incw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "incl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (__HAVE_64B_ATOMICS) \
      __asm __volatile (lock "incq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      do_add_val_64_acq (pfx, mem, 1); \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%" SEG_REG ":%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, __arch_c, mem)


#define atomic_increment_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %b1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (__HAVE_64B_ATOMICS) \
       __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __atomic_link_error (); \
     __result; })


#define __arch_decrement_body(lock, pfx, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "decb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "decw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "decl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (__HAVE_64B_ATOMICS) \
      __asm __volatile (lock "decq %q0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      do_add_val_64_acq (pfx, mem, -1); \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%" SEG_REG ":%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem)


#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     __result; })


#define atomic_bit_set(mem, bit) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), IBR_CONSTRAINT (1L << (bit))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1L << (bit))); \
    else if (__builtin_constant_p (bit) && (bit) < 32) \
      __asm __volatile (LOCK_PREFIX "orq %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "i" (1L << (bit))); \
    else if (__HAVE_64B_ATOMICS) \
      __asm __volatile (LOCK_PREFIX "orq %q2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "r" (1UL << (bit))); \
    else \
      __atomic_link_error (); \
  } while (0)


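/* Atomically set bit BIT in *MEM and return its previous value: bts copies
   the old bit into the carry flag, which setc then stores in the result.  */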
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), IBR_CONSTRAINT (bit)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else if (__HAVE_64B_ATOMICS) \
       __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else \
       __atomic_link_error (); \
     __result; })


#define __arch_and_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "andb %b1, %0" \
                        : "=m" (*mem) \
                        : IBR_CONSTRAINT (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "andw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "andl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (__HAVE_64B_ATOMICS) \
      __asm __volatile (lock "andq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __atomic_link_error (); \
  } while (0)

#define __arch_cprefix \
  "cmpl $0, %%" SEG_REG ":%P3\n\tje 0f\n\tlock\n0:\t"

#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)

#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)


#define __arch_or_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "orb %b1, %0" \
                        : "=m" (*mem) \
                        : IBR_CONSTRAINT (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "orw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "orl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (__HAVE_64B_ATOMICS) \
      __asm __volatile (lock "orq %q1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      __atomic_link_error (); \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)

/* We don't use mfence because it is supposedly slower due to having to
   provide stronger guarantees (e.g., regarding self-modifying code).  */
#define atomic_full_barrier() \
  __asm __volatile (LOCK_PREFIX "orl $0, (%%" SP_REG ")" ::: "memory")
#define atomic_read_barrier() __asm ("" ::: "memory")
#define atomic_write_barrier() __asm ("" ::: "memory")

#define atomic_spin_nop() __asm ("pause")
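
/* Illustrative use of atomic_full_barrier (a sketch; the flag variables
   are hypothetical): the one reordering x86 hardware performs is letting a
   later load pass an earlier store, so Dekker/Peterson-style handshakes
   need the full barrier between the store and the load:

     flag[me] = 1;
     atomic_full_barrier ();
     if (flag[other] == 0)
       ... the other side is not (yet) contending ...
*/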

#endif /* atomic-machine.h */