/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <stap-probe.h>

#ifndef __ASSEMBLER__
# include <time.h>
# include <sys/param.h>
# include <bits/pthreadtypes.h>
# include <kernel-features.h>
# include <tcb-offsets.h>

# ifndef LOCK_INSTR
#  ifdef UP
#   define LOCK_INSTR /* nothing */
#  else
#   define LOCK_INSTR "lock;"
#  endif
# endif
#else
# ifndef LOCK
#  ifdef UP
#   define LOCK
#  else
#   define LOCK lock
#  endif
# endif
#endif

#include <lowlevellock-futex.h>

/* XXX Remove when no assembler code uses futexes anymore.  */
#define SYS_futex __NR_futex

#ifndef __ASSEMBLER__

/* Initializers for the lock word: 0 means unlocked, 1 locked without
   waiters, and 2 locked with possible waiters.  */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
#define LLL_LOCK_INITIALIZER_WAITERS (2)


/* NB: in the lll_trylock macro we simply return the value in %eax
   after the cmpxchg instruction.  If the operation succeeded, this
   value is zero.  If it failed, the cmpxchg instruction has loaded
   the current value of the memory word, which is guaranteed to be
   nonzero.  */
#if !IS_IN (libc) || defined UP
# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
#else
# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
                           "je 0f\n\t" \
                           "lock; cmpxchgl %2, %1\n\t" \
                           "jmp 1f\n\t" \
                           "0:\tcmpxchgl %2, %1\n\t" \
                           "1:"
#endif
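
/* Inside libc on SMP configurations the asm above first checks
   __libc_multiple_threads: the LOCK prefix (and its bus-locking cost) is
   only paid once the process has actually created a second thread.  The
   same pattern is used by __lll_lock_asm_start and __lll_unlock_asm_start
   below.  */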

#define lll_trylock(futex) \
  ({ int ret; \
     __asm __volatile (__lll_trylock_asm \
                       : "=a" (ret), "=m" (futex) \
                       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
                         "0" (LLL_LOCK_INITIALIZER) \
                       : "memory"); \
     ret; })
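
/* A minimal sketch of the same try-lock idea written with C11 atomics.
   The function below is illustrative only (it is not part of this header
   and is never compiled); the "example_" name is made up.  */
#if 0
# include <stdatomic.h>

static int
example_trylock (atomic_int *futex)
{
  int expected = LLL_LOCK_INITIALIZER;
  /* On success the lock word goes 0 -> 1 and 0 is returned; on failure
     EXPECTED is overwritten with the observed nonzero value, mirroring
     the %eax result of the cmpxchg in lll_trylock.  */
  if (atomic_compare_exchange_strong (futex, &expected,
                                      LLL_LOCK_INITIALIZER_LOCKED))
    return 0;
  return expected;
}
#endif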

#define lll_cond_trylock(futex) \
  ({ int ret; \
     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
                       : "=a" (ret), "=m" (futex) \
                       : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
                         "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
                       : "memory"); \
     ret; })

#if !IS_IN (libc) || defined UP
# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \
                              "jz 24f\n\t"
#else
# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
                              "je 0f\n\t" \
                              "lock; cmpxchgl %4, %2\n\t" \
                              "jnz 1f\n\t" \
                              "jmp 24f\n" \
                              "0:\tcmpxchgl %4, %2\n\t" \
                              "jz 24f\n\t"
#endif

#define lll_lock(futex, private) \
  (void) \
    ({ int ignore1, ignore2, ignore3; \
       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
         __asm __volatile (__lll_lock_asm_start \
                           "1:\tlea %2, %%" RDI_LP "\n" \
                           "2:\tsub $128, %%" RSP_LP "\n" \
                           ".cfi_adjust_cfa_offset 128\n" \
                           "3:\tcallq __lll_lock_wait_private\n" \
                           "4:\tadd $128, %%" RSP_LP "\n" \
                           ".cfi_adjust_cfa_offset -128\n" \
                           "24:" \
                           : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
                             "=a" (ignore3) \
                           : "0" (1), "m" (futex), "3" (0) \
                           : "cx", "r11", "cc", "memory"); \
       else \
         __asm __volatile (__lll_lock_asm_start \
                           "1:\tlea %2, %%" RDI_LP "\n" \
                           "2:\tsub $128, %%" RSP_LP "\n" \
                           ".cfi_adjust_cfa_offset 128\n" \
                           "3:\tcallq __lll_lock_wait\n" \
                           "4:\tadd $128, %%" RSP_LP "\n" \
                           ".cfi_adjust_cfa_offset -128\n" \
                           "24:" \
                           : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
                             "=a" (ignore3) \
                           : "1" (1), "m" (futex), "3" (0), "0" (private) \
                           : "cx", "r11", "cc", "memory"); \
    })

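/* The asm above handles the fast path inline: cmpxchg moves the lock word
   from 0 to 1 and jumps to label 24 on success.  On contention it calls
   __lll_lock_wait (or __lll_lock_wait_private); the "sub $128, %rsp"
   around the call steps over the x86-64 red zone so the callee cannot
   clobber data the enclosing function may keep there.  A rough,
   illustrative C equivalent of the overall protocol (assuming C11 atomics
   and a raw futex syscall; the real slow path lives elsewhere in glibc)
   is kept under "#if 0" below.  */
#if 0
# include <stdatomic.h>
# include <linux/futex.h>
# include <sys/syscall.h>
# include <unistd.h>

static void
example_lock (atomic_int *futex)
{
  int expected = 0;
  /* Fast path: 0 -> 1 acquires an uncontended lock.  */
  if (atomic_compare_exchange_strong (futex, &expected, 1))
    return;
  /* Slow path: advertise contention by storing 2, then sleep until the
     unlocker wakes us; retry until the lock is observed free.  */
  while (atomic_exchange (futex, 2) != 0)
    syscall (SYS_futex, futex, FUTEX_WAIT, 2, NULL, NULL, 0);
}
#endif
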
#define lll_robust_lock(futex, id, private) \
  ({ int result, ignore1, ignore2; \
     __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
                       "jz 24f\n" \
                       "1:\tlea %2, %%" RDI_LP "\n" \
                       "2:\tsub $128, %%" RSP_LP "\n" \
                       ".cfi_adjust_cfa_offset 128\n" \
                       "3:\tcallq __lll_robust_lock_wait\n" \
                       "4:\tadd $128, %%" RSP_LP "\n" \
                       ".cfi_adjust_cfa_offset -128\n" \
                       "24:" \
                       : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
                         "=a" (result) \
                       : "1" (id), "m" (futex), "3" (0), "0" (private) \
                       : "cx", "r11", "cc", "memory"); \
     result; })
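
/* In the robust variants (lll_robust_lock above, lll_robust_cond_lock
   further below) the lock word is not simply 0/1/2: on acquisition it is
   set to the caller's thread ID, possibly with FUTEX_WAITERS ored in, so
   the kernel's robust-futex machinery can identify the owner and clean up
   if that owner dies while holding the lock.  The contended path goes
   through __lll_robust_lock_wait, defined elsewhere.  */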

#define lll_cond_lock(futex, private) \
  (void) \
    ({ int ignore1, ignore2, ignore3; \
       __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
                         "jz 24f\n" \
                         "1:\tlea %2, %%" RDI_LP "\n" \
                         "2:\tsub $128, %%" RSP_LP "\n" \
                         ".cfi_adjust_cfa_offset 128\n" \
                         "3:\tcallq __lll_lock_wait\n" \
                         "4:\tadd $128, %%" RSP_LP "\n" \
                         ".cfi_adjust_cfa_offset -128\n" \
                         "24:" \
                         : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
                           "=a" (ignore3) \
                         : "1" (2), "m" (futex), "3" (0), "0" (private) \
                         : "cx", "r11", "cc", "memory"); \
    })

#define lll_robust_cond_lock(futex, id, private) \
  ({ int result, ignore1, ignore2; \
     __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
                       "jz 24f\n" \
                       "1:\tlea %2, %%" RDI_LP "\n" \
                       "2:\tsub $128, %%" RSP_LP "\n" \
                       ".cfi_adjust_cfa_offset 128\n" \
                       "3:\tcallq __lll_robust_lock_wait\n" \
                       "4:\tadd $128, %%" RSP_LP "\n" \
                       ".cfi_adjust_cfa_offset -128\n" \
                       "24:" \
                       : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
                         "=a" (result) \
                       : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \
                         "0" (private) \
                       : "cx", "r11", "cc", "memory"); \
     result; })

#define lll_timedlock(futex, timeout, private) \
  ({ int result, ignore1, ignore2, ignore3; \
     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
                       "jz 24f\n" \
                       "1:\tlea %4, %%" RDI_LP "\n" \
                       "0:\tmov %8, %%" RDX_LP "\n" \
                       "2:\tsub $128, %%" RSP_LP "\n" \
                       ".cfi_adjust_cfa_offset 128\n" \
                       "3:\tcallq __lll_timedlock_wait\n" \
                       "4:\tadd $128, %%" RSP_LP "\n" \
                       ".cfi_adjust_cfa_offset -128\n" \
                       "24:" \
                       : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
                         "=&d" (ignore3), "=m" (futex) \
                       : "0" (0), "1" (1), "m" (futex), "m" (timeout), \
                         "2" (private) \
                       : "memory", "cx", "cc", "r10", "r11"); \
     result; })
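
/* Usage sketch, illustrative only and never compiled: in the usual glibc
   convention TIMEOUT points to an absolute CLOCK_REALTIME deadline, as
   handed down from pthread_mutex_timedlock; "example_timedlock" below is
   a made-up name.  */
#if 0
# include <time.h>

static int
example_timedlock (int *lock)
{
  struct timespec abstime;
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;          /* Give up after roughly two seconds.  */
  const struct timespec *deadline = &abstime;
  /* Returns 0 on success, ETIMEDOUT once the deadline has passed.  */
  return lll_timedlock (*lock, deadline, LLL_PRIVATE);
}
#endif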

extern int __lll_timedlock_elision (int *futex, short *adapt_count,
                                    const struct timespec *timeout,
                                    int private) attribute_hidden;

#define lll_timedlock_elision(futex, adapt_count, timeout, private) \
  __lll_timedlock_elision (&(futex), &(adapt_count), timeout, private)

#define lll_robust_timedlock(futex, timeout, id, private) \
  ({ int result, ignore1, ignore2, ignore3; \
     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
                       "jz 24f\n\t" \
                       "1:\tlea %4, %%" RDI_LP "\n" \
                       "0:\tmov %8, %%" RDX_LP "\n" \
                       "2:\tsub $128, %%" RSP_LP "\n" \
                       ".cfi_adjust_cfa_offset 128\n" \
                       "3:\tcallq __lll_robust_timedlock_wait\n" \
                       "4:\tadd $128, %%" RSP_LP "\n" \
                       ".cfi_adjust_cfa_offset -128\n" \
                       "24:" \
                       : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
                         "=&d" (ignore3), "=m" (futex) \
                       : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
                         "2" (private) \
                       : "memory", "cx", "cc", "r10", "r11"); \
     result; })

#if !IS_IN (libc) || defined UP
# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \
                                "je 24f\n\t"
#else
# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
                                "je 0f\n\t" \
                                "lock; decl %0\n\t" \
                                "jne 1f\n\t" \
                                "jmp 24f\n\t" \
                                "0:\tdecl %0\n\t" \
                                "je 24f\n\t"
#endif

#define lll_unlock(futex, private) \
  (void) \
    ({ int ignore; \
       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
         __asm __volatile (__lll_unlock_asm_start \
                           "1:\tlea %0, %%" RDI_LP "\n" \
                           "2:\tsub $128, %%" RSP_LP "\n" \
                           ".cfi_adjust_cfa_offset 128\n" \
                           "3:\tcallq __lll_unlock_wake_private\n" \
                           "4:\tadd $128, %%" RSP_LP "\n" \
                           ".cfi_adjust_cfa_offset -128\n" \
                           "24:" \
                           : "=m" (futex), "=&D" (ignore) \
                           : "m" (futex) \
                           : "ax", "cx", "r11", "cc", "memory"); \
       else \
         __asm __volatile (__lll_unlock_asm_start \
                           "1:\tlea %0, %%" RDI_LP "\n" \
                           "2:\tsub $128, %%" RSP_LP "\n" \
                           ".cfi_adjust_cfa_offset 128\n" \
                           "3:\tcallq __lll_unlock_wake\n" \
                           "4:\tadd $128, %%" RSP_LP "\n" \
                           ".cfi_adjust_cfa_offset -128\n" \
                           "24:" \
                           : "=m" (futex), "=&D" (ignore) \
                           : "m" (futex), "S" (private) \
                           : "ax", "cx", "r11", "cc", "memory"); \
    })
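
/* The unlock fast path above atomically decrements the lock word and is
   done if the result is zero (the word held 1, i.e. no waiters).
   Otherwise __lll_unlock_wake (or __lll_unlock_wake_private) resets the
   word and wakes a waiter.  A rough, illustrative C equivalent, under the
   same assumptions as the lock sketch above, is kept under "#if 0".  */
#if 0
# include <stdatomic.h>
# include <linux/futex.h>
# include <sys/syscall.h>
# include <unistd.h>

static void
example_unlock (atomic_int *futex)
{
  /* 1 -> 0 means nobody was waiting; anything else (2) means the lock
     was contended, so clear it and wake one sleeping waiter.  */
  if (atomic_fetch_sub (futex, 1) != 1)
    {
      atomic_store (futex, 0);
      syscall (SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
}
#endif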

#define lll_robust_unlock(futex, private) \
  do \
    { \
      int ignore; \
      __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
                        "je 24f\n\t" \
                        "1:\tlea %0, %%" RDI_LP "\n" \
                        "2:\tsub $128, %%" RSP_LP "\n" \
                        ".cfi_adjust_cfa_offset 128\n" \
                        "3:\tcallq __lll_unlock_wake\n" \
                        "4:\tadd $128, %%" RSP_LP "\n" \
                        ".cfi_adjust_cfa_offset -128\n" \
                        "24:" \
                        : "=m" (futex), "=&D" (ignore) \
                        : "i" (FUTEX_WAITERS), "m" (futex), \
                          "S" (private) \
                        : "ax", "cx", "r11", "cc", "memory"); \
    } \
  while (0)

#define lll_islocked(futex) \
  (futex != LLL_LOCK_INITIALIZER)


/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via a
   futex wake-up when the clone terminates.  The memory location contains
   the thread ID while the clone is running and is reset to zero by the
   kernel afterwards.  Kernels up to version 3.16.3 do not use the private
   futex operations for this wake-up, so the wait below must use
   LLL_SHARED.  */
#define lll_wait_tid(tid) \
  do { \
    __typeof (tid) __tid; \
    while ((__tid = (tid)) != 0) \
      lll_futex_wait (&(tid), __tid, LLL_SHARED); \
  } while (0)
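
/* Illustrative sketch, never compiled: the loop above boils down to a
   FUTEX_WAIT on the TID word, which returns either when the kernel has
   cleared and woken the word at thread exit or when the sampled value no
   longer matches; "example_wait_tid" is a made-up name.  */
#if 0
# include <linux/futex.h>
# include <sys/syscall.h>
# include <unistd.h>

static void
example_wait_tid (volatile pid_t *tidp)
{
  pid_t tid;
  while ((tid = *tidp) != 0)
    /* A spurious return (value changed, signal) simply loops again.  */
    syscall (SYS_futex, tidp, FUTEX_WAIT, tid, NULL, NULL, 0);
}
#endif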

extern int __lll_timedwait_tid (int *, const struct timespec *)
     attribute_hidden;

/* As lll_wait_tid, but with a timeout: return ETIMEDOUT if the timeout
   expires and EINVAL if ABSTIME is invalid.
   XXX Note that this differs from the generic version in that we do the
   error checking here and not in __lll_timedwait_tid.  */
#define lll_timedwait_tid(tid, abstime) \
  ({ \
    int __result = 0; \
    if ((tid) != 0) \
      { \
        if ((abstime)->tv_nsec < 0 || (abstime)->tv_nsec >= 1000000000) \
          __result = EINVAL; \
        else \
          __result = __lll_timedwait_tid (&(tid), (abstime)); \
      } \
    __result; })

extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
  attribute_hidden;

extern int __lll_unlock_elision (int *lock, int private)
  attribute_hidden;

extern int __lll_trylock_elision (int *lock, short *adapt_count)
  attribute_hidden;

#define lll_lock_elision(futex, adapt_count, private) \
  __lll_lock_elision (&(futex), &(adapt_count), private)
#define lll_unlock_elision(futex, adapt_count, private) \
  __lll_unlock_elision (&(futex), private)
#define lll_trylock_elision(futex, adapt_count) \
  __lll_trylock_elision (&(futex), &(adapt_count))
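
/* These wrappers dispatch to the x86 lock-elision (Intel TSX) code; in
   that code ADAPT_COUNT is, roughly, a per-lock counter used to back off
   from elision for a while after transactional aborts.  */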

#endif  /* !__ASSEMBLER__ */

#endif  /* lowlevellock.h */