/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

/* Fall back to the ordinary low-level trylock on configurations
   without transactional lock elision.  */
#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

/* FORCE_ELISION lets an architecture route plain timed mutexes to the
   elision path; by default it is a no-op.  */
#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
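
/* Caller-side sketch (illustrative only, not part of the library):
   pthread_mutex_trylock never blocks, so callers usually treat EBUSY
   as "do something else and retry later":

     pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

     if (pthread_mutex_trylock (&m) == 0)
       {
         ... critical section ...
         pthread_mutex_unlock (&m);
       }
     else
       handle_contention ();    (hypothetical fallback path)
*/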

int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;
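
      /* Illustrative sketch (not library code): with a recursive mutex
         the owning thread can trylock again without contention; every
         success bumps __count and needs a matching unlock:

           pthread_mutex_trylock (&m);    first lock,  __count == 1
           pthread_mutex_trylock (&m);    re-entry,    __count == 2
           pthread_mutex_unlock (&m);
           pthread_mutex_unlock (&m);     fully released

         assuming m was initialized with type PTHREAD_MUTEX_RECURSIVE.  */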

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    /* The unused attribute keeps -Wunused-label quiet when
       FORCE_ELISION below is a no-op and nothing jumps here.  */
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership; an elided lock is never visibly
         acquired, so the usual owner bookkeeping does not apply.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /* FALL THROUGH */
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
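      /* Background (informal): at thread exit the kernel walks the
         robust list and sets FUTEX_OWNER_DIED in the lock word of every
         mutex the dead thread still held; list_op_pending covers the
         window in which a mutex is held but not yet enqueued, or
         dequeued but not yet released.  */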
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* We haven't acquired the lock as it is already acquired by
                 another owner.  We do not need to ensure ordering wrt another
                 memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              /* FIXME This violates the mutex destruction requirements.  See
                 __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;
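
      /* Recovery sketch (illustrative, not library code): EOWNERDEAD
         means the caller now holds the lock but must repair the
         protected data and mark the mutex consistent before unlocking;
         otherwise the next unlock makes the mutex ENOTRECOVERABLE:

           int e = pthread_mutex_trylock (&m);
           if (e == EOWNERDEAD)
             {
               repair_state ();    (hypothetical recovery step)
               pthread_mutex_consistent (&m);
               e = 0;
             }
           if (e == 0)
             pthread_mutex_unlock (&m);  */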

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* We haven't acquired the lock as it is already acquired by
                   another owner.  We do not need to ensure ordering wrt
                   another memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                /* The kernel has not yet finished the mutex owner death.
                   We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
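
    /* Setup sketch (illustrative, not library code): a PI mutex is
       requested through the mutex attributes; the kernel then handles
       priority boosting and owner death via the FUTEX_TRYLOCK_PI and
       FUTEX_UNLOCK_PI operations used above:

         pthread_mutexattr_t a;
         pthread_mutexattr_init (&a);
         pthread_mutexattr_setprotocol (&a, PTHREAD_PRIO_INHERIT);
         pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
         pthread_mutex_init (&m, &a);

       where the setrobust call is needed only for the robust
       variants.  */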
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
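        /* Raise this thread's priority to the mutex's ceiling, then try
           to grab the lock word while keeping the ceiling bits intact;
           retry if another thread changes the ceiling underneath us.  */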
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;
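
    /* Setup sketch (illustrative, not library code): a
       priority-protected mutex carries its ceiling in the lock word,
       and trylock fails with EINVAL if the caller's priority exceeds
       that ceiling:

         pthread_mutexattr_t a;
         pthread_mutexattr_init (&a);
         pthread_mutexattr_setprotocol (&a, PTHREAD_PRIO_PROTECT);
         pthread_mutexattr_setprioceiling (&a, 20);    (example ceiling)
         pthread_mutex_init (&m, &a);  */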

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}

/* Establish the public alias and the hidden internal definition, unless
   this file is being reused with the names redefined by a variant
   build.  */
#ifndef __pthread_mutex_trylock
#ifndef pthread_mutex_trylock
weak_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
hidden_def (__pthread_mutex_trylock)
#endif
#endif