/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

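/* Fall back to an ordinary trylock on configurations that do not
   provide transactional lock elision.  */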
#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

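/* FORCE_ELISION is used below to switch a default (timed) mutex over
   to the elision path; unless the architecture provides a definition,
   it expands to nothing.  */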
#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

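  /* Dispatch on the mutex type, including the elision bits;
     __builtin_expect biases code generation towards the common
     PTHREAD_MUTEX_TIMED_NP case.  */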
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership: an elided acquisition leaves the
         lock word untouched, and writing to the mutex here would make
         concurrently elided critical sections conflict.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /*FALL THROUGH*/
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  A compiler barrier suffices
         because the kernel reads op_pending and the robust list only on
         behalf of this thread, i.e. in program order.  */
      __asm ("" ::: "memory");

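      /* Loop until we either acquire the lock, take over a lock whose
         previous owner died, or fail with EBUSY/EDEADLK.  The loop
         repeats only as long as the owner-died bit keeps appearing.  */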
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex while
                 preserving the waiters bit, so that threads already
                 blocked are still woken by a later unlock.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* We haven't acquired the lock as it is already acquired by
                 another owner.  We do not need to ensure ordering wrt
                 another memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              /* FIXME This violates the mutex destruction requirements.  See
                 __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

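        /* First try to take the lock with one atomic operation in user
           space; only when the previous owner died do we need the kernel
           (FUTEX_TRYLOCK_PI) to finish the acquisition.  */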
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* We haven't acquired the lock as it is already acquired by
                   another owner.  We do not need to ensure ordering wrt
                   another memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                /* The kernel has not yet finished the mutex owner death.
                   We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

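        /* Priority-protection (ceiling) protocol: read the ceiling that
           is stored in the lock word, raise this thread's recorded
           priority to it, then try to set the locked flag (bit 0) with a
           compare-and-exchange.  Retry if the ceiling changed meanwhile.  */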
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

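        /* The ceiling was stable but the lock bit was already set:
           somebody else holds the mutex.  Undo the priority change and
           leave the switch, which reports EBUSY.  */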
        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}

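/* Unless a variant build defines these names itself, also export the
   function under its public name.  */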
#ifndef __pthread_mutex_trylock
#ifndef pthread_mutex_trylock
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
#endif
#endif