/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <futex-internal.h>
#include <stap-probe.h>

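/* If the architecture does not provide lock elision primitives, fall
   back to the plain low-level lock operations.  */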
#ifndef lll_lock_elision
#define lll_lock_elision(lock, try_lock, private) ({ \
      lll_lock (lock, private); 0; })
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a, t) lll_trylock (a)
#endif

/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

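/* The slow path for the special mutex types (robust, priority-inheritance,
   and priority-protected) lives in a separate function that is deliberately
   not inlined, so the fast path in __pthread_mutex_lock stays small.  */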
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

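  /* Any type bits outside the kind and elision masks indicate one of the
     special mutex types (robust, PI, or PP); those take the slow path.  */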
  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
#ifdef HAVE_ELISION
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
         allow the elision flags to be set.  */
      /* Don't record the owner or users for the elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

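      /* Spin for a while before blocking in the kernel.  The spin budget
         adapts to how much spinning paid off for this mutex in the
         past.  */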
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              atomic_spin_nop ();
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

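          /* Move the stored spin count an eighth of the way toward the
             count just observed (an exponentially weighted average).  */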
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if we
         are building the special case mutexes for use from within condition
         variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
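      /* list_op_pending lets the kernel recover ownership if this thread
         dies in the middle of the acquisition, i.e. before the mutex has
         been enqueued on the thread's robust list.  */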
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell
                 through to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload current lock value.  */
          lll_futex_wait (&mutex->__data.__lock, oldval,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

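        /* Try the uncontended case in user space with a single CAS; only
           if the mutex is already locked do we enter the kernel, which
           then handles blocking and priority inheritance.  */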
        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
                                   NULL, private);
            if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  lll_timedwait (&(int){0}, 0, 0 /* ignored */, NULL,
                                 private);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell
               through to the end of the function, __nusers would be
               incremented, which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

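        /* The lock word combines the priority ceiling (the bits covered by
           PTHREAD_MUTEX_PRIO_CEILING_MASK) with the lock state in the low
           bits: ceilval alone means unlocked, ceilval | 1 locked, and
           ceilval | 2 locked with potential waiters.  */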
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#ifndef __pthread_mutex_lock
weak_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif


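/* When this file is built as pthread_mutex_cond_lock.c (NO_INCR defined),
   also provide the helper used when the kernel has already acquired a PI
   mutex on this thread's behalf (e.g. via a requeue-PI operation on a
   condition variable); it only records the ownership in user space.  */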
#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif