/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>
#include <futex-internal.h>

#include <stap-probe.h>

#ifndef lll_clocklock_elision
#define lll_clocklock_elision(futex, adapt_count, clockid, abstime, private) \
  lll_clocklock (futex, clockid, abstime, private)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a, t) lll_trylock (a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
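
/* Without architecture support for lock elision the macros above fall
   back to the plain low-level lock operations, and FORCE_ELISION
   expands to nothing.  */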
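/* Lock MUTEX, blocking until the absolute deadline ABSTIME, measured
   against CLOCKID, if necessary.  Returns 0 on success, otherwise an
   error code such as EINVAL, EAGAIN, EDEADLK, ETIMEDOUT, EOWNERDEAD
   or ENOTRECOVERABLE.  */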
int
__pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                  clockid_t clockid,
                                  const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     ABSTIME must not be checked for validity.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
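  /* PTHREAD_MUTEX_TIMED_NP is by far the most common type, so the
     second argument of __builtin_expect biases the code layout toward
     that case.  */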
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP | PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_clocklock (mutex->__data.__lock, clockid, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
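      /* On targets that support lock elision FORCE_ELISION may switch
         this mutex to the elision type on the fly and jump to the
         elision path below; elsewhere it expands to nothing.  */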
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_clocklock (mutex->__data.__lock, clockid, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership: an elided lock must appear untaken to
         other threads, and the elision unlock path does not consult
         __owner.  */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_clocklock (mutex->__data.__lock,
                                          clockid, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

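          /* Update the spin estimate as an exponential moving average:
             move one eighth of the way from the old estimate toward the
             spin count just observed.  For example, with __spins == 0
             and cnt == 40 the new estimate is 5.  */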
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

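      /* The lock word of a robust mutex holds the owner's TID in
         FUTEX_TID_MASK plus the FUTEX_WAITERS and FUTEX_OWNER_DIED flag
         bits, which are maintained jointly with the kernel.  */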
      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (! valid_nanoseconds (abstime->tv_nsec))
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex.  */
          int err = lll_futex_clock_wait_bitset (&mutex->__data.__lock,
                                                 oldval, clockid, abstime,
                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* lll_futex_clock_wait_bitset returns a negative errno value;
             if the wait timed out, report ETIMEDOUT.  */
          if (err == -ETIMEDOUT)
            return -err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
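        /* With priority-inheritance mutexes only the uncontended
           acquisition (a CAS from 0 to our TID, below) happens in user
           space.  On contention the FUTEX_LOCK_PI operation lets the
           kernel queue waiters and boost the owner's priority.  */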
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything, including blocking until the timeout:
               FUTEX_LOCK_PI interprets the timeout as an absolute
               CLOCK_REALTIME value, so ABSTIME is passed through
               unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
                                   abstime, private);
            if (e == ETIMEDOUT)
              return ETIMEDOUT;
            else if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread until the timeout is reached.  We wait
                   on a local futex word that never changes value, so the
                   wait can only end with a timeout or a spurious wake-up;
                   loop until ETIMEDOUT and return it.  */
                do
                  e = lll_timedwait (&(int){0}, 0, clockid, abstime,
                                     private);
                while (e != ETIMEDOUT);
                return ETIMEDOUT;
              }
            else if (e != 0)
              return e;

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
                   & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

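        /* The lock word of a priority-protected mutex stores the
           priority ceiling in PTHREAD_MUTEX_PRIO_CEILING_MASK and the
           lock state in the low bits: CEILVAL means unlocked,
           CEILVAL | 1 locked with no waiters, and CEILVAL | 2 locked
           with possible waiters.  */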
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (! valid_nanoseconds (abstime->tv_nsec))
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timespec rt;

                    /* Get the current time.  */
                    __clock_gettime (CLOCK_REALTIME, &rt);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - rt.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - rt.tv_nsec;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}

int
__pthread_mutex_clocklock (pthread_mutex_t *mutex,
                           clockid_t clockid,
                           const struct timespec *abstime)
{
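  /* The underlying futex operation can measure the timeout only
     against the clocks it supports; lll_futex_supported_clockid
     accepts CLOCK_REALTIME and CLOCK_MONOTONIC, so reject any other
     clock up front.  */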
  if (__glibc_unlikely (!lll_futex_supported_clockid (clockid)))
    return EINVAL;

  LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime);
  return __pthread_mutex_clocklock_common (mutex, clockid, abstime);
}
weak_alias (__pthread_mutex_clocklock, pthread_mutex_clocklock)

int
__pthread_mutex_timedlock (pthread_mutex_t *mutex,
                           const struct timespec *abstime)
{
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime);
}
weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)
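
/* Illustrative use (hypothetical caller, not part of this file): give
   up if the lock cannot be taken within two seconds of CLOCK_MONOTONIC
   time:

     struct timespec abstime;
     clock_gettime (CLOCK_MONOTONIC, &abstime);
     abstime.tv_sec += 2;
     int err = pthread_mutex_clocklock (&m, CLOCK_MONOTONIC, &abstime);

   ERR is then 0 if M is now held, or ETIMEDOUT if the deadline passed
   first.  pthread_mutex_timedlock behaves the same way but is
   hard-wired to CLOCK_REALTIME, as the wrapper above shows.  */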