/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>

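/* Targets that support lock elision define their own versions of the
   following macros.  Everywhere else they fall back to the plain
   futex-based primitives, and FORCE_ELISION expands to nothing.  */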
#ifndef lll_clocklock_elision
#define lll_clocklock_elision(futex, adapt_count, clockid, abstime, private) \
  lll_clocklock (futex, clockid, abstime, private)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

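/* Lock MUTEX, blocking if necessary until the absolute time ABSTIME,
   measured against CLOCKID, has passed.  Returns 0 on success and an
   error code otherwise, e.g. ETIMEDOUT once ABSTIME has passed, EINVAL
   for an invalid timeout or mutex type, EDEADLK or EAGAIN for
   error-checking and recursive mutexes, and EOWNERDEAD or
   ENOTRECOVERABLE for robust mutexes.  */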
int
__pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                  clockid_t clockid,
                                  const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     ABSTIME must not be checked for validity.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_clocklock (mutex->__data.__lock, clockid, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_clocklock (mutex->__data.__lock, clockid, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership.  */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_clocklock (mutex->__data.__lock,
                                          clockid, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

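          /* Keep __spins as an exponentially weighted moving average of
             the observed spin counts: each pass moves it one eighth of
             the way towards the latest count, so the spin budget above
             adapts to how contended this mutex typically is.  */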
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell
                 through to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;

          /* We cannot acquire the mutex, nor has its owner died.  Thus,
             try to block using futexes.  Set FUTEX_WAITERS if necessary
             so that other threads are aware that there are potentially
             threads blocked on the futex.  Restart if oldval changed in
             the meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex.  */
          int err = lll_futex_clock_wait_bitset (&mutex->__data.__lock,
                                                 oldval, clockid, abstime,
                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == -ETIMEDOUT)
            return -err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        /* The kernel measures FUTEX_LOCK_PI timeouts against
           CLOCK_REALTIME only, so reject any other clock here rather
           than silently blocking against the wrong clock.  */
        if (__glibc_unlikely (clockid != CLOCK_REALTIME))
          return EINVAL;

        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes the timeout as an absolute
               CLOCK_REALTIME value, so ABSTIME is passed unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, clockid,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (__nanosleep_nocancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

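            /* The kernel sets FUTEX_OWNER_DIED only when it reaps the
               owner of a robust futex, so the flag can never be set
               here for a non-robust PI mutex.  */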
            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell
               through to the end of the function, __nusers would be
               incremented, which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

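        /* The lock word of a PP mutex encodes the priority ceiling in
           PTHREAD_MUTEX_PRIO_CEILING_MASK and the lock state in the low
           bits: CEILVAL means unlocked, CEILVAL | 1 locked without
           waiters, and CEILVAL | 2 locked with possible waiters.  */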
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timespec now;
                    struct timespec rt;

                    /* Get the current time on the clock ABSTIME is
                       measured against.  */
                    __clock_gettime (clockid, &now);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - now.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}

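/* Lock MUTEX, waiting no longer than the absolute time ABSTIME measured
   against CLOCKID.  Only the clocks accepted by
   lll_futex_supported_clockid, i.e. CLOCK_REALTIME and CLOCK_MONOTONIC,
   are valid.  */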
int
__pthread_mutex_clocklock (pthread_mutex_t *mutex,
                           clockid_t clockid,
                           const struct timespec *abstime)
{
  if (__glibc_unlikely (!lll_futex_supported_clockid (clockid)))
    return EINVAL;

  LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime);
  return __pthread_mutex_clocklock_common (mutex, clockid, abstime);
}
weak_alias (__pthread_mutex_clocklock, pthread_mutex_clocklock)

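/* Lock MUTEX, waiting no longer than the absolute CLOCK_REALTIME time
   ABSTIME.  As POSIX specifies, this is equivalent to
   pthread_mutex_clocklock with CLOCK_REALTIME.  */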
int
__pthread_mutex_timedlock (pthread_mutex_t *mutex,
                           const struct timespec *abstime)
{
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime);
}
weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)
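
/* Illustrative caller-side sketch, documentation only and not part of
   this file: to wait at most two seconds for a lock without being
   affected by wall-clock jumps, a caller (here with a hypothetical
   mutex MTX) might do

     struct timespec ts;
     clock_gettime (CLOCK_MONOTONIC, &ts);
     ts.tv_sec += 2;
     int err = pthread_mutex_clocklock (&mtx, CLOCK_MONOTONIC, &ts);

   where ERR is 0 on success and ETIMEDOUT once TS has passed.  */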