/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>

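/* If the architecture does not provide elision variants of the low-level
   lock operations, fall back to the plain implementations and ignore the
   elision-specific arguments.  */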
#ifndef lll_timedlock_elision
#define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

int
__pthread_mutex_timedlock (pthread_mutex_t *mutex,
                           const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_timedlock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));


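      /* Adaptive mutex: spin briefly in user space before blocking in the
         kernel, on the assumption that the lock is usually held only for a
         short time.  */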
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

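          /* Adjust the stored spin count: move it one eighth of the way
             toward the number of iterations this acquisition took, so that
             later attempts adapt to the observed contention.  */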
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;
#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
     || !defined lll_futex_timed_wait_bitset)
          struct timeval tv;
          struct timespec rt;

          /* Get the current time.  */
          (void) __gettimeofday (&tv, NULL);

          /* Compute relative timeout.  */
          rt.tv_sec = abstime->tv_sec - tv.tv_sec;
          rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
          if (rt.tv_nsec < 0)
            {
              rt.tv_nsec += 1000000000;
              --rt.tv_sec;
            }

          /* Already timed out?  */
          if (rt.tv_sec < 0)
            return ETIMEDOUT;
#endif

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex.  */
#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
     || !defined lll_futex_timed_wait_bitset)
          lll_futex_timed_wait (&mutex->__data.__lock, oldval,
                                &rt, PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
#else
          int err = lll_futex_timed_wait_bitset (&mutex->__data.__lock,
              oldval, abstime, FUTEX_CLOCK_REALTIME,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == -ETIMEDOUT)
            return -err;
#endif
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

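        /* We do not hold the mutex.  Try the fast path: a single CAS from 0
           (unlocked) to our TID.  If that fails, let the kernel acquire the
           priority-inheritance futex on our behalf.  */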
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  The timeout value must be a relative value.
               Convert it.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (__nanosleep_nocancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
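            /* The previous owner died while holding the mutex; the kernel
               set FUTEX_OWNER_DIED in the lock word when it handed the
               mutex to us.  Clear the bit and report EOWNERDEAD.  */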
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

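        /* The lock word of a priority-protected mutex encodes the current
           priority ceiling in PTHREAD_MUTEX_PRIO_CEILING_MASK; on top of
           that, the low bits track the lock state: CEILVAL means unlocked,
           CEILVAL | 1 locked without waiters, and CEILVAL | 2 locked with
           possible waiters.  */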
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}
weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)