/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <tcb-offsets.h>
#include <pthread-pi-defines.h>
#include <pthread-errnos.h>
#include <stap-probe.h>

#include <kernel-features.h>


	.text

/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
	.globl	__pthread_cond_wait
	.type	__pthread_cond_wait, @function
	.align	16
__pthread_cond_wait:
.LSTARTCODE:
	cfi_startproc
#ifdef SHARED
	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
			DW.ref.__gcc_personality_v0)
	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
#else
	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
#endif

#define FRAME_SIZE (32+8)
	leaq	-FRAME_SIZE(%rsp), %rsp
	cfi_adjust_cfa_offset(FRAME_SIZE)

	/* Stack frame:

	   rsp + 32
		    +--------------------------+
	   rsp + 24 | old wakeup_seq value     |
		    +--------------------------+
	   rsp + 16 | mutex pointer            |
		    +--------------------------+
	   rsp +  8 | condvar pointer          |
		    +--------------------------+
	   rsp +  4 | old broadcast_seq value  |
		    +--------------------------+
	   rsp +  0 | old cancellation mode    |
		    +--------------------------+
	*/

	LIBC_PROBE (cond_wait, 2, %rdi, %rsi)

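	/* dep_mutex is (void *) -1 for a process-shared condvar and is left
	   alone in that case; otherwise remember the mutex so that
	   pthread_cond_broadcast can requeue waiters to it and so the
	   private futex operations can be used below.  */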
	LP_OP(cmp)	$-1, dep_mutex(%rdi)

	/* Prepare structure passed to cancellation handler.  */
	movq	%rdi, 8(%rsp)
	movq	%rsi, 16(%rsp)

	je	15f
	mov	%RSI_LP, dep_mutex(%rdi)

	/* Get internal lock.  */
15:	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jne	1f

	/* Unlock the mutex.  */
2:	movq	16(%rsp), %rdi
	xorl	%esi, %esi
	callq	__pthread_mutex_unlock_usercnt

	testl	%eax, %eax
	jne	12f

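	/* Register ourselves as a waiter: count one more user of the condvar
	   (total_seq), bump the futex value, and add one to the waiter count
	   kept in the upper bits of cond_nwaiters.  */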
	movq	8(%rsp), %rdi
	incq	total_seq(%rdi)
	incl	cond_futex(%rdi)
	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)

	/* Get and store current wakeup_seq value.  */
	movq	8(%rsp), %rdi
	movq	wakeup_seq(%rdi), %r9
	movl	broadcast_seq(%rdi), %edx
	movq	%r9, 24(%rsp)
	movl	%edx, 4(%rsp)

	/* Unlock.  */
8:	movl	cond_futex(%rdi), %edx
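	/* %edx now holds the value passed as the expected futex value to
	   FUTEX_WAIT below; the unlock slow path (label 3) preserves it.  */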
	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	jne	3f

.LcleanupSTART:
4:	callq	__pthread_enable_asynccancel
	movl	%eax, (%rsp)

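	/* Set up the futex call: no timeout (%r10 == 0).  For a process-shared
	   condvar (dep_mutex == -1) use a plain FUTEX_WAIT; otherwise see
	   whether the PI requeue operation can be used.  */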
	xorq	%r10, %r10
	LP_OP(cmp)	$-1, dep_mutex(%rdi)
	leaq	cond_futex(%rdi), %rdi
	movl	$FUTEX_WAIT, %esi
	je	60f

	mov	dep_mutex-cond_futex(%rdi), %R8_LP
	/* Requeue to a non-robust PI mutex if the PI bit is set and
	   the robust bit is not set.  */
	movl	MUTEX_KIND(%r8), %eax
	andl	$(ROBUST_BIT|PI_BIT), %eax
	cmpl	$PI_BIT, %eax
	jne	61f

	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
	movl	$SYS_futex, %eax
	syscall

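	/* %r8b (the pi flag) records whether the kernel has already locked
	   the PI mutex for us, i.e. the syscall returned zero.  */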
	cmpl	$0, %eax
	sete	%r8b

#ifdef __ASSUME_REQUEUE_PI
	jmp	62f
#else
	je	62f

	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
	   successfully, it has already locked the mutex for us and the
	   pi_flag (%r8b) is set to denote that fact.  However, if another
	   thread changed the futex value before we entered the wait, the
	   syscall may return EAGAIN and the mutex is not locked.  We treat
	   that as a success anyway since later we look at the pi_flag to
	   decide if we got the mutex or not.  The sequence numbers then make
	   sure that only one of the threads actually wakes up.  We retry using
	   normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
	   and PI futexes don't mix.

	   Note that we don't check for EAGAIN specifically; we assume that the
	   only other error the futex function could return is EAGAIN since
	   anything else would mean an error in our function.  It is too
	   expensive to do that check for every call (which is quite common in
	   case of a large number of threads), so it has been skipped.  */
	cmpl	$-ENOSYS, %eax
	jne	62f

# ifndef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAIT, %esi
# endif
#endif

61:
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
#else
	orl	%fs:PRIVATE_FUTEX, %esi
#endif
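
	/* Plain FUTEX_WAIT: the kernel will not lock the mutex for us, so
	   clear the pi flag before blocking.  */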
60:	xorb	%r8b, %r8b
	movl	$SYS_futex, %eax
	syscall

62:	movl	(%rsp), %edi
	callq	__pthread_disable_asynccancel
.LcleanupEND:

	/* Lock.  */
	movq	8(%rsp), %rdi
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jnz	5f

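	/* Decide whether we may consume a wakeup: a broadcast (changed
	   broadcast_seq) releases us unconditionally; otherwise wakeup_seq
	   must have advanced past both the value we recorded and woken_seq,
	   in which case we consume one wakeup.  If not, go back to waiting
	   (label 19).  */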
6:	movl	broadcast_seq(%rdi), %edx

	movq	woken_seq(%rdi), %rax

	movq	wakeup_seq(%rdi), %r9

	cmpl	4(%rsp), %edx
	jne	16f

	cmpq	24(%rsp), %r9
	jbe	19f

	cmpq	%rax, %r9
	jna	19f

	incq	woken_seq(%rdi)

	/* Unlock */
16:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)

	/* Wake up a thread which wants to destroy the condvar object.  */
	cmpq	$0xffffffffffffffff, total_seq(%rdi)
	jne	17f
	movl	cond_nwaiters(%rdi), %eax
	andl	$~((1 << nwaiters_shift) - 1), %eax
	jne	17f

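	/* pthread_cond_destroy sets total_seq to -1 and then waits on
	   cond_nwaiters until the last waiter is gone; wake it here, using
	   the private futex op unless the condvar is process-shared.  */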
	addq	$cond_nwaiters, %rdi
	LP_OP(cmp)	$-1, dep_mutex-cond_nwaiters(%rdi)
	movl	$1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall
	subq	$cond_nwaiters, %rdi

17:	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	jne	10f

	/* If requeue_pi is used the kernel performs the locking of the
	   mutex.  */
11:	movq	16(%rsp), %rdi
	testb	%r8b, %r8b
	jnz	18f

	callq	__pthread_mutex_cond_lock

14:	leaq	FRAME_SIZE(%rsp), %rsp
	cfi_adjust_cfa_offset(-FRAME_SIZE)

	/* We return the result of the mutex_lock operation.  */
	retq

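	/* Re-adjust the unwind info after the return above; the code below
	   still runs with the stack frame allocated.  */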
	cfi_adjust_cfa_offset(FRAME_SIZE)

18:	callq	__pthread_mutex_cond_lock_adjust
	xorl	%eax, %eax
	jmp	14b

	/* We need to go back to futex_wait.  If we're using requeue_pi, then
	   release the mutex we had acquired and go back.  */
19:	testb	%r8b, %r8b
	jz	8b

	/* Adjust the mutex values first and then unlock it.  The unlock
	   should always succeed or else the kernel did not lock the mutex
	   correctly.  */
	movq	16(%rsp), %rdi
	callq	__pthread_mutex_cond_lock_adjust
	movq	%rdi, %r8
	xorl	%esi, %esi
	callq	__pthread_mutex_unlock_usercnt
	/* Reload cond_var.  */
	movq	8(%rsp), %rdi
	jmp	8b

	/* Initial locking failed.  */
1:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
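	/* Pick the private variant of the low-level lock unless the condvar
	   is process-shared (dep_mutex == -1).  */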
	LP_OP(cmp)	$-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
	jmp	2b

	/* Unlock in loop requires wakeup.  */
3:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp)	$-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	/* The call preserves %rdx.  */
	callq	__lll_unlock_wake
#if cond_lock != 0
	subq	$cond_lock, %rdi
#endif
	jmp	4b

	/* Locking in loop failed.  */
5:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp)	$-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
#if cond_lock != 0
	subq	$cond_lock, %rdi
#endif
	jmp	6b

	/* Unlock after loop requires wakeup.  */
10:
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp)	$-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake
	jmp	11b

	/* The initial unlocking of the mutex failed.  */
12:	movq	%rax, %r10
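	/* Preserve the unlock error in %r10; it is restored into %rax at
	   label 13 and becomes our return value.  */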
	movq	8(%rsp), %rdi
	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	je	13f

#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp)	$-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_unlock_wake

13:	movq	%r10, %rax
	jmp	14b

	.size	__pthread_cond_wait, .-__pthread_cond_wait
versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
		  GLIBC_2_3_2)


	.align	16
	.type	__condvar_cleanup1, @function
	.globl	__condvar_cleanup1
	.hidden	__condvar_cleanup1
__condvar_cleanup1:
	/* Stack frame:

	   rsp + 32
		    +--------------------------+
	   rsp + 24 | unused                   |
		    +--------------------------+
	   rsp + 16 | mutex pointer            |
		    +--------------------------+
	   rsp +  8 | condvar pointer          |
		    +--------------------------+
	   rsp +  4 | old broadcast_seq value  |
		    +--------------------------+
	   rsp +  0 | old cancellation mode    |
		    +--------------------------+
	*/

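	/* %rax holds the struct _Unwind_Exception pointer; save it in the
	   unused slot so it can be handed to _Unwind_Resume at the end.  */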
	movq	%rax, 24(%rsp)

	/* Get internal lock.  */
	movq	8(%rsp), %rdi
	movl	$1, %esi
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %esi, (%rdi)
#else
	cmpxchgl %esi, cond_lock(%rdi)
#endif
	jz	1f

#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp)	$-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	callq	__lll_lock_wait
#if cond_lock != 0
	subq	$cond_lock, %rdi
#endif

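	/* If a broadcast arrived since we went to sleep it has already
	   accounted for this waiter; skip the sequence adjustments.  */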
1:	movl	broadcast_seq(%rdi), %edx
	cmpl	4(%rsp), %edx
	jne	3f

	/* We increment the wakeup_seq counter only if it is lower than
	   total_seq.  If this is not the case the thread was woken and
	   then canceled.  In this case we ignore the signal.  */
	movq	total_seq(%rdi), %rax
	cmpq	wakeup_seq(%rdi), %rax
	jbe	6f
	incq	wakeup_seq(%rdi)
	incl	cond_futex(%rdi)
6:	incq	woken_seq(%rdi)

3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)

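	/* %ecx records whether we issue the destroy wakeup below; if so, the
	   catch-all wake at label 2 can be skipped.  */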
	/* Wake up a thread which wants to destroy the condvar object.  */
	xorl	%ecx, %ecx
	cmpq	$0xffffffffffffffff, total_seq(%rdi)
	jne	4f
	movl	cond_nwaiters(%rdi), %eax
	andl	$~((1 << nwaiters_shift) - 1), %eax
	jne	4f

	LP_OP(cmp)	$-1, dep_mutex(%rdi)
	leaq	cond_nwaiters(%rdi), %rdi
	movl	$1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall
	subq	$cond_nwaiters, %rdi
	movl	$1, %ecx

4:	LOCK
#if cond_lock == 0
	decl	(%rdi)
#else
	decl	cond_lock(%rdi)
#endif
	je	2f
#if cond_lock != 0
	addq	$cond_lock, %rdi
#endif
	LP_OP(cmp)	$-1, dep_mutex-cond_lock(%rdi)
	movl	$LLL_PRIVATE, %eax
	movl	$LLL_SHARED, %esi
	cmovne	%eax, %esi
	/* The call preserves %rcx.  */
	callq	__lll_unlock_wake

	/* Wake up all waiters to make sure no signal gets lost.  */
2:	testl	%ecx, %ecx
	jnz	5f
	addq	$cond_futex, %rdi
	LP_OP(cmp)	$-1, dep_mutex-cond_futex(%rdi)
	movl	$0x7fffffff, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
	movl	$FUTEX_WAKE, %eax
	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
	cmove	%eax, %esi
#else
	movl	$0, %eax
	movl	%fs:PRIVATE_FUTEX, %esi
	cmove	%eax, %esi
	orl	$FUTEX_WAKE, %esi
#endif
	movl	$SYS_futex, %eax
	syscall

	/* Lock the mutex only if we don't own it already.  This only happens
	   in case of PI mutexes, if we got cancelled after a successful
	   return of the futex syscall and before disabling async
	   cancellation.  */
5:	movq	16(%rsp), %rdi
	movl	MUTEX_KIND(%rdi), %eax
	andl	$(ROBUST_BIT|PI_BIT), %eax
	cmpl	$PI_BIT, %eax
	jne	7f

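	/* If the owner TID stored in the mutex futex word is ours, the kernel
	   already handed us the lock.  */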
	movl	(%rdi), %eax
	andl	$TID_MASK, %eax
	cmpl	%eax, %fs:TID
	jne	7f
	/* We managed to get the lock.  Fix it up before returning.  */
	callq	__pthread_mutex_cond_lock_adjust
	jmp	8f


7:	callq	__pthread_mutex_cond_lock

8:	movq	24(%rsp), %rdi
.LcallUR:
	call	_Unwind_Resume
	hlt
.LENDCODE:
	cfi_endproc
	.size	__condvar_cleanup1, .-__condvar_cleanup1


	.section .gcc_except_table,"a",@progbits
.LexceptSTART:
	.byte	DW_EH_PE_omit			# @LPStart format
	.byte	DW_EH_PE_omit			# @TType format
	.byte	DW_EH_PE_uleb128		# call-site format
	.uleb128 .Lcstend-.Lcstbegin
.Lcstbegin:
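	/* Call-site table: faults between .LcleanupSTART and .LcleanupEND
	   unwind through __condvar_cleanup1; the _Unwind_Resume call site
	   itself has no landing pad.  */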
	.uleb128 .LcleanupSTART-.LSTARTCODE
	.uleb128 .LcleanupEND-.LcleanupSTART
	.uleb128 __condvar_cleanup1-.LSTARTCODE
	.uleb128 0
	.uleb128 .LcallUR-.LSTARTCODE
	.uleb128 .LENDCODE-.LcallUR
	.uleb128 0
	.uleb128 0
.Lcstend:


#ifdef SHARED
	.hidden	DW.ref.__gcc_personality_v0
	.weak	DW.ref.__gcc_personality_v0
	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
	.align	LP_SIZE
	.type	DW.ref.__gcc_personality_v0, @object
	.size	DW.ref.__gcc_personality_v0, LP_SIZE
DW.ref.__gcc_personality_v0:
	ASM_ADDR __gcc_personality_v0
#endif
