/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>

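/* Architectures with lock elision provide lll_unlock_elision; everywhere
   else, fall back to a plain low-level unlock and report success.  */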
#ifndef lll_unlock_elision
#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
#endif

static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;

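/* Unlock MUTEX.  DECR says whether the user count in __nusers should be
   decremented; the condition variable code unlocks with DECR == 0 so that
   a thread blocked in pthread_cond_wait still counts as a user.  */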
int
attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  if (__builtin_expect (type &
      ~(PTHREAD_MUTEX_KIND_MASK_NP|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);

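  /* __builtin_expect returns its first argument, so this compares TYPE
     against PTHREAD_MUTEX_TIMED_NP while hinting to the compiler that the
     plain "timed" kind is the common case.  */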
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
                                 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}


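/* Slow path for the less common mutex types: robust, priority-inheritance
   (PI), and priority-protected (PP) mutexes.  */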
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;
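  /* Selects process-private vs. process-shared futex operations for the
     wake-up; set per mutex type before the lock word is released.  */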

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
         comments at ENQUEUE_MUTEX.  */
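      /* The empty asm with a "memory" clobber is only a compiler barrier,
         but that suffices here: the kernel walks the robust list in this
         thread's own context when it exits or dies, so only this thread's
         program order matters.  */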
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
         FUTEX_WAITERS set previously, then wake any waiters.
         The unlock operation must be the last access to the mutex to not
         violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
                             & FUTEX_WAITERS) != 0))
        lll_futex_wake (&mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
         FIXME However, this violates the mutex destruction requirements
         because another thread could acquire the mutex, destroy it, and
         reuse the memory for something else; then, if this thread crashes,
         and the memory happens to have a value equal to the TID, the kernel
         will believe it is still related to the mutex (which has been
         destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          /* We must set op_pending before we dequeue the mutex.  Also see
             comments at ENQUEUE_MUTEX.  */
          __asm ("" ::: "memory");
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
         to not violate the mutex destruction requirements (see
         lll_unlock).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
                   & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
                 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
         TID is not the value of __lock anymore, in which case we let the
         kernel take care of the situation.  Use release MO in the CAS to
         synchronize with acquire MO in lock acquisitions.  */
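      /* In the kernel path, FUTEX_UNLOCK_PI hands the lock to the
         highest-priority waiter and undoes this thread's priority boost.  */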
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
              INTERNAL_SYSCALL_DECL (__err);
              INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
                                __lll_private_flag (FUTEX_UNLOCK_PI, private));
              break;
            }
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &l, 0));

      /* This happens after the kernel releases the mutex but violates the
         mutex destruction requirements; see comments in the code handling
         PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
         lock acquisitions.  */
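      /* Keeping only the ceiling bits clears the lock-state bits, leaving
         the mutex unlocked while preserving the stored priority ceiling.  */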
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &oldval, newval));

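      /* A lock state above 1 (ignoring the ceiling bits) means there are
         waiters that need to be woken.  */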
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        lll_futex_wake (&mutex->__data.__lock, 1,
                        PTHREAD_MUTEX_PSHARED (mutex));

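      /* The priority ceiling lives in the high bits of the lock word;
         recover it so the temporary priority boost can be undone.  */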
      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}


int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)