/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>

#ifndef lll_unlock_elision
#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
#endif
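
/* On configurations without lock elision, the fallback above makes
   lll_unlock_elision a plain lll_unlock that always reports success;
   the second argument (the elision state) is ignored in that case.  */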

static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;

int
attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  if (__builtin_expect (type
                        & ~(PTHREAD_MUTEX_KIND_MASK_NP
                            | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);
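  /* Any type bits outside the plain kind mask and the elision flags
     (the robust, priority-inheritance, and priority-protection bits)
     select the slow path above.  */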

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
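      /* (In broad terms, the lll_unlock above atomically resets the
         futex word to zero with release semantics and wakes one waiter
         via FUTEX_WAKE if the old value indicated contention; the
         exact sequence is target-specific, see lowlevellock.h.)  */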

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
                                 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}
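
/* The DECR argument to __pthread_mutex_unlock_usercnt above lets a
   caller release the lock without touching the user count.
   pthread_mutex_unlock passes 1; pthread_cond_wait, which gives the
   mutex up only for the duration of the wait, conceptually does

       __pthread_mutex_unlock_usercnt (mutex, 0);

   so that __nusers still accounts for the waiting thread.  */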


static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
         comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);
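      /* (If this thread is killed at this point, the kernel walks the
         robust list at thread exit; list_op_pending lets it find a
         mutex whose dequeue was still in progress.  A compiler barrier
         suffices for the ordering because the kernel reads this
         thread's memory only once the thread no longer runs.)  */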

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
         FUTEX_WAITERS set previously, then wake any waiters.
         The unlock operation must be the last access to the mutex to not
         violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
                             & FUTEX_WAITERS) != 0))
        lll_futex_wake (&mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
         FIXME However, this violates the mutex destruction requirements
         because another thread could acquire the mutex, destroy it, and
         reuse the memory for something else; then, if this thread crashes,
         and the memory happens to have a value equal to the TID, the kernel
         will believe it is still related to the mutex (which has been
         destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
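          /* (Setting bit 0 in the pending pointer follows the kernel's
             robust-futex ABI: it marks the entry as a PI futex so that
             owner-death cleanup uses the PI fixup path.)  */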
          /* We must set op_pending before we dequeue the mutex.  Also see
             comments at ENQUEUE_MUTEX.  */
          __asm ("" ::: "memory");
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
         to not violate the mutex destruction requirements (see
         lll_unlock).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
                   & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
                 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
         TID is not the value of __lock anymore, in which case we let the
         kernel take care of the situation.  Use release MO in the CAS to
         synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
              INTERNAL_SYSCALL_DECL (__err);
              INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
                                __lll_private_flag (FUTEX_UNLOCK_PI, private));
              break;
            }
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &l, 0));
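      /* (FUTEX_UNLOCK_PI is used in those cases because only the
         kernel can pass the lock on to the highest-priority waiter
         and keep the futex word consistent with its PI state; a plain
         release CAS from our TID to 0 is valid only when the word is
         exactly our TID.)  */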

      /* This happens after the kernel releases the mutex but violates the
         mutex destruction requirements; see comments in the code handling
         PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
         lock acquisitions.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &oldval, newval));
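      /* (For PP mutexes the low bits of __lock hold the lock state,
         0 unlocked, 1 locked, greater than 1 locked with waiters,
         while the bits under PTHREAD_MUTEX_PRIO_CEILING_MASK hold the
         priority ceiling; the CAS above therefore keeps the ceiling
         and clears only the lock state.)  */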

      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        lll_futex_wake (&mutex->__data.__lock, 1,
                        PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}


int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)