/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <list.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <libc-pointer-arith.h>
#include <pthread-pids.h>

#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;
#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call is not available.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif
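
/* Editorial sketch (not compiled): how this flag is meant to be consumed
   elsewhere.  A newly created thread only re-issues the syscall when the
   probe in __pthread_initialize_minimal_internal below did not mark it
   unavailable.  `pd' stands for the new thread's descriptor and is
   hypothetical here.  */
#if 0
  if (__set_robust_list_avail >= 0)
    {
      INTERNAL_SYSCALL_DECL (err);
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
			sizeof (struct robust_list_head));
    }
#endif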

#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if the kernel supports FUTEX_CLOCK_REALTIME.  */
int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
# define __set_futex_clock_realtime() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);

#ifdef SHARED
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
# ifdef SIGSETXID
    .ptr__nptl_setxid = __nptl_setxid,
# endif
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
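
/* Editorial note: in shared builds libc keeps its own copy of this table,
   filled in by __libc_pthread_init (called near the end of
   __pthread_initialize_minimal_internal below), and forwards pthread_*
   stubs through it.  Programs that never load libpthread therefore pay
   only a NULL-pointer check instead of pulling in the real
   implementations.  */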


#ifdef SHARED
static
#endif
void
__nptl_set_robust (struct pthread *self)
{
#ifdef __NR_set_robust_list
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
		    sizeof (struct robust_list_head));
#endif
}

#ifdef SIGCANCEL
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  This handler could be invoked for other signals, or
     the signal could have been sent from another process.  That would
     not be correct and might even be a security problem.  Try to catch
     as many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
	 is already set, but if the signal is sent directly (internally or
	 from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
	/* Already canceled or exiting.  */
	break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (curval == oldval)
	{
	  /* Set the return value.  */
	  THREAD_SETMEM (self, result, PTHREAD_CANCELED);

	  /* Make sure asynchronous cancellation is still enabled.  */
	  if ((newval & CANCELTYPE_BITMASK) != 0)
	    /* Run the registered destructors and terminate the thread.  */
	    __do_cancel ();

	  break;
	}

      oldval = curval;
    }
}
#endif
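
/* Editorial sketch (not compiled): the sending side this handler pairs
   with.  pthread_cancel marks the target canceled and, for asynchronous
   cancellation, delivers SIGCANCEL via the tgkill syscall; a same-process
   tgkill is what makes the si_code == SI_TKILL and si_pid == getpid ()
   checks above hold for legitimate invocations.  `target_tid' is
   hypothetical.  */
#if 0
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (tgkill, err, 3, __getpid (), target_tid, SIGCANCEL);
#endif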


#ifdef SIGSETXID
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations
   to tell each thread to call the respective setxid syscall on itself.
   This is the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  This handler could be invoked for other signals, or
     the signal could have been sent from another process.  That would
     not be correct and might even be a security problem.  Try to catch
     as many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
				 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
					  flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}
#endif
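
/* Editorial note (paraphrase, not the code of __nptl_setxid): the
   broadcasting side works like this.  A call such as setuid () fills in a
   struct xid_command, points __xidcmd at it, signals every other thread
   with SIGSETXID, and then waits on the cntr futex.  The last handler to
   finish -- the decrement above reaching zero -- wakes it, so the id
   change has been applied process-wide before setuid returns.  */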


/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void
__pthread_initialize_minimal_internal (void)
{
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
  {
#if __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				    - offsetof (pthread_mutex_t,
						__data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
				sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }
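
  /* Worked example for futex_offset (hypothetical layout): if
     __data.__lock sits at offset 0 of pthread_mutex_t and
     __data.__list.__next at offset 24, futex_offset is -24.  On thread
     exit the kernel walks the robust list; each entry pointer E is the
     address of a __list.__next field inside a held robust mutex, so the
     lock word to clean up lives at E + futex_offset.  */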

#ifdef __NR_futex
# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
  {
    int word = 0;
    /* NB: the syscall actually takes six parameters.  The last is the
       bit mask.  But since we will not actually wait at all, the value
       is irrelevant.  Given that passing six parameters is difficult on
       some architectures, we just pass whatever random value the calling
       convention calls for to the kernel.  It causes no harm.  */
    INTERNAL_SYSCALL_DECL (err);
    word = INTERNAL_SYSCALL (futex, err, 5, &word,
			     FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			     | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
    assert (INTERNAL_SYSCALL_ERROR_P (word, err));
    if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
      __set_futex_clock_realtime ();
  }
# endif
#endif
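
  /* Editorial note on the probe above: the futex word holds 0 while we
     ask to wait for the value 1, so no wait can actually happen.  A
     kernel that understands FUTEX_CLOCK_REALTIME fails immediately with
     EAGAIN (EWOULDBLOCK); one that does not fails with ENOSYS.  Either
     way an error comes back, which is what the assert relies on, and any
     error other than ENOSYS means the flag is supported.  */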

  /* Set the initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handler used to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left these signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  {
    INTERNAL_SYSCALL_DECL (err);
    (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			     NULL, _NSIG / 8);
  }
#endif

  /* Get the size and alignment requirements of the static TLS block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
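
  /* Worked example (hypothetical numbers): with RLIMIT_STACK at 8 MiB,
     4 KiB pages, and 2 KiB of static TLS, minstack is
     4096 + 2048 + MINIMAL_REST_STACK, far below the limit, so the default
     stacksize stays at the already page-aligned 8 MiB and the guard is
     one page.  */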

#ifdef SHARED
  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keeping the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif
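
  /* Editorial note on the loop above: ld.so may already hold dl_load_lock
     recursively at this point (for instance while running ELF
     constructors that dlopen).  Its recursion count was taken with the
     old lock implementation, so the count is zeroed and the lock is then
     re-acquired the same number of times through __pthread_mutex_lock to
     hand it over consistently.  */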

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
strong_alias (__pthread_initialize_minimal_internal,
	      __pthread_initialize_minimal)


size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
}
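
/* Editorial note: this differs from the minstack computation above, which
   adds only MINIMAL_REST_STACK.  Here the result leaves a full
   PTHREAD_STACK_MIN of usable stack after one page and the static TLS
   block are carved out, so internal callers sizing a minimal thread
   stack are not shortchanged by TLS consumption.  */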