/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <list.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <libc-pointer-arith.h>
#include <pthread-pids.h>
#include <pthread_mutex_conf.h>

#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable (__libc_multiple_threads) in
   libc.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call is not available.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif
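
/* Note: other NPTL code is expected to consult __set_robust_list_avail
   before relying on kernel robust-list support; for example, initializing
   a process-shared robust mutex should fail with ENOTSUP when the system
   call is missing (see pthread_mutex_init.c).  */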

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);
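
/* In the shared build, the following table of libpthread entry points is
   handed to libc through __libc_pthread_init (see below) so that libc
   code can forward to libpthread once it is loaded.  */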

#ifdef SHARED
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
# ifdef SIGSETXID
    .ptr__nptl_setxid = __nptl_setxid,
# endif
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif


#ifdef SHARED
static
#endif
void
__nptl_set_robust (struct pthread *self)
{
#ifdef __NR_set_robust_list
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                    sizeof (struct robust_list_head));
#endif
}
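
/* Note: this is reached from libc's fork in the new child (through the
   ptr_set_robust hook above in the shared case, or directly in the static
   case) because the kernel's robust-list registration is per-task and is
   not carried over to the child by fork.  */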


#ifdef SIGCANCEL
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible for this function to be called
     for other signals, or for the signal to have been sent from another
     process.  Neither is a correct use and the latter might even be a
     security problem.  Try to catch as many incorrect invocations as
     possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal was sent directly (internally
         or from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      oldval = curval;
    }
}
#endif


#ifdef SIGSETXID
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations
   to tell each thread to call the respective setxid syscall on itself.
   This is the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  It would be possible for this function to be called
     for other signals, or for the signal to have been sent from another
     process.  Neither is a correct use and the latter might even be a
     security problem.  Try to catch as many incorrect invocations as
     possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                                 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

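  /* The last thread to process the command wakes the thread that
     initiated the setxid operation and is waiting in __nptl_setxid for
     all threads to finish.  */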
  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}
#endif


/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void
__pthread_initialize_minimal_internal (void)
{
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);

  /* Initialize the robust mutex data.  */
  {
#if __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
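    /* Tell the kernel where each mutex's futex word lives relative to
       its list entry: the entries linked into the robust list are the
       __data.__list.__next members embedded in pthread_mutex_t, so the
       lock word sits at a fixed offset from them.  The kernel uses this
       offset when it walks the list at thread exit to mark mutexes still
       held by the dead thread with FUTEX_OWNER_DIED.  */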
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                                sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
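  /* pd->stackblock itself is left at zero for the initial thread, so the
     nominal stack block [0, __libc_stack_end) is certain to contain every
     address on the initial thread's actual stack.  */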

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left these signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  {
    INTERNAL_SYSCALL_DECL (err);
    (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                             NULL, _NSIG / 8);
  }
#endif

  /* Get the size and alignment requirements for the static TLS block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);
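  /* For example (hypothetical numbers): a reported static TLS size of
     1440 bytes with a 64-byte alignment requirement is rounded up to
     1472, so carving an aligned TLS block out of the top of a thread's
     stack can never overrun the reservation.  */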

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
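  /* pthread_create reads these defaults (under the same lock) whenever
     the supplied attributes carry no explicit stack or guard size; see
     allocate_stack in allocatestack.c.  */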

#ifdef SHARED
  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     and carry over the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);
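  /* ld.so may already hold dl_load_lock recursively at this point (for
     instance, this initializer itself can run while libpthread is being
     dlopened under that lock).  Resetting the count and re-acquiring the
     mutex the recorded number of times makes the recursion depth visible
     to pthread_mutex_lock, so the pending unlocks balance correctly.  */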

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();

#if HAVE_TUNABLES
  __pthread_tunables_init ();
#endif
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)


/* This function is internal (it has a GLIBC_PRIVATE version), but it
   is widely used (either via weak symbol, or dlsym) to obtain the
   __static_tls_size value.  This value is then used to adjust the
   value of the stack size attribute, so that applications receive the
   full requested stack size, not diminished by the TCB and static TLS
   allocation on the stack.  Once the TCB is separately allocated,
   this function should be removed or renamed (if it is still
   necessary at that point).  */
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
}
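
/* A minimal sketch of how an outside consumer might use this interface,
   assuming only that the symbol can be looked up at run time and that
   ATTR is an initialized pthread_attr_t (the exact callers and their
   policies are not specified here):

     #include <dlfcn.h>
     #include <limits.h>
     #include <pthread.h>

     size_t (*get_minstack) (const pthread_attr_t *)
       = (size_t (*) (const pthread_attr_t *))
         dlsym (RTLD_DEFAULT, "__pthread_get_minstack");
     size_t min = get_minstack != NULL
                  ? get_minstack (&attr) : (size_t) PTHREAD_STACK_MIN;

   The caller would then add its own desired usable stack size on top of
   MIN before calling pthread_attr_setstacksize.  */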