/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>
#include <default-sched.h>
#include <futex-internal.h>
#include <tls-setup.h>
#include "libioP.h"

#include <shlib-compat.h>

#include <stap-probe.h>

/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* CONCURRENCY NOTES:

   Understanding who is the owner of the 'struct pthread' or 'PD'
   (refers to the value of the 'struct pthread *pd' function argument)
   is critically important in determining exactly which operations are
   allowed and which are not and when, particularly when it comes to the
   implementation of pthread_create, pthread_join, pthread_detach, and
   other functions which all operate on PD.

   The owner of PD is responsible for freeing the final resources
   associated with PD, and may examine the memory underlying PD at any
   point in time until it frees it back to the OS or releases it for
   reuse by the runtime.

   The thread which calls pthread_create is called the creating thread.
   The creating thread begins as the owner of PD.

   During startup the new thread may examine PD in coordination with the
   owner thread (which may be itself).

   The four cases of ownership transfer are:

   (1) Ownership of PD is released to the process (all threads may use it)
       after the new thread starts in a joinable state,
       i.e. pthread_create returns a usable pthread_t.

   (2) Ownership of PD is released to the new thread starting in a detached
       state.

   (3) Ownership of PD is dynamically released to a running thread via
       pthread_detach.

   (4) Ownership of PD is acquired by the thread which calls pthread_join.

   Implementation notes:

   The PD->stopped_start and thread_ran variables are used to determine
   exactly which of the four ownership states we are in and therefore
   what actions can be taken.  For example after (2) we can no longer
   read from or write to PD, since the thread may no longer exist and
   the memory may be unmapped.

   It is important to point out that PD->lock is used both like a
   one-shot semaphore and subsequently as a mutex.  The lock is taken
   in the parent to force the child to wait, and then the child
   releases the lock.  However, this semaphore-like effect is used
   only for synchronizing the parent and child.  After startup the lock
   is used like a mutex to create a critical section during which a
   single owner modifies the thread parameters.

   The most complicated cases happen during thread startup:

   (a) If the created thread is in a detached (PTHREAD_CREATE_DETACHED),
       or joinable (default PTHREAD_CREATE_JOINABLE) state and
       STOPPED_START is true, then the creating thread has ownership of
       PD until the PD->lock is released by pthread_create.  If any
       errors occur we are in states (c), (d), or (e) below.

   (b) If the created thread is in a detached state
       (PTHREAD_CREATE_DETACHED), and STOPPED_START is false, then the
       creating thread has ownership of PD until it invokes the OS
       kernel's thread creation routine.  If this routine returns
       without error, then the created thread owns PD; otherwise, see
       (c) and (e) below.

   (c) If the detached thread setup failed and THREAD_RAN is true, then
       the creating thread releases ownership to the new thread by
       sending a cancellation signal.  All threads set THREAD_RAN to
       true as quickly as possible after returning from the OS kernel's
       thread creation routine.

   (d) If the joinable thread setup failed and THREAD_RAN is true, then
       the creating thread retains ownership of PD and must cleanup
       state.  Ownership cannot be released to the process via the
       return of pthread_create since a non-zero result entails PD is
       undefined and therefore cannot be joined to free the resources.
       We privately call pthread_join on the thread to finish handling
       the resource shutdown (or at least we should, see bug 19511).

   (e) If the thread creation failed and THREAD_RAN is false, then the
       creating thread retains ownership of PD and must cleanup state.
       No waiting for the new thread is required because it never
       started.

   The nptl_db interface:

   The interface with nptl_db requires that we enqueue PD into a linked
   list and then call a function which the debugger will trap.  The PD
   will then be dequeued and control returned to the thread.  The caller
   at the time must have ownership of PD and such ownership remains
   after control returns to the thread.  The enqueued PD is removed from
   the linked list by the nptl_db callback td_thr_event_getmsg.  The
   debugger must ensure that the thread does not resume execution,
   otherwise ownership of PD may be lost and examining PD will not be
   possible.

   Note that the GNU Debugger, as of commit
   c2c2a31fdb228d41ce3db62b268efea04bd39c18 (December 10th, 2015), no
   longer uses td_thr_event_getmsg and several other related nptl_db
   interfaces.  The principal reason for this is that nptl_db does not
   support non-stop mode, where other threads can run concurrently and
   modify runtime structures currently in use by the debugger and the
   nptl_db interface.

   Axioms:

   * The create_thread function can never set stopped_start to false.
   * The created thread can read stopped_start but never write to it.
   * The variable thread_ran is set some time after the OS thread
     creation routine returns; how much time after the thread is created
     is unspecified, but it should be as short as possible.

*/
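
/* Illustrative sketch only (hypothetical helper names, not part of the
   implementation): the one-shot-semaphore use of PD->lock described
   above is the following handoff, with apply_attributes and
   run_user_code standing in for the corresponding steps in
   create_thread and START_THREAD_DEFN:

     lll_lock (pd->lock, LLL_PRIVATE);      parent: make the child wait
     start_the_thread (pd);                 parent: create the child
     apply_attributes (pd);                 parent: scheduling etc.
     lll_unlock (pd->lock, LLL_PRIVATE);    parent: release the child

     lll_lock (pd->lock, LLL_PRIVATE);      child: wait for the parent
     lll_unlock (pd->lock, LLL_PRIVATE);    child: give it up right away
     run_user_code (pd);                    child: attributes applied
   */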

/* CREATE THREAD NOTES:

   createthread.c defines the create_thread function, and two macros:
   START_THREAD_DEFN and START_THREAD_SELF (see below).

   create_thread must initialize PD->stopped_start.  It should be true
   if the STOPPED_START parameter is true, or if create_thread needs the
   new thread to synchronize at startup for some other implementation
   reason.  If STOPPED_START will be true, then create_thread is obliged
   to lock PD->lock before starting the thread.  Then pthread_create
   unlocks PD->lock which synchronizes-with START_THREAD_DEFN in the
   child thread which does an acquire/release of PD->lock as the last
   action before calling the user entry point.  The goal of all of this
   is to ensure that the required initial thread attributes are applied
   (by the creating thread) before the new thread runs user code.  Note
   that the functions pthread_getschedparam, pthread_setschedparam,
   pthread_setschedprio, __pthread_tpp_change_priority, and
   __pthread_current_priority reuse the same lock, PD->lock, for a
   similar purpose, e.g. synchronizing the setting of similar thread
   attributes.  These functions are never called before the thread is
   created, so they do not participate in startup synchronization, but
   given that the lock is already present and in the unlocked state,
   reusing it saves space.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up and then got
   canceled before calling user code (*PD->start_routine).  */
static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool *stopped_start, STACK_VARIABLES_PARMS,
                          bool *thread_ran);

#include <createthread.c>
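
/* A minimal, hypothetical sketch of the contract described above (the
   real, architecture-specific implementation is pulled in by the
   #include just before this comment; placeholder steps appear in
   angle brackets):

     static int
     create_thread (struct pthread *pd, const struct pthread_attr *attr,
                    bool *stopped_start, STACK_VARIABLES_PARMS,
                    bool *thread_ran)
     {
       if (<startup synchronization needed>)
         *stopped_start = true;
       pd->stopped_start = *stopped_start;
       if (__glibc_unlikely (*stopped_start))
         lll_lock (pd->lock, LLL_PRIVATE);
       int res = <invoke the OS thread creation routine>;
       if (res == 0)
         *thread_ran = true;
       return res == 0 ? 0 : <errno code>;
     }
   */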


struct pthread *
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                /* Skip over the slots of the missing second-level
                   block.  */
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
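
/* For illustration only (hedged, hypothetical client code): the
   iteration above services keys created as below, re-running up to
   PTHREAD_DESTRUCTOR_ITERATIONS rounds when a destructor itself stores
   new non-NULL values with pthread_setspecific:

     static pthread_key_t key;

     static void
     destr (void *data)
     {
       free (data);
     }

     ...
     pthread_key_create (&key, destr);
     pthread_setspecific (key, malloc (16));
     ...
     pthread_exit (NULL);    destr (data) runs on this path
   */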


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}


/* Local function to start thread and handle cleanup.
   createthread.c defines the macro START_THREAD_DEFN to the
   declaration that its create_thread function will refer to, and
   START_THREAD_SELF to the expression to optimally deliver the new
   thread's THREAD_SELF value.  */
START_THREAD_DEFN
{
  struct pthread *pd = START_THREAD_SELF;

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp instead.  */
  struct pthread_unwind_buf unwind_buf;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);

  /* No previous handlers.  NB: This must be done after setjmp since the
     private space in the unwind jump buffer may overlap space used by
     setjmp to store extra architecture-specific information which is
     never used by the cancellation-specific __libc_unwind_longjmp.

     The private space is allowed to overlap because the unwinder never
     has to return through any of the jumped-to call frames, and thus
     only a minimum amount of saved data need be stored, and for example,
     need not include the process signal mask information.  This is all
     an optimization to reduce stack usage when pushing cancellation
     handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;
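
  /* Illustrative sketch only (hypothetical client code, not part of
     this function): the buffer initialized above is the outermost
     unwind target for cancellation in this thread; cleanup handlers
     that user code registers run during that unwind, e.g.:

       void unlock_it (void *m) { pthread_mutex_unlock (m); }
       ...
       pthread_cleanup_push (unlock_it, &mutex);
       ... a cancellation point here may unwind ...
       pthread_cleanup_pop (1);
   */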

  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      /* We are either in (a) or (b), and in either case we either own
         PD already (2) or are about to own PD (1), and so our only
         restriction would be that we can't free PD until we know we
         have ownership (see CONCURRENCY NOTES above).  */
      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);

          /* We have ownership of PD now.  */

          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
      void *ret;
      if (pd->c11)
        {
          /* The function pointer of the c11 thread start is cast to an
             incorrect type on the __pthread_create_2_1 call; however, it
             is cast back to the correct one here, so the call behavior
             is well-defined (it is assumed that pointers to void are
             able to represent all values of int).  */
          int (*start)(void*) = (int (*) (void*)) pd->start_routine;
          ret = (void*) (uintptr_t) start (pd->arg);
        }
      else
        ret = pd->start_routine (pd->arg);
      THREAD_SETMEM (pd, result, ret);
    }

  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger; it might just irritate it if there
     is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are for sure no PI mutexes involved,
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# if __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
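
/* Illustrative sketch only (hypothetical client code): the loop above
   does in user space what the kernel's robust-futex handling normally
   does when a mutex owner dies.  A surviving thread observes the
   FUTEX_OWNER_DIED transfer as EOWNERDEAD:

     int e = pthread_mutex_lock (&m);    m created with
     if (e == EOWNERDEAD)                pthread_mutexattr_setrobust
       {
         repair_shared_state ();         hypothetical recovery step
         pthread_mutex_consistent (&m);
       }
   */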

  advise_stack_range (pd->stackblock, pd->stackblock_size, (uintptr_t) pd,
                      pd->guardsize);

  /* If the thread is detached, free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and
         expect us to reply.  In that case wait until we have done so.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in
           that the futex_wait condition (setxid_futex) is different from
           the condition used in the surrounding loop (cancelhandling).
           We need to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since, in case all threads exit by calling
     'pthread_exit', the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
}


/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}


int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool free_cpuset = false;
  bool c11 = (attr == ATTR_C11_THREAD);
  if (iattr == NULL || c11)
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      default_attr = __default_pthread_attr;
      size_t cpusetsize = default_attr.cpusetsize;
      if (cpusetsize > 0)
        {
          cpu_set_t *cpuset;
          if (__glibc_likely (__libc_use_alloca (cpusetsize)))
            cpuset = __alloca (cpusetsize);
          else
            {
              cpuset = malloc (cpusetsize);
              if (cpuset == NULL)
                {
                  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
                  return ENOMEM;
                }
              free_cpuset = true;
            }
          memcpy (cpuset, default_attr.cpuset, cpusetsize);
          default_attr.cpuset = cpuset;
        }
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
      iattr = &default_attr;
    }

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly, the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;
  pd->c11 = c11;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Set up tcbhead.  */
  tls_setup_tcbhead (pd);

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Inform start_thread (above) about cancellation state that might
     translate into inherited signal state.  */
  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

  if (__glibc_unlikely (__nptl_nthreads == 1))
    _IO_enable_locks ();

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  /* Our local values of stopped_start and thread_ran can be accessed at
     any time.  The PD->stopped_start may only be accessed if we have
     ownership of PD (see CONCURRENCY NOTES above).  */
  bool stopped_start = false; bool thread_ran = false;

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      stopped_start = true;

      /* We always create the thread stopped at startup so we can
         notify the debugger.  */
      retval = create_thread (pd, iattr, &stopped_start,
                              STACK_VARIABLES_ARGS, &thread_ran);
      if (retval == 0)
        {
          /* We retain ownership of PD until (a) (see CONCURRENCY NOTES
             above).  */

          /* Assert stopped_start is true in both our local copy and the
             PD copy.  */
          assert (stopped_start);
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent)
                 != 0);

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, &stopped_start,
                            STACK_VARIABLES_ARGS, &thread_ran);

  if (__glibc_unlikely (retval != 0))
    {
      if (thread_ran)
        /* State (c) or (d) and we may not have PD ownership (see
           CONCURRENCY NOTES above).  We can assert that STOPPED_START
           must have been true because thread creation didn't fail, but
           thread attribute setting did.  */
        /* See bug 19511 which explains why doing nothing here is a
           resource leak for a joinable thread.  */
        assert (stopped_start);
      else
        {
          /* State (e) and we have ownership of PD (see CONCURRENCY
             NOTES above).  */

          /* Oops, we lied for a second.  */
          atomic_decrement (&__nptl_nthreads);

          /* Perhaps a thread wants to change the IDs and is waiting for this
             stillborn thread.  */
          if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2))
            futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

          /* Free the resources.  */
          __deallocate_stack (pd);
        }

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      /* We don't know if we have PD ownership.  Once we check the local
         stopped_start we'll know if we're in state (a) or (b) (see
         CONCURRENCY NOTES above).  */
      if (stopped_start)
        /* State (a), we own PD.  The thread blocked on this lock either
           because we're doing TD_CREATE event reporting, or for some
           other reason that create_thread chose.  Now let it run
           free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (__glibc_unlikely (free_cpuset))
    free (default_attr.cpuset);

  return retval;
}
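
/* Illustrative usage sketch only (hypothetical client code; worker is a
   void *worker (void *) start routine): the ownership cases from the
   CONCURRENCY NOTES map onto ordinary calls as follows.  A joinable
   create releases PD to the process (1) and pthread_join reclaims
   it (4); a detached create releases PD to the new thread itself (2):

     pthread_t t;
     pthread_attr_t a;

     pthread_create (&t, NULL, worker, NULL);    case (1)
     pthread_join (t, NULL);                     case (4)

     pthread_attr_init (&a);
     pthread_attr_setdetachstate (&a, PTHREAD_CREATE_DETACHED);
     pthread_create (&t, &a, worker, NULL);      case (2)
     pthread_attr_destroy (&a);
   */
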
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (__pthread_once)
PTHREAD_STATIC_FN_REQUIRE (__pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (__pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (__pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (__pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (__pthread_getspecific)