/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>
#include <default-sched.h>
#include <futex-internal.h>

#include <shlib-compat.h>

#include <stap-probe.h>

/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* CONCURRENCY NOTES:

   Understanding who owns the 'struct pthread', or 'PD' (the value of
   the 'struct pthread *pd' function argument), is critically important
   in determining exactly which operations are allowed, which are not,
   and when, particularly in the implementation of pthread_create,
   pthread_join, pthread_detach, and other functions which all operate
   on PD.

   The owner of PD is responsible for freeing the final resources
   associated with PD, and may examine the memory underlying PD at any
   point in time until it frees it back to the OS or releases it for
   reuse by the runtime.

   The thread which calls pthread_create is called the creating thread.
   The creating thread begins as the owner of PD.

   During startup the new thread may examine PD in coordination with the
   owner thread (which may be itself).

   The four cases of ownership transfer are:

   (1) Ownership of PD is released to the process (all threads may use it)
       after the new thread starts in a joinable state
       i.e. pthread_create returns a usable pthread_t.

   (2) Ownership of PD is released to the new thread starting in a detached
       state.

   (3) Ownership of PD is dynamically released to a running thread via
       pthread_detach.

   (4) Ownership of PD is acquired by the thread which calls pthread_join.

   Implementation notes:

   The PD->stopped_start and thread_ran variables are used to determine
   exactly which of the four ownership states we are in and therefore
   what actions can be taken.  For example after (2) we cannot read from
   or write to PD anymore since the thread may no longer exist and the
   memory may be unmapped.  The most complicated cases happen during
   thread startup:

   (a) If the created thread is in a detached (PTHREAD_CREATE_DETACHED)
       or joinable (PTHREAD_CREATE_JOINABLE, the default) state and
       STOPPED_START is true, then the creating thread has ownership of
       PD until the PD->lock is released by pthread_create.  If any
       errors occur we are in states (c), (d), or (e) below.

   (b) If the created thread is in a detached state
       (PTHREAD_CREATE_DETACHED), and STOPPED_START is false, then the
       creating thread has ownership of PD until it invokes the OS
       kernel's thread creation routine.  If this routine returns
       without error, then the created thread owns PD; otherwise, see
       (c) and (e) below.

   (c) If the detached thread setup failed and THREAD_RAN is true, then
       the creating thread releases ownership to the new thread by
       sending a cancellation signal.  All threads set THREAD_RAN to
       true as quickly as possible after returning from the OS kernel's
       thread creation routine.

   (d) If the joinable thread setup failed and THREAD_RAN is true, then
       the creating thread retains ownership of PD and must clean up its
       state.  Ownership cannot be released to the process via the
       return of pthread_create since a non-zero result entails PD is
       undefined and therefore cannot be joined to free the resources.
       We privately call pthread_join on the thread to finish handling
       the resource shutdown (or at least we should; see bug 19511).

   (e) If the thread creation failed and THREAD_RAN is false, then the
       creating thread retains ownership of PD and must clean up its
       state.  No waiting for the new thread is required because it
       never started.

   The nptl_db interface:

   The interface with nptl_db requires that we enqueue PD into a linked
   list and then call a function which the debugger will trap.  The PD
   will then be dequeued and control returned to the thread.  The caller
   must have ownership of PD at that time, and such ownership remains
   after control returns to the thread.  The enqueued PD is removed from
   the linked list by the nptl_db callback td_thr_event_getmsg.  The
   debugger must ensure that the thread does not resume execution,
   otherwise ownership of PD may be lost and examining PD will not be
   possible.

   Note that the GNU Debugger, as of commit
   c2c2a31fdb228d41ce3db62b268efea04bd39c18 (December 10th 2015), no
   longer uses td_thr_event_getmsg and several other related nptl_db
   interfaces.  The principal reason for this is that nptl_db does not
   support non-stop mode, where other threads can run concurrently and
   modify runtime structures currently in use by the debugger and the
   nptl_db interface.

   Axioms:

   * The create_thread function can never set stopped_start to false.
   * The created thread can read stopped_start but never write to it.
   * The variable thread_ran is set some time after the OS thread
     creation routine returns; exactly how long after is unspecified,
     but it should be as soon as possible.

*/
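
/* Example (illustrative only, not part of the library): the four
   ownership-transfer cases above, as seen from user code.  A minimal
   sketch using only standard pthread interfaces; error handling is
   omitted for brevity.  */
#if 0
#include <pthread.h>

static void *
task (void *arg)
{
  return arg;
}

static void
ownership_cases (void)
{
  pthread_t t1, t2, t3;
  pthread_attr_t attr;

  /* (1) Joinable create: PD ownership is released to the process;
     (4) a later pthread_join acquires it and frees the resources.  */
  pthread_create (&t1, NULL, task, NULL);
  pthread_join (t1, NULL);

  /* (2) Detached create: the new thread itself owns PD and frees it
     on exit; t2 must never be joined.  */
  pthread_attr_init (&attr);
  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
  pthread_create (&t2, &attr, task, NULL);
  pthread_attr_destroy (&attr);

  /* (3) Dynamic detach: ownership is handed to the running thread.  */
  pthread_create (&t3, NULL, task, NULL);
  pthread_detach (t3);
}
#endif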

/* CREATE THREAD NOTES:

   createthread.c defines the create_thread function, and two macros:
   START_THREAD_DEFN and START_THREAD_SELF (see below).

   create_thread must initialize PD->stopped_start.  It should be true
   if the STOPPED_START parameter is true, or if create_thread needs the
   new thread to synchronize at startup for some other implementation
   reason.  If STOPPED_START will be true, then create_thread is obliged
   to lock PD->lock before starting the thread.  Then pthread_create
   unlocks PD->lock which synchronizes-with START_THREAD_DEFN in the
   child thread which does an acquire/release of PD->lock as the last
   action before calling the user entry point.  The goal of all of this
   is to ensure that the required initial thread attributes are applied
   (by the creating thread) before the new thread runs user code.  Note
   that the functions pthread_getschedparam, pthread_setschedparam,
   pthread_setschedprio, __pthread_tpp_change_priority, and
   __pthread_current_priority reuse the same lock, PD->lock, for a
   similar purpose, e.g. synchronizing the setting of similar thread
   attributes.  These functions are never called before the thread is
   created, so they do not participate in startup synchronization, but
   given that the lock is already present and in the unlocked state,
   reusing it saves space.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up and then got
   canceled before calling user code (*PD->start_routine).  */
static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool *stopped_start, STACK_VARIABLES_PARMS,
                          bool *thread_ran);
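
/* Illustrative sketch (not part of the library) of the stopped-start
   handshake described above.  The helper names os_create_thread and
   apply_sched_attributes are hypothetical stand-ins for the real
   clone/attribute machinery:

     creating thread                      new thread
     ---------------                      ----------
     lll_lock (pd->lock);
     os_create_thread (pd);          -->  START_THREAD_DEFN runs
     apply_sched_attributes (pd);         lll_lock (pd->lock);   <- blocks
     lll_unlock (pd->lock);          -->  ...acquires the lock...
                                          lll_unlock (pd->lock);
                                          pd->start_routine (pd->arg);

   The child's lock/unlock pair is the acquire/release that guarantees
   it observes the attributes the creating thread applied.  */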

#include <createthread.c>


struct pthread *
internal_function
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                /* Skip the whole second-level block.  */
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
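
/* Example (illustrative only, not part of the library): the destructor
   iteration implemented above, seen through the public API.  If a
   destructor stores a new value under some key, specific_used becomes
   true again and the loop runs another round, bounded by
   PTHREAD_DESTRUCTOR_ITERATIONS.  A minimal sketch:  */
#if 0
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t key;

static void
destructor (void *data)
{
  free (data);
  /* Calling pthread_setspecific here would trigger another destructor
     round at thread exit (up to PTHREAD_DESTRUCTOR_ITERATIONS).  */
}

static void *
thread_func (void *arg)
{
  pthread_setspecific (key, malloc (16));
  return NULL;  /* On return, __nptl_deallocate_tsd runs destructor.  */
}

static void
use_tsd (void)
{
  pthread_t t;
  pthread_key_create (&key, destructor);
  pthread_create (&t, NULL, thread_func, NULL);
  pthread_join (t, NULL);
  pthread_key_delete (key);
}
#endif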


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}


/* Local function to start thread and handle cleanup.
   createthread.c defines the macro START_THREAD_DEFN to the
   declaration that its create_thread function will refer to, and
   START_THREAD_SELF to the expression to optimally deliver the new
   thread's THREAD_SELF value.  */
START_THREAD_DEFN
{
  struct pthread *pd = START_THREAD_SELF;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

#ifdef SIGCANCEL
  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }
#endif

  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      /* We are either in (a) or (b), and in either case we either own
         PD already (2) or are about to own PD (1), and so our only
         restriction would be that we can't free PD until we know we
         have ownership (see CONCURRENCY NOTES above).  */
      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);

          /* We have ownership of PD now.  */

          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine,
                  pd->arg);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger; it might just irritate it if there
     is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since the kernel support for those is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
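
  /* Illustrative aside (not compiled): the notification above is what
     makes robust mutexes work for user code.  A sketch using the
     standard API, where repair_shared_state is a hypothetical
     application-specific recovery step:

       pthread_mutexattr_t a;
       pthread_mutexattr_init (&a);
       pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
       pthread_mutex_init (&m, &a);
       ...
       int e = pthread_mutex_lock (&m);
       if (e == EOWNERDEAD)
         {
           repair_shared_state ();
           pthread_mutex_consistent (&m);
         }

     When the owner dies, the FUTEX_OWNER_DIED handling (kernel-side, or
     the fallback loop above) makes the next pthread_mutex_lock return
     EOWNERDEAD instead of blocking forever.  */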

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
#else
  /* Page aligned start of memory to free (higher than or equal
     to current sp plus the minimum stack size).  */
  void *freeblock = (void *) ((size_t) (CURRENT_STACK_FRAME
                                        + PTHREAD_STACK_MIN
                                        + pagesize_m1)
                              & ~pagesize_m1);
  char *free_end = (char *) (((uintptr_t) pd - pd->guardsize) & ~pagesize_m1);
  /* Is there any space to free?  */
  if (free_end > (char *) freeblock)
    {
      size_t freesize = (size_t) (free_end - (char *) freeblock);
      assert (freesize < pd->stackblock_size);
      __madvise (freeblock, freesize, MADV_DONTNEED);
    }
#endif

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and
         expect us to reply.  In this case wait until we have done so.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in that
           the futex_wait condition (setxid_futex) is different from the
           condition used in the surrounding loop (cancelhandling).  We need
           to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
}
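
/* Example (illustrative only, not part of the library): the last-thread
   rule implemented above.  If every thread terminates via pthread_exit,
   the process exit status is 0, even when main itself calls
   pthread_exit.  A minimal sketch:  */
#if 0
#include <pthread.h>

static void *
worker (void *arg)
{
  return NULL;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, worker, NULL);
  /* main stops being a thread here; the process keeps running until
     the worker finishes, then exits with status 0.  */
  pthread_exit (NULL);
}
#endif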


/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}
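
/* Aside (assuming the usual thread_db.h bitset encoding): events are
   kept in a small bitset, one bit per event number.  __td_eventword
   selects the word and __td_eventmask the bit within it, roughly:

     word = (event - 1) / (bits per word)
     mask = 1UL << ((event - 1) % (bits per word))

   so the test above simply asks whether the TD_CREATE bit is set in
   either the global or the per-thread event mask.  */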


int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool free_cpuset = false;
  if (iattr == NULL)
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      default_attr = __default_pthread_attr;
      size_t cpusetsize = default_attr.cpusetsize;
      if (cpusetsize > 0)
        {
          cpu_set_t *cpuset;
          if (__glibc_likely (__libc_use_alloca (cpusetsize)))
            cpuset = __alloca (cpusetsize);
          else
            {
              cpuset = malloc (cpusetsize);
              if (cpuset == NULL)
                {
                  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
                  return ENOMEM;
                }
              free_cpuset = true;
            }
          memcpy (cpuset, default_attr.cpuset, cpusetsize);
          default_attr.cpuset = cpuset;
        }
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
      iattr = &default_attr;
    }

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Inform start_thread (above) about cancellation state that might
     translate into inherited signal state.  */
  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  /* Our local values of stopped_start and thread_ran can be accessed at
     any time.  PD->stopped_start may only be accessed if we have
     ownership of PD (see CONCURRENCY NOTES above).  */
  bool stopped_start = false, thread_ran = false;

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      stopped_start = true;

      /* We always create the thread stopped at startup so we can
         notify the debugger.  */
      retval = create_thread (pd, iattr, &stopped_start,
                              STACK_VARIABLES_ARGS, &thread_ran);
      if (retval == 0)
        {
          /* We retain ownership of PD until (a) (see CONCURRENCY NOTES
             above).  */

          /* Assert stopped_start is true in both our local copy and the
             PD copy.  */
          assert (stopped_start);
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent)
                 != 0);

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, &stopped_start,
                            STACK_VARIABLES_ARGS, &thread_ran);

  if (__glibc_unlikely (retval != 0))
    {
      if (thread_ran)
        /* State (c) or (d) and we may not have PD ownership (see
           CONCURRENCY NOTES above).  We can assert that STOPPED_START
           must have been true because thread creation didn't fail, but
           thread attribute setting did.  */
        /* See bug 19511 which explains why doing nothing here is a
           resource leak for a joinable thread.  */
        assert (stopped_start);
      else
        {
          /* State (e) and we have ownership of PD (see CONCURRENCY
             NOTES above).  */

          /* Oops, we lied for a second.  */
          atomic_decrement (&__nptl_nthreads);

          /* Perhaps a thread wants to change the IDs and is waiting for this
             stillborn thread.  */
          if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2))
            futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

          /* Free the resources.  */
          __deallocate_stack (pd);
        }

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      /* We don't know if we have PD ownership.  Once we check the local
         stopped_start we'll know if we're in state (a) or (b) (see
         CONCURRENCY NOTES above).  */
      if (stopped_start)
        /* State (a), we own PD.  The thread blocked on this lock either
           because we're doing TD_CREATE event reporting, or for some
           other reason that create_thread chose.  Now let it run
           free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (__glibc_unlikely (free_cpuset))
    free (default_attr.cpuset);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
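
/* Example (illustrative only, not part of the library): creating a
   thread through the public entry point, including the ENOMEM->EAGAIN
   translation visible to callers.  A minimal sketch:  */
#if 0
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

static void *
hello (void *arg)
{
  puts ((const char *) arg);
  return NULL;
}

static int
spawn_with_small_stack (void)
{
  pthread_t t;
  pthread_attr_t attr;
  int err;

  pthread_attr_init (&attr);
  pthread_attr_setstacksize (&attr, PTHREAD_STACK_MIN);

  /* On failure pthread_create returns an errno code; resource
     exhaustion shows up as EAGAIN, never ENOMEM (translated above).  */
  err = pthread_create (&t, &attr, hello, "hi");
  pthread_attr_destroy (&attr);
  if (err == EAGAIN)
    return err;  /* Too many threads or out of memory.  */
  if (err == 0)
    pthread_join (t, NULL);
  return err;
}
#endif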


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)