1 | /* Copyright (C) 2002-2017 Free Software Foundation, Inc. |
2 | This file is part of the GNU C Library. |
3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <http://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <assert.h> |
20 | #include <errno.h> |
21 | #include <signal.h> |
22 | #include <stdint.h> |
23 | #include <string.h> |
24 | #include <unistd.h> |
25 | #include <sys/mman.h> |
26 | #include <sys/param.h> |
27 | #include <dl-sysdep.h> |
28 | #include <dl-tls.h> |
29 | #include <tls.h> |
30 | #include <list.h> |
31 | #include <lowlevellock.h> |
32 | #include <futex-internal.h> |
33 | #include <kernel-features.h> |
34 | #include <stack-aliasing.h> |
35 | |
36 | |
37 | #ifndef NEED_SEPARATE_REGISTER_STACK |
38 | |
39 | /* Most architectures have exactly one stack pointer. Some have more. */ |
40 | # define STACK_VARIABLES void *stackaddr = NULL |
41 | |
42 | /* How to pass the values to the 'create_thread' function. */ |
43 | # define STACK_VARIABLES_ARGS stackaddr |
44 | |
/* How to declare the function that receives these parameters. */
46 | # define STACK_VARIABLES_PARMS void *stackaddr |
47 | |
48 | /* How to declare allocate_stack. */ |
49 | # define ALLOCATE_STACK_PARMS void **stack |
50 | |
51 | /* This is how the function is called. We do it this way to allow |
52 | other variants of the function to have more parameters. */ |
53 | # define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr) |
54 | |
55 | #else |
56 | |
57 | /* We need two stacks. The kernel will place them but we have to tell |
58 | the kernel about the size of the reserved address space. */ |
59 | # define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0 |
60 | |
61 | /* How to pass the values to the 'create_thread' function. */ |
62 | # define STACK_VARIABLES_ARGS stackaddr, stacksize |
63 | |
/* How to declare the function that receives these parameters. */
65 | # define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize |
66 | |
67 | /* How to declare allocate_stack. */ |
68 | # define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize |
69 | |
70 | /* This is how the function is called. We do it this way to allow |
71 | other variants of the function to have more parameters. */ |
72 | # define ALLOCATE_STACK(attr, pd) \ |
73 | allocate_stack (attr, pd, &stackaddr, &stacksize) |
74 | |
75 | #endif |
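
/* Illustrative sketch only (not part of the build): a caller such as
pthread_create uses these macros roughly as follows; the exact
create_thread signature is simplified here.

     STACK_VARIABLES;
     int err = ALLOCATE_STACK (iattr, &pd);
     if (err == 0)
       err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);

With NEED_SEPARATE_REGISTER_STACK defined, STACK_VARIABLES_ARGS expands
to both 'stackaddr' and 'stacksize'; otherwise only 'stackaddr' is
passed. */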
76 | |
77 | |
78 | /* Default alignment of stack. */ |
79 | #ifndef STACK_ALIGN |
80 | # define STACK_ALIGN __alignof__ (long double) |
81 | #endif |
82 | |
83 | /* Default value for minimal stack size after allocating thread |
84 | descriptor and guard. */ |
85 | #ifndef MINIMAL_REST_STACK |
86 | # define MINIMAL_REST_STACK 4096 |
87 | #endif |
88 | |
89 | |
90 | /* Newer kernels have the MAP_STACK flag to indicate a mapping is used for |
91 | a stack. Use it when possible. */ |
92 | #ifndef MAP_STACK |
93 | # define MAP_STACK 0 |
94 | #endif |
95 | |
/* This yields the pointer that the TLS support code calls the thread pointer. */
97 | #if TLS_TCB_AT_TP |
98 | # define TLS_TPADJ(pd) (pd) |
99 | #elif TLS_DTV_AT_TP |
100 | # define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE)) |
101 | #endif |
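
/* A rough picture of the two layouts (a sketch, not a normative
description). With TLS_TCB_AT_TP (e.g. x86_64) the descriptor itself is
the TCB, so PD is already the thread pointer:

     lower addresses ... | static TLS | TCB == struct pthread   <- TP

With TLS_DTV_AT_TP (e.g. ARM, PowerPC) the thread pointer lies
TLS_PRE_TCB_SIZE bytes past the start of struct pthread, hence the
adjustment above:

     struct pthread ... | TCB <- TP | static TLS ... higher addresses  */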
102 | |
103 | /* Cache handling for not-yet free stacks. */ |
104 | |
/* Maximum size of the stack cache, in bytes. */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default. */
107 | static size_t stack_cache_actsize; |
108 | |
/* Mutex protecting the stack cache and the stack lists. */
110 | static int stack_cache_lock = LLL_LOCK_INITIALIZER; |
111 | |
/* List of cached stacks that are currently unused. */
113 | static LIST_HEAD (stack_cache); |
114 | |
115 | /* List of the stacks in use. */ |
116 | static LIST_HEAD (stack_used); |
117 | |
/* We need to record which list operation is in progress so that, in
case of an asynchronous interruption due to a fork() call, we can
repair the lists afterwards. */
121 | static uintptr_t in_flight_stack; |
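
/* The low bit of in_flight_stack encodes the pending operation: the bit
is set while an element is being added to a list and clear while one is
being removed (see stack_list_add and stack_list_del below);
__reclaim_stacks uses this after fork to replay or undo the interrupted
operation. */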
122 | |
123 | /* List of the threads with user provided stacks in use. No need to |
124 | initialize this, since it's done in __pthread_initialize_minimal. */ |
125 | list_t __stack_user __attribute__ ((nocommon)); |
126 | hidden_data_def (__stack_user) |
127 | |
128 | |
129 | /* Check whether the stack is still used or not. */ |
130 | #define FREE_P(descr) ((descr)->tid <= 0) |
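/* A non-positive TID marks the stack as reusable: the kernel clears the
field when the thread exits (via the child-tid clearing set up at clone
time), and __reclaim_stacks resets it explicitly after fork. */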
131 | |
132 | |
133 | static void |
134 | stack_list_del (list_t *elem) |
135 | { |
136 | in_flight_stack = (uintptr_t) elem; |
137 | |
138 | atomic_write_barrier (); |
139 | |
140 | list_del (elem); |
141 | |
142 | atomic_write_barrier (); |
143 | |
144 | in_flight_stack = 0; |
145 | } |
146 | |
147 | |
148 | static void |
149 | stack_list_add (list_t *elem, list_t *list) |
150 | { |
151 | in_flight_stack = (uintptr_t) elem | 1; |
152 | |
153 | atomic_write_barrier (); |
154 | |
155 | list_add (elem, list); |
156 | |
157 | atomic_write_barrier (); |
158 | |
159 | in_flight_stack = 0; |
160 | } |
161 | |
162 | |
/* We create a doubly linked list of all cache entries. Doubly linked
because this allows removing entries from the end. */
165 | |
166 | |
/* Get a stack from the cache. We have to match by size since
some blocks might be too small or far too large. */
169 | static struct pthread * |
170 | get_cached_stack (size_t *sizep, void **memp) |
171 | { |
172 | size_t size = *sizep; |
173 | struct pthread *result = NULL; |
174 | list_t *entry; |
175 | |
176 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
177 | |
178 | /* Search the cache for a matching entry. We search for the |
179 | smallest stack which has at least the required size. Note that |
180 | in normal situations the size of all allocated stacks is the |
same. At the very least there are only a few different sizes.
182 | Therefore this loop will exit early most of the time with an |
183 | exact match. */ |
184 | list_for_each (entry, &stack_cache) |
185 | { |
186 | struct pthread *curr; |
187 | |
188 | curr = list_entry (entry, struct pthread, list); |
189 | if (FREE_P (curr) && curr->stackblock_size >= size) |
190 | { |
191 | if (curr->stackblock_size == size) |
192 | { |
193 | result = curr; |
194 | break; |
195 | } |
196 | |
197 | if (result == NULL |
198 | || result->stackblock_size > curr->stackblock_size) |
199 | result = curr; |
200 | } |
201 | } |
202 | |
203 | if (__builtin_expect (result == NULL, 0) |
204 | /* Make sure the size difference is not too excessive. In that |
205 | case we do not use the block. */ |
206 | || __builtin_expect (result->stackblock_size > 4 * size, 0)) |
207 | { |
208 | /* Release the lock. */ |
209 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
210 | |
211 | return NULL; |
212 | } |
213 | |
214 | /* Don't allow setxid until cloned. */ |
215 | result->setxid_futex = -1; |
216 | |
217 | /* Dequeue the entry. */ |
218 | stack_list_del (&result->list); |
219 | |
220 | /* And add to the list of stacks in use. */ |
221 | stack_list_add (&result->list, &stack_used); |
222 | |
223 | /* And decrease the cache size. */ |
224 | stack_cache_actsize -= result->stackblock_size; |
225 | |
226 | /* Release the lock early. */ |
227 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
228 | |
229 | /* Report size and location of the stack to the caller. */ |
230 | *sizep = result->stackblock_size; |
231 | *memp = result->stackblock; |
232 | |
233 | /* Cancellation handling is back to the default. */ |
234 | result->cancelhandling = 0; |
235 | result->cleanup = NULL; |
236 | |
237 | /* No pending event. */ |
238 | result->nextevent = NULL; |
239 | |
240 | /* Clear the DTV. */ |
241 | dtv_t *dtv = GET_DTV (TLS_TPADJ (result)); |
242 | for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt) |
243 | free (dtv[1 + cnt].pointer.to_free); |
244 | memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t)); |
245 | |
246 | /* Re-initialize the TLS. */ |
247 | _dl_allocate_tls_init (TLS_TPADJ (result)); |
248 | |
249 | return result; |
250 | } |
251 | |
252 | |
253 | /* Free stacks until cache size is lower than LIMIT. */ |
254 | void |
255 | __free_stacks (size_t limit) |
256 | { |
257 | /* We reduce the size of the cache. Remove the last entries until |
258 | the size is below the limit. */ |
259 | list_t *entry; |
260 | list_t *prev; |
261 | |
262 | /* Search from the end of the list. */ |
263 | list_for_each_prev_safe (entry, prev, &stack_cache) |
264 | { |
265 | struct pthread *curr; |
266 | |
267 | curr = list_entry (entry, struct pthread, list); |
268 | if (FREE_P (curr)) |
269 | { |
270 | /* Unlink the block. */ |
271 | stack_list_del (entry); |
272 | |
273 | /* Account for the freed memory. */ |
274 | stack_cache_actsize -= curr->stackblock_size; |
275 | |
276 | /* Free the memory associated with the ELF TLS. */ |
277 | _dl_deallocate_tls (TLS_TPADJ (curr), false); |
278 | |
279 | /* Remove this block. This should never fail. If it does |
280 | something is really wrong. */ |
281 | if (__munmap (curr->stackblock, curr->stackblock_size) != 0) |
282 | abort (); |
283 | |
284 | /* Maybe we have freed enough. */ |
285 | if (stack_cache_actsize <= limit) |
286 | break; |
287 | } |
288 | } |
289 | } |
290 | |
291 | |
/* Add a stack which is not used anymore to the cache. Must be
called with the cache lock held. */
294 | static inline void |
295 | __attribute ((always_inline)) |
296 | queue_stack (struct pthread *stack) |
297 | { |
298 | /* We unconditionally add the stack to the list. The memory may |
299 | still be in use but it will not be reused until the kernel marks |
300 | the stack as not used anymore. */ |
301 | stack_list_add (&stack->list, &stack_cache); |
302 | |
303 | stack_cache_actsize += stack->stackblock_size; |
304 | if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize)) |
305 | __free_stacks (stack_cache_maxsize); |
306 | } |
307 | |
308 | |
309 | static int |
310 | internal_function |
311 | change_stack_perm (struct pthread *pd |
312 | #ifdef NEED_SEPARATE_REGISTER_STACK |
313 | , size_t pagemask |
314 | #endif |
315 | ) |
316 | { |
317 | #ifdef NEED_SEPARATE_REGISTER_STACK |
318 | void *stack = (pd->stackblock |
319 | + (((((pd->stackblock_size - pd->guardsize) / 2) |
320 | & pagemask) + pd->guardsize) & pagemask)); |
321 | size_t len = pd->stackblock + pd->stackblock_size - stack; |
322 | #elif _STACK_GROWS_DOWN |
323 | void *stack = pd->stackblock + pd->guardsize; |
324 | size_t len = pd->stackblock_size - pd->guardsize; |
325 | #elif _STACK_GROWS_UP |
326 | void *stack = pd->stackblock; |
327 | size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock; |
328 | #else |
329 | # error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP" |
330 | #endif |
331 | if (__mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) |
332 | return errno; |
333 | |
334 | return 0; |
335 | } |
336 | |
/* Return the guard page position within the allocated stack. */
338 | static inline char * |
339 | __attribute ((always_inline)) |
340 | guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd, |
341 | size_t pagesize_m1) |
342 | { |
343 | #ifdef NEED_SEPARATE_REGISTER_STACK |
344 | return mem + (((size - guardsize) / 2) & ~pagesize_m1); |
345 | #elif _STACK_GROWS_DOWN |
346 | return mem; |
347 | #elif _STACK_GROWS_UP |
348 | return (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1); |
349 | #endif |
350 | } |
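
/* A sketch of where the guard ends up in the three cases handled above
(addresses grow to the right; not to scale):

     _STACK_GROWS_DOWN:  [ guard | usable stack ............ | TLS/pd ]
                           ^ mem
     _STACK_GROWS_UP:    [ usable stack ............ | guard | TLS/pd ]
                                                       ^ pd - guardsize
     separate register stack (ia64): the guard sits in the middle of the
     block, between the memory stack and the register backing store. */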
351 | |
/* Given a stack allocated with PROT_NONE, set up the required portions
with the 'prot' flags based on the guard page position. */
354 | static inline int |
355 | setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize, |
356 | const int prot) |
357 | { |
358 | char *guardend = guard + guardsize; |
359 | #if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK) |
360 | /* As defined at guard_position, for architectures with downward stack |
361 | the guard page is always at start of the allocated area. */ |
362 | if (__mprotect (guardend, size - guardsize, prot) != 0) |
363 | return errno; |
364 | #else |
365 | size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem; |
366 | if (__mprotect (mem, mprots1, prot) != 0) |
367 | return errno; |
368 | size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend; |
369 | if (__mprotect (guardend, mprots2, prot) != 0) |
370 | return errno; |
371 | #endif |
372 | return 0; |
373 | } |
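
/* For example, for a downward-growing stack the single mprotect above
leaves the block as

     [ PROT_NONE guard | size - guardsize bytes with 'prot' ]

whereas in the other configurations the region is re-protected in two
pieces, below and above the guard, leaving only the guard pages as
PROT_NONE (a sketch of the intended result, not a separate code path). */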
374 | |
/* Advise the kernel that the unused part of the stack memory can be
reclaimed. Everything is freed except for the area still in use near
the stack pointer and the space used for the TCB itself. */
377 | static inline void |
378 | __always_inline |
379 | advise_stack_range (void *mem, size_t size, uintptr_t pd, size_t guardsize) |
380 | { |
381 | uintptr_t sp = (uintptr_t) CURRENT_STACK_FRAME; |
382 | size_t pagesize_m1 = __getpagesize () - 1; |
383 | #if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK) |
384 | size_t freesize = (sp - (uintptr_t) mem) & ~pagesize_m1; |
385 | assert (freesize < size); |
386 | if (freesize > PTHREAD_STACK_MIN) |
387 | __madvise (mem, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED); |
388 | #else |
389 | /* Page aligned start of memory to free (higher than or equal |
390 | to current sp plus the minimum stack size). */ |
391 | uintptr_t freeblock = (sp + PTHREAD_STACK_MIN + pagesize_m1) & ~pagesize_m1; |
392 | uintptr_t free_end = (pd - guardsize) & ~pagesize_m1; |
393 | if (free_end > freeblock) |
394 | { |
395 | size_t freesize = free_end - freeblock; |
396 | assert (freesize < size); |
397 | __madvise ((void*) freeblock, freesize, MADV_DONTNEED); |
398 | } |
399 | #endif |
400 | } |
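
/* Illustrative use (a sketch; the actual caller lives elsewhere in
NPTL): once a thread's start routine has returned, a call roughly like

     advise_stack_range (pd->stackblock, pd->stackblock_size,
                         (uintptr_t) pd, pd->guardsize);

lets the kernel reclaim the pages the thread no longer needs while the
stack waits to be joined, cached or reused. */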
401 | |
402 | /* Returns a usable stack for a new thread either by allocating a |
403 | new stack or reusing a cached stack of sufficient size. |
404 | ATTR must be non-NULL and point to a valid pthread_attr. |
405 | PDP must be non-NULL. */ |
406 | static int |
407 | allocate_stack (const struct pthread_attr *attr, struct pthread **pdp, |
408 | ALLOCATE_STACK_PARMS) |
409 | { |
410 | struct pthread *pd; |
411 | size_t size; |
412 | size_t pagesize_m1 = __getpagesize () - 1; |
413 | |
414 | assert (powerof2 (pagesize_m1 + 1)); |
415 | assert (TCB_ALIGNMENT >= STACK_ALIGN); |
416 | |
417 | /* Get the stack size from the attribute if it is set. Otherwise we |
418 | use the default we determined at start time. */ |
419 | if (attr->stacksize != 0) |
420 | size = attr->stacksize; |
421 | else |
422 | { |
423 | lll_lock (__default_pthread_attr_lock, LLL_PRIVATE); |
424 | size = __default_pthread_attr.stacksize; |
425 | lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE); |
426 | } |
427 | |
428 | /* Get memory for the stack. */ |
429 | if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR)) |
430 | { |
431 | uintptr_t adj; |
432 | char *stackaddr = (char *) attr->stackaddr; |
433 | |
434 | /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct |
435 | pthread at the top of the stack block. Later we adjust the guard |
436 | location and stack address to match the _STACK_GROWS_UP case. */ |
437 | if (_STACK_GROWS_UP) |
438 | stackaddr += attr->stacksize; |
439 | |
440 | /* If the user also specified the size of the stack make sure it |
441 | is large enough. */ |
442 | if (attr->stacksize != 0 |
443 | && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK)) |
444 | return EINVAL; |
445 | |
446 | /* Adjust stack size for alignment of the TLS block. */ |
447 | #if TLS_TCB_AT_TP |
448 | adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE) |
449 | & __static_tls_align_m1; |
450 | assert (size > adj + TLS_TCB_SIZE); |
451 | #elif TLS_DTV_AT_TP |
452 | adj = ((uintptr_t) stackaddr - __static_tls_size) |
453 | & __static_tls_align_m1; |
454 | assert (size > adj); |
455 | #endif |
456 | |
457 | /* The user provided some memory. Let's hope it matches the |
458 | size... We do not allocate guard pages if the user provided |
459 | the stack. It is the user's responsibility to do this if it |
460 | is wanted. */ |
461 | #if TLS_TCB_AT_TP |
462 | pd = (struct pthread *) ((uintptr_t) stackaddr |
463 | - TLS_TCB_SIZE - adj); |
464 | #elif TLS_DTV_AT_TP |
465 | pd = (struct pthread *) (((uintptr_t) stackaddr |
466 | - __static_tls_size - adj) |
467 | - TLS_PRE_TCB_SIZE); |
468 | #endif |
469 | |
470 | /* The user provided stack memory needs to be cleared. */ |
471 | memset (pd, '\0', sizeof (struct pthread)); |
472 | |
473 | /* The first TSD block is included in the TCB. */ |
474 | pd->specific[0] = pd->specific_1stblock; |
475 | |
476 | /* Remember the stack-related values. */ |
477 | pd->stackblock = (char *) stackaddr - size; |
478 | pd->stackblock_size = size; |
479 | |
480 | /* This is a user-provided stack. It will not be queued in the |
481 | stack cache nor will the memory (except the TLS memory) be freed. */ |
482 | pd->user_stack = true; |
483 | |
484 | /* This is at least the second thread. */ |
485 | pd->header.multiple_threads = 1; |
486 | #ifndef TLS_MULTIPLE_THREADS_IN_TCB |
487 | __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1; |
488 | #endif |
489 | |
490 | #ifndef __ASSUME_PRIVATE_FUTEX |
491 | /* The thread must know when private futexes are supported. */ |
492 | pd->header.private_futex = THREAD_GETMEM (THREAD_SELF, |
493 | header.private_futex); |
494 | #endif |
495 | |
496 | #ifdef NEED_DL_SYSINFO |
497 | SETUP_THREAD_SYSINFO (pd); |
498 | #endif |
499 | |
500 | /* Don't allow setxid until cloned. */ |
501 | pd->setxid_futex = -1; |
502 | |
503 | /* Allocate the DTV for this thread. */ |
504 | if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL) |
505 | { |
506 | /* Something went wrong. */ |
507 | assert (errno == ENOMEM); |
508 | return errno; |
509 | } |
510 | |
511 | |
512 | /* Prepare to modify global data. */ |
513 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
514 | |
515 | /* And add to the list of stacks in use. */ |
516 | list_add (&pd->list, &__stack_user); |
517 | |
518 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
519 | } |
520 | else |
521 | { |
522 | /* Allocate some anonymous memory. If possible use the cache. */ |
523 | size_t guardsize; |
524 | size_t reqsize; |
525 | void *mem; |
526 | const int prot = (PROT_READ | PROT_WRITE |
527 | | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0)); |
528 | |
529 | /* Adjust the stack size for alignment. */ |
530 | size &= ~__static_tls_align_m1; |
531 | assert (size != 0); |
532 | |
/* Make sure the size of the stack is large enough for the guard and,
if needed, the thread descriptor. */
535 | guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1; |
536 | if (guardsize < attr->guardsize || size + guardsize < guardsize) |
537 | /* Arithmetic overflow. */ |
538 | return EINVAL; |
539 | size += guardsize; |
540 | if (__builtin_expect (size < ((guardsize + __static_tls_size |
541 | + MINIMAL_REST_STACK + pagesize_m1) |
542 | & ~pagesize_m1), |
543 | 0)) |
544 | /* The stack is too small (or the guard too large). */ |
545 | return EINVAL; |
546 | |
547 | /* Try to get a stack from the cache. */ |
548 | reqsize = size; |
549 | pd = get_cached_stack (&size, &mem); |
550 | if (pd == NULL) |
551 | { |
552 | /* To avoid aliasing effects on a larger scale than pages we |
553 | adjust the allocated stack size if necessary. This way |
554 | allocations directly following each other will not have |
555 | aliasing problems. */ |
556 | #if MULTI_PAGE_ALIASING != 0 |
557 | if ((size % MULTI_PAGE_ALIASING) == 0) |
558 | size += pagesize_m1 + 1; |
559 | #endif |
560 | |
/* If a guard page is required, avoid committing memory by first
allocating with PROT_NONE and then setting the required permissions
on everything except the guard page. */
564 | mem = __mmap (NULL, size, (guardsize == 0) ? prot : PROT_NONE, |
565 | MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); |
566 | |
567 | if (__glibc_unlikely (mem == MAP_FAILED)) |
568 | return errno; |
569 | |
570 | /* SIZE is guaranteed to be greater than zero. |
571 | So we can never get a null pointer back from mmap. */ |
572 | assert (mem != NULL); |
573 | |
574 | /* Place the thread descriptor at the end of the stack. */ |
575 | #if TLS_TCB_AT_TP |
576 | pd = (struct pthread *) ((char *) mem + size) - 1; |
577 | #elif TLS_DTV_AT_TP |
578 | pd = (struct pthread *) ((((uintptr_t) mem + size |
579 | - __static_tls_size) |
580 | & ~__static_tls_align_m1) |
581 | - TLS_PRE_TCB_SIZE); |
582 | #endif |
583 | |
584 | /* Now mprotect the required region excluding the guard area. */ |
585 | if (__glibc_likely (guardsize > 0)) |
586 | { |
587 | char *guard = guard_position (mem, size, guardsize, pd, |
588 | pagesize_m1); |
589 | if (setup_stack_prot (mem, size, guard, guardsize, prot) != 0) |
590 | { |
591 | __munmap (mem, size); |
592 | return errno; |
593 | } |
594 | } |
595 | |
596 | /* Remember the stack-related values. */ |
597 | pd->stackblock = mem; |
598 | pd->stackblock_size = size; |
/* Record the guard size of the newly allocated stack to avoid a
redundant mprotect in the guard resize code below. */
601 | pd->guardsize = guardsize; |
602 | |
/* The first block of the thread-specific data array is part of the
descriptor itself. Its address will not change for the lifetime
of this descriptor. */
606 | pd->specific[0] = pd->specific_1stblock; |
607 | |
608 | /* This is at least the second thread. */ |
609 | pd->header.multiple_threads = 1; |
610 | #ifndef TLS_MULTIPLE_THREADS_IN_TCB |
611 | __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1; |
612 | #endif |
613 | |
614 | #ifndef __ASSUME_PRIVATE_FUTEX |
615 | /* The thread must know when private futexes are supported. */ |
616 | pd->header.private_futex = THREAD_GETMEM (THREAD_SELF, |
617 | header.private_futex); |
618 | #endif |
619 | |
620 | #ifdef NEED_DL_SYSINFO |
621 | SETUP_THREAD_SYSINFO (pd); |
622 | #endif |
623 | |
624 | /* Don't allow setxid until cloned. */ |
625 | pd->setxid_futex = -1; |
626 | |
627 | /* Allocate the DTV for this thread. */ |
628 | if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL) |
629 | { |
630 | /* Something went wrong. */ |
631 | assert (errno == ENOMEM); |
632 | |
633 | /* Free the stack memory we just allocated. */ |
634 | (void) __munmap (mem, size); |
635 | |
636 | return errno; |
637 | } |
638 | |
639 | |
640 | /* Prepare to modify global data. */ |
641 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
642 | |
643 | /* And add to the list of stacks in use. */ |
644 | stack_list_add (&pd->list, &stack_used); |
645 | |
646 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
647 | |
648 | |
649 | /* There might have been a race. Another thread might have |
650 | caused the stacks to get exec permission while this new |
651 | stack was prepared. Detect if this was possible and |
652 | change the permission if necessary. */ |
653 | if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0 |
654 | && (prot & PROT_EXEC) == 0, 0)) |
655 | { |
656 | int err = change_stack_perm (pd |
657 | #ifdef NEED_SEPARATE_REGISTER_STACK |
658 | , ~pagesize_m1 |
659 | #endif |
660 | ); |
661 | if (err != 0) |
662 | { |
663 | /* Free the stack memory we just allocated. */ |
664 | (void) __munmap (mem, size); |
665 | |
666 | return err; |
667 | } |
668 | } |
669 | |
670 | |
671 | /* Note that all of the stack and the thread descriptor is |
672 | zeroed. This means we do not have to initialize fields |
673 | with initial value zero. This is specifically true for |
674 | the 'tid' field which is always set back to zero once the |
675 | stack is not used anymore and for the 'guardsize' field |
676 | which will be read next. */ |
677 | } |
678 | |
679 | /* Create or resize the guard area if necessary. */ |
680 | if (__glibc_unlikely (guardsize > pd->guardsize)) |
681 | { |
682 | char *guard = guard_position (mem, size, guardsize, pd, |
683 | pagesize_m1); |
684 | if (__mprotect (guard, guardsize, PROT_NONE) != 0) |
685 | { |
686 | mprot_error: |
687 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
688 | |
689 | /* Remove the thread from the list. */ |
690 | stack_list_del (&pd->list); |
691 | |
692 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
693 | |
694 | /* Get rid of the TLS block we allocated. */ |
695 | _dl_deallocate_tls (TLS_TPADJ (pd), false); |
696 | |
/* Free the stack memory regardless of whether the size
of the cache is over the limit or not. If this piece
of memory caused problems we had better not use it
anymore. Possible errors are ignored; there is
nothing we could do. */
702 | (void) __munmap (mem, size); |
703 | |
704 | return errno; |
705 | } |
706 | |
707 | pd->guardsize = guardsize; |
708 | } |
709 | else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize, |
710 | 0)) |
711 | { |
712 | /* The old guard area is too large. */ |
713 | |
714 | #ifdef NEED_SEPARATE_REGISTER_STACK |
715 | char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1); |
716 | char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1); |
717 | |
718 | if (oldguard < guard |
719 | && __mprotect (oldguard, guard - oldguard, prot) != 0) |
720 | goto mprot_error; |
721 | |
722 | if (__mprotect (guard + guardsize, |
723 | oldguard + pd->guardsize - guard - guardsize, |
724 | prot) != 0) |
725 | goto mprot_error; |
726 | #elif _STACK_GROWS_DOWN |
727 | if (__mprotect ((char *) mem + guardsize, pd->guardsize - guardsize, |
728 | prot) != 0) |
729 | goto mprot_error; |
730 | #elif _STACK_GROWS_UP |
731 | char *new_guard = (char *)(((uintptr_t) pd - guardsize) |
732 | & ~pagesize_m1); |
733 | char *old_guard = (char *)(((uintptr_t) pd - pd->guardsize) |
734 | & ~pagesize_m1); |
735 | /* The guard size difference might be > 0, but once rounded |
736 | to the nearest page the size difference might be zero. */ |
737 | if (new_guard > old_guard |
&& __mprotect (old_guard, new_guard - old_guard, prot) != 0)
739 | goto mprot_error; |
740 | #endif |
741 | |
742 | pd->guardsize = guardsize; |
743 | } |
744 | /* The pthread_getattr_np() calls need to get passed the size |
745 | requested in the attribute, regardless of how large the |
746 | actually used guardsize is. */ |
747 | pd->reported_guardsize = guardsize; |
748 | } |
749 | |
750 | /* Initialize the lock. We have to do this unconditionally since the |
751 | stillborn thread could be canceled while the lock is taken. */ |
752 | pd->lock = LLL_LOCK_INITIALIZER; |
753 | |
754 | /* The robust mutex lists also need to be initialized |
755 | unconditionally because the cleanup for the previous stack owner |
756 | might have happened in the kernel. */ |
757 | pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock) |
758 | - offsetof (pthread_mutex_t, |
759 | __data.__list.__next)); |
760 | pd->robust_head.list_op_pending = NULL; |
761 | #if __PTHREAD_MUTEX_HAVE_PREV |
762 | pd->robust_prev = &pd->robust_head; |
763 | #endif |
764 | pd->robust_head.list = &pd->robust_head; |
765 | |
766 | /* We place the thread descriptor at the end of the stack. */ |
767 | *pdp = pd; |
768 | |
769 | #if _STACK_GROWS_DOWN |
770 | void *stacktop; |
771 | |
772 | # if TLS_TCB_AT_TP |
773 | /* The stack begins before the TCB and the static TLS block. */ |
774 | stacktop = ((char *) (pd + 1) - __static_tls_size); |
775 | # elif TLS_DTV_AT_TP |
776 | stacktop = (char *) (pd - 1); |
777 | # endif |
778 | |
779 | # ifdef NEED_SEPARATE_REGISTER_STACK |
780 | *stack = pd->stackblock; |
781 | *stacksize = stacktop - *stack; |
782 | # else |
783 | *stack = stacktop; |
784 | # endif |
785 | #else |
786 | *stack = pd->stackblock; |
787 | #endif |
788 | |
789 | return 0; |
790 | } |
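
/* A worked example of the TLS_TCB_AT_TP arithmetic above, purely for
illustration: for a mapping of SIZE bytes at MEM, the descriptor PD is
placed at (char *) MEM + SIZE - sizeof (struct pthread), the static TLS
block sits immediately below it, and the value reported through *stack
is (char *) (PD + 1) - __static_tls_size, i.e. the highest address the
new thread may use as its initial stack pointer when the stack grows
downwards. */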
791 | |
792 | |
793 | void |
794 | internal_function |
795 | __deallocate_stack (struct pthread *pd) |
796 | { |
797 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
798 | |
799 | /* Remove the thread from the list of threads with user defined |
800 | stacks. */ |
801 | stack_list_del (&pd->list); |
802 | |
803 | /* Not much to do. Just free the mmap()ed memory. Note that we do |
804 | not reset the 'used' flag in the 'tid' field. This is done by |
805 | the kernel. If no thread has been created yet this field is |
806 | still zero. */ |
807 | if (__glibc_likely (! pd->user_stack)) |
808 | (void) queue_stack (pd); |
809 | else |
810 | /* Free the memory associated with the ELF TLS. */ |
811 | _dl_deallocate_tls (TLS_TPADJ (pd), false); |
812 | |
813 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
814 | } |
815 | |
816 | |
817 | int |
818 | internal_function |
819 | __make_stacks_executable (void **stack_endp) |
820 | { |
821 | /* First the main thread's stack. */ |
822 | int err = _dl_make_stack_executable (stack_endp); |
823 | if (err != 0) |
824 | return err; |
825 | |
826 | #ifdef NEED_SEPARATE_REGISTER_STACK |
827 | const size_t pagemask = ~(__getpagesize () - 1); |
828 | #endif |
829 | |
830 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
831 | |
832 | list_t *runp; |
833 | list_for_each (runp, &stack_used) |
834 | { |
835 | err = change_stack_perm (list_entry (runp, struct pthread, list) |
836 | #ifdef NEED_SEPARATE_REGISTER_STACK |
837 | , pagemask |
838 | #endif |
839 | ); |
840 | if (err != 0) |
841 | break; |
842 | } |
843 | |
844 | /* Also change the permission for the currently unused stacks. This |
845 | might be wasted time but better spend it here than adding a check |
846 | in the fast path. */ |
847 | if (err == 0) |
848 | list_for_each (runp, &stack_cache) |
849 | { |
850 | err = change_stack_perm (list_entry (runp, struct pthread, list) |
851 | #ifdef NEED_SEPARATE_REGISTER_STACK |
852 | , pagemask |
853 | #endif |
854 | ); |
855 | if (err != 0) |
856 | break; |
857 | } |
858 | |
859 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
860 | |
861 | return err; |
862 | } |
863 | |
864 | |
865 | /* In case of a fork() call the memory allocation in the child will be |
866 | the same but only one thread is running. All stacks except that of |
867 | the one running thread are not used anymore. We have to recycle |
868 | them. */ |
869 | void |
870 | __reclaim_stacks (void) |
871 | { |
872 | struct pthread *self = (struct pthread *) THREAD_SELF; |
873 | |
/* No locking necessary. The calling thread is the only one running.
But we have to be aware that we might have interrupted a list
operation. */
877 | |
878 | if (in_flight_stack != 0) |
879 | { |
880 | bool add_p = in_flight_stack & 1; |
881 | list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1); |
882 | |
883 | if (add_p) |
884 | { |
885 | /* We always add at the beginning of the list. So in this case we |
886 | only need to check the beginning of these lists to see if the |
887 | pointers at the head of the list are inconsistent. */ |
888 | list_t *l = NULL; |
889 | |
890 | if (stack_used.next->prev != &stack_used) |
891 | l = &stack_used; |
892 | else if (stack_cache.next->prev != &stack_cache) |
893 | l = &stack_cache; |
894 | |
895 | if (l != NULL) |
896 | { |
897 | assert (l->next->prev == elem); |
898 | elem->next = l->next; |
899 | elem->prev = l; |
900 | l->next = elem; |
901 | } |
902 | } |
903 | else |
904 | { |
905 | /* We can simply always replay the delete operation. */ |
906 | elem->next->prev = elem->prev; |
907 | elem->prev->next = elem->next; |
908 | } |
909 | } |
910 | |
911 | /* Mark all stacks except the still running one as free. */ |
912 | list_t *runp; |
913 | list_for_each (runp, &stack_used) |
914 | { |
915 | struct pthread *curp = list_entry (runp, struct pthread, list); |
916 | if (curp != self) |
917 | { |
918 | /* This marks the stack as free. */ |
919 | curp->tid = 0; |
920 | |
921 | /* Account for the size of the stack. */ |
922 | stack_cache_actsize += curp->stackblock_size; |
923 | |
924 | if (curp->specific_used) |
925 | { |
926 | /* Clear the thread-specific data. */ |
927 | memset (curp->specific_1stblock, '\0', |
928 | sizeof (curp->specific_1stblock)); |
929 | |
930 | curp->specific_used = false; |
931 | |
932 | for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt) |
933 | if (curp->specific[cnt] != NULL) |
934 | { |
935 | memset (curp->specific[cnt], '\0', |
936 | sizeof (curp->specific_1stblock)); |
937 | |
/* We allocated this block and do not free it here, so
set the bit again. */
940 | curp->specific_used = true; |
941 | } |
942 | } |
943 | } |
944 | } |
945 | |
946 | /* Add the stack of all running threads to the cache. */ |
947 | list_splice (&stack_used, &stack_cache); |
948 | |
/* Remove the entry for the current thread from the cache list
and add it to the appropriate list of running threads. Which of
the two lists it goes on is decided by the user_stack flag. */
952 | stack_list_del (&self->list); |
953 | |
954 | /* Re-initialize the lists for all the threads. */ |
955 | INIT_LIST_HEAD (&stack_used); |
956 | INIT_LIST_HEAD (&__stack_user); |
957 | |
958 | if (__glibc_unlikely (THREAD_GETMEM (self, user_stack))) |
959 | list_add (&self->list, &__stack_user); |
960 | else |
961 | list_add (&self->list, &stack_used); |
962 | |
963 | /* There is one thread running. */ |
964 | __nptl_nthreads = 1; |
965 | |
966 | in_flight_stack = 0; |
967 | |
968 | /* Initialize locks. */ |
969 | stack_cache_lock = LLL_LOCK_INITIALIZER; |
970 | __default_pthread_attr_lock = LLL_LOCK_INITIALIZER; |
971 | } |
972 | |
973 | |
974 | #if HP_TIMING_AVAIL |
975 | # undef __find_thread_by_id |
976 | /* Find a thread given the thread ID. */ |
977 | attribute_hidden |
978 | struct pthread * |
979 | __find_thread_by_id (pid_t tid) |
980 | { |
981 | struct pthread *result = NULL; |
982 | |
983 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
984 | |
985 | /* Iterate over the list with system-allocated threads first. */ |
986 | list_t *runp; |
987 | list_for_each (runp, &stack_used) |
988 | { |
989 | struct pthread *curp; |
990 | |
991 | curp = list_entry (runp, struct pthread, list); |
992 | |
993 | if (curp->tid == tid) |
994 | { |
995 | result = curp; |
996 | goto out; |
997 | } |
998 | } |
999 | |
1000 | /* Now the list with threads using user-allocated stacks. */ |
1001 | list_for_each (runp, &__stack_user) |
1002 | { |
1003 | struct pthread *curp; |
1004 | |
1005 | curp = list_entry (runp, struct pthread, list); |
1006 | |
1007 | if (curp->tid == tid) |
1008 | { |
1009 | result = curp; |
1010 | goto out; |
1011 | } |
1012 | } |
1013 | |
1014 | out: |
1015 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
1016 | |
1017 | return result; |
1018 | } |
1019 | #endif |
1020 | |
1021 | |
1022 | #ifdef SIGSETXID |
1023 | static void |
1024 | internal_function |
1025 | setxid_mark_thread (struct xid_command *cmdp, struct pthread *t) |
1026 | { |
1027 | int ch; |
1028 | |
1029 | /* Wait until this thread is cloned. */ |
1030 | if (t->setxid_futex == -1 |
1031 | && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1)) |
1032 | do |
1033 | futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE); |
1034 | while (t->setxid_futex == -2); |
1035 | |
1036 | /* Don't let the thread exit before the setxid handler runs. */ |
1037 | t->setxid_futex = 0; |
1038 | |
1039 | do |
1040 | { |
1041 | ch = t->cancelhandling; |
1042 | |
1043 | /* If the thread is exiting right now, ignore it. */ |
1044 | if ((ch & EXITING_BITMASK) != 0) |
1045 | { |
1046 | /* Release the futex if there is no other setxid in |
1047 | progress. */ |
1048 | if ((ch & SETXID_BITMASK) == 0) |
1049 | { |
1050 | t->setxid_futex = 1; |
1051 | futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE); |
1052 | } |
1053 | return; |
1054 | } |
1055 | } |
1056 | while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling, |
1057 | ch | SETXID_BITMASK, ch)); |
1058 | } |
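
/* The setxid_futex values used above and in allocate_stack form a small
hand-shake (a descriptive summary, not an exhaustive state machine):
-1 means the thread has not been cloned yet, -2 means a setxid
operation is waiting for the clone to finish, 0 keeps the thread from
exiting while the handler runs, and 1 releases any waiters again. */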
1059 | |
1060 | |
1061 | static void |
1062 | internal_function |
1063 | setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t) |
1064 | { |
1065 | int ch; |
1066 | |
1067 | do |
1068 | { |
1069 | ch = t->cancelhandling; |
1070 | if ((ch & SETXID_BITMASK) == 0) |
1071 | return; |
1072 | } |
1073 | while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling, |
1074 | ch & ~SETXID_BITMASK, ch)); |
1075 | |
1076 | /* Release the futex just in case. */ |
1077 | t->setxid_futex = 1; |
1078 | futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE); |
1079 | } |
1080 | |
1081 | |
1082 | static int |
1083 | internal_function |
1084 | setxid_signal_thread (struct xid_command *cmdp, struct pthread *t) |
1085 | { |
1086 | if ((t->cancelhandling & SETXID_BITMASK) == 0) |
1087 | return 0; |
1088 | |
1089 | int val; |
1090 | pid_t pid = __getpid (); |
1091 | INTERNAL_SYSCALL_DECL (err); |
1092 | val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, t->tid, SIGSETXID); |
1093 | |
/* If this failed, the thread must not have started yet or else it has
already exited. */
1095 | if (!INTERNAL_SYSCALL_ERROR_P (val, err)) |
1096 | { |
1097 | atomic_increment (&cmdp->cntr); |
1098 | return 1; |
1099 | } |
1100 | else |
1101 | return 0; |
1102 | } |
1103 | |
1104 | /* Check for consistency across set*id system call results. The abort |
should not happen as long as all privilege changes happen through
1106 | the glibc wrappers. ERROR must be 0 (no error) or an errno |
1107 | code. */ |
1108 | void |
1109 | attribute_hidden |
1110 | __nptl_setxid_error (struct xid_command *cmdp, int error) |
1111 | { |
1112 | do |
1113 | { |
1114 | int olderror = cmdp->error; |
1115 | if (olderror == error) |
1116 | break; |
1117 | if (olderror != -1) |
1118 | /* Mismatch between current and previous results. */ |
1119 | abort (); |
1120 | } |
1121 | while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1)); |
1122 | } |
1123 | |
1124 | int |
1125 | attribute_hidden |
1126 | __nptl_setxid (struct xid_command *cmdp) |
1127 | { |
1128 | int signalled; |
1129 | int result; |
1130 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
1131 | |
1132 | __xidcmd = cmdp; |
1133 | cmdp->cntr = 0; |
1134 | cmdp->error = -1; |
1135 | |
1136 | struct pthread *self = THREAD_SELF; |
1137 | |
1138 | /* Iterate over the list with system-allocated threads first. */ |
1139 | list_t *runp; |
1140 | list_for_each (runp, &stack_used) |
1141 | { |
1142 | struct pthread *t = list_entry (runp, struct pthread, list); |
1143 | if (t == self) |
1144 | continue; |
1145 | |
1146 | setxid_mark_thread (cmdp, t); |
1147 | } |
1148 | |
1149 | /* Now the list with threads using user-allocated stacks. */ |
1150 | list_for_each (runp, &__stack_user) |
1151 | { |
1152 | struct pthread *t = list_entry (runp, struct pthread, list); |
1153 | if (t == self) |
1154 | continue; |
1155 | |
1156 | setxid_mark_thread (cmdp, t); |
1157 | } |
1158 | |
1159 | /* Iterate until we don't succeed in signalling anyone. That means |
1160 | we have gotten all running threads, and their children will be |
1161 | automatically correct once started. */ |
1162 | do |
1163 | { |
1164 | signalled = 0; |
1165 | |
1166 | list_for_each (runp, &stack_used) |
1167 | { |
1168 | struct pthread *t = list_entry (runp, struct pthread, list); |
1169 | if (t == self) |
1170 | continue; |
1171 | |
1172 | signalled += setxid_signal_thread (cmdp, t); |
1173 | } |
1174 | |
1175 | list_for_each (runp, &__stack_user) |
1176 | { |
1177 | struct pthread *t = list_entry (runp, struct pthread, list); |
1178 | if (t == self) |
1179 | continue; |
1180 | |
1181 | signalled += setxid_signal_thread (cmdp, t); |
1182 | } |
1183 | |
1184 | int cur = cmdp->cntr; |
1185 | while (cur != 0) |
1186 | { |
1187 | futex_wait_simple ((unsigned int *) &cmdp->cntr, cur, |
1188 | FUTEX_PRIVATE); |
1189 | cur = cmdp->cntr; |
1190 | } |
1191 | } |
1192 | while (signalled != 0); |
1193 | |
1194 | /* Clean up flags, so that no thread blocks during exit waiting |
1195 | for a signal which will never come. */ |
1196 | list_for_each (runp, &stack_used) |
1197 | { |
1198 | struct pthread *t = list_entry (runp, struct pthread, list); |
1199 | if (t == self) |
1200 | continue; |
1201 | |
1202 | setxid_unmark_thread (cmdp, t); |
1203 | } |
1204 | |
1205 | list_for_each (runp, &__stack_user) |
1206 | { |
1207 | struct pthread *t = list_entry (runp, struct pthread, list); |
1208 | if (t == self) |
1209 | continue; |
1210 | |
1211 | setxid_unmark_thread (cmdp, t); |
1212 | } |
1213 | |
/* This must be last; otherwise the current thread might not have
permission to send the SIGSETXID signal to the other threads. */
1216 | INTERNAL_SYSCALL_DECL (err); |
1217 | result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3, |
1218 | cmdp->id[0], cmdp->id[1], cmdp->id[2]); |
1219 | int error = 0; |
1220 | if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err))) |
1221 | { |
1222 | error = INTERNAL_SYSCALL_ERRNO (result, err); |
1223 | __set_errno (error); |
1224 | result = -1; |
1225 | } |
1226 | __nptl_setxid_error (cmdp, error); |
1227 | |
1228 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
1229 | return result; |
1230 | } |
1231 | #endif /* SIGSETXID. */ |
1232 | |
1233 | |
1234 | static inline void __attribute__((always_inline)) |
1235 | init_one_static_tls (struct pthread *curp, struct link_map *map) |
1236 | { |
1237 | # if TLS_TCB_AT_TP |
1238 | void *dest = (char *) curp - map->l_tls_offset; |
1239 | # elif TLS_DTV_AT_TP |
1240 | void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE; |
1241 | # else |
1242 | # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" |
1243 | # endif |
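
/* For TLS_TCB_AT_TP the module's block lives below the thread pointer,
hence the subtraction; for TLS_DTV_AT_TP it lives above the pre-TCB
area that holds struct pthread, hence the addition of
TLS_PRE_TCB_SIZE. */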
1244 | |
1245 | /* Initialize the memory. */ |
1246 | memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size), |
1247 | '\0', map->l_tls_blocksize - map->l_tls_initimage_size); |
1248 | } |
1249 | |
1250 | void |
1251 | attribute_hidden |
1252 | __pthread_init_static_tls (struct link_map *map) |
1253 | { |
1254 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
1255 | |
1256 | /* Iterate over the list with system-allocated threads first. */ |
1257 | list_t *runp; |
1258 | list_for_each (runp, &stack_used) |
1259 | init_one_static_tls (list_entry (runp, struct pthread, list), map); |
1260 | |
1261 | /* Now the list with threads using user-allocated stacks. */ |
1262 | list_for_each (runp, &__stack_user) |
1263 | init_one_static_tls (list_entry (runp, struct pthread, list), map); |
1264 | |
1265 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
1266 | } |
1267 | |
1268 | |
1269 | void |
1270 | attribute_hidden |
1271 | __wait_lookup_done (void) |
1272 | { |
1273 | lll_lock (stack_cache_lock, LLL_PRIVATE); |
1274 | |
1275 | struct pthread *self = THREAD_SELF; |
1276 | |
1277 | /* Iterate over the list with system-allocated threads first. */ |
1278 | list_t *runp; |
1279 | list_for_each (runp, &stack_used) |
1280 | { |
1281 | struct pthread *t = list_entry (runp, struct pthread, list); |
1282 | if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED) |
1283 | continue; |
1284 | |
1285 | int *const gscope_flagp = &t->header.gscope_flag; |
1286 | |
1287 | /* We have to wait until this thread is done with the global |
1288 | scope. First tell the thread that we are waiting and |
1289 | possibly have to be woken. */ |
1290 | if (atomic_compare_and_exchange_bool_acq (gscope_flagp, |
1291 | THREAD_GSCOPE_FLAG_WAIT, |
1292 | THREAD_GSCOPE_FLAG_USED)) |
1293 | continue; |
1294 | |
1295 | do |
1296 | futex_wait_simple ((unsigned int *) gscope_flagp, |
1297 | THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE); |
1298 | while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT); |
1299 | } |
1300 | |
1301 | /* Now the list with threads using user-allocated stacks. */ |
1302 | list_for_each (runp, &__stack_user) |
1303 | { |
1304 | struct pthread *t = list_entry (runp, struct pthread, list); |
1305 | if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED) |
1306 | continue; |
1307 | |
1308 | int *const gscope_flagp = &t->header.gscope_flag; |
1309 | |
1310 | /* We have to wait until this thread is done with the global |
1311 | scope. First tell the thread that we are waiting and |
1312 | possibly have to be woken. */ |
1313 | if (atomic_compare_and_exchange_bool_acq (gscope_flagp, |
1314 | THREAD_GSCOPE_FLAG_WAIT, |
1315 | THREAD_GSCOPE_FLAG_USED)) |
1316 | continue; |
1317 | |
1318 | do |
1319 | futex_wait_simple ((unsigned int *) gscope_flagp, |
1320 | THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE); |
1321 | while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT); |
1322 | } |
1323 | |
1324 | lll_unlock (stack_cache_lock, LLL_PRIVATE); |
1325 | } |
1326 | |