/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <ldsodefs.h>

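/* Fill in *ATTR with attributes describing the already-running thread
   THREAD_ID: scheduling policy and parameters, detach state, guard size,
   stack placement and CPU affinity.  Returns 0 on success or a positive
   error number.  */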
int
pthread_getattr_np (pthread_t thread_id, pthread_attr_t *attr)
{
  struct pthread *thread = (struct pthread *) thread_id;
  struct pthread_attr *iattr = (struct pthread_attr *) attr;
  int ret = 0;

  lll_lock (thread->lock, LLL_PRIVATE);

  /* The thread library is responsible for keeping the values in the
     thread descriptor up-to-date in case the user changes them.  */
  memcpy (&iattr->schedparam, &thread->schedparam,
          sizeof (struct sched_param));
  iattr->schedpolicy = thread->schedpolicy;

  /* Copy the flags from the thread descriptor.  */
  iattr->flags = thread->flags;

  /* The thread might be detached by now.  */
  if (IS_DETACHED (thread))
    iattr->flags |= ATTR_FLAG_DETACHSTATE;

  /* This is the guardsize after adjusting it.  */
  iattr->guardsize = thread->reported_guardsize;

  /* The sizes are subject to alignment.  */
  if (__glibc_likely (thread->stackblock != NULL))
    {
      /* The stack size reported to the user should not include the
         guard size.  */
      iattr->stacksize = thread->stackblock_size - thread->guardsize;
#if _STACK_GROWS_DOWN
      iattr->stackaddr = (char *) thread->stackblock
                         + thread->stackblock_size;
#else
      iattr->stackaddr = (char *) thread->stackblock;
#endif
    }
  else
    {
      /* No stack information is available.  This must be the initial
         thread; determine its stack from the stack resource limit and
         /proc/self/maps.  */

      /* Stack size limit.  */
      struct rlimit rl;

      /* The safest way to get the top of the stack is to read
         /proc/self/maps and locate the line into which
         __libc_stack_end falls.  */
      FILE *fp = fopen ("/proc/self/maps", "rce");
      if (fp == NULL)
        ret = errno;
      /* We need the limit of the stack in any case.  */
      else
        {
          if (getrlimit (RLIMIT_STACK, &rl) != 0)
            ret = errno;
          else
            {
              /* We consider the main process stack to have ended with
                 the page containing __libc_stack_end.  There is stuff below
                 it in the stack too, like the program arguments, environment
                 variables and auxv info, but we ignore those pages when
                 returning size so that the output is consistent when the
                 stack is marked executable due to a loaded DSO requiring
                 it.  */
              void *stack_end = (void *) ((uintptr_t) __libc_stack_end
                                          & -(uintptr_t) GLRO(dl_pagesize));
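              /* For a downward-growing stack the page containing
                 __libc_stack_end is itself part of the stack, so the
                 reported end is the next page boundary above it.  */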
#if _STACK_GROWS_DOWN
              stack_end += GLRO(dl_pagesize);
#endif
              /* No locking of the stream is needed; it is used only
                 here.  */
              __fsetlocking (fp, FSETLOCKING_BYCALLER);

              /* Until we find an entry (which should always happen),
                 mark the result as a failure.  */
              ret = ENOENT;

              char *line = NULL;
              size_t linelen = 0;
#if _STACK_GROWS_DOWN
              uintptr_t last_to = 0;
#endif

              while (! feof_unlocked (fp))
                {
                  if (__getdelim (&line, &linelen, '\n', fp) <= 0)
                    break;

                  uintptr_t from;
                  uintptr_t to;
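                  /* Each line of /proc/self/maps begins with the mapping's
                     address range as "<from>-<to>" in hexadecimal; skip any
                     line that does not match that format.  */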
                  if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
                    continue;
                  if (from <= (uintptr_t) __libc_stack_end
                      && (uintptr_t) __libc_stack_end < to)
                    {
                      /* Found the entry.  Now we have the info we need.  */
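                      /* The stack may occupy at most rl.rlim_cur bytes
                         ending at the top of this mapping; the pages
                         between stack_end and that top (program arguments,
                         environment, auxv) are not reported, hence the
                         subtraction.  */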
                      iattr->stackaddr = stack_end;
                      iattr->stacksize =
                        rl.rlim_cur - (size_t) (to - (uintptr_t) stack_end);

                      /* Cut it down to align it to page size since otherwise we
                         risk going beyond rlimit when the kernel rounds up the
                         stack extension request.  */
                      iattr->stacksize = (iattr->stacksize
                                          & -(intptr_t) GLRO(dl_pagesize));
#if _STACK_GROWS_DOWN
                      /* The limit might be too high.  The stack cannot grow
                         below the end of the preceding mapping, so clamp the
                         size to the available gap.  */
                      if ((size_t) iattr->stacksize
                          > (size_t) iattr->stackaddr - last_to)
                        iattr->stacksize = (size_t) iattr->stackaddr - last_to;
#else
                      /* The limit might be too high.  */
                      if ((size_t) iattr->stacksize
                          > to - (size_t) iattr->stackaddr)
                        iattr->stacksize = to - (size_t) iattr->stackaddr;
#endif
                      /* We succeeded; there is no need to look further.  */
                      ret = 0;
                      break;
                    }
#if _STACK_GROWS_DOWN
                  last_to = to;
#endif
                }

              free (line);
            }

          fclose (fp);
        }
    }

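  /* Mark the stack address and size just determined as explicitly set in
     the attribute; this is the same flag pthread_attr_setstack uses.  */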
  iattr->flags |= ATTR_FLAG_STACKADDR;

  if (ret == 0)
    {
      size_t size = 16;
      cpu_set_t *cpuset = NULL;

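      /* Retrieve the CPU affinity mask.  The call fails with EINVAL when
         the buffer is smaller than the kernel's affinity mask, so keep
         doubling the allocation until it succeeds or the size cap below
         is reached.  */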
      do
        {
          size <<= 1;

          void *newp = realloc (cpuset, size);
          if (newp == NULL)
            {
              ret = ENOMEM;
              break;
            }
          cpuset = (cpu_set_t *) newp;

          ret = __pthread_getaffinity_np (thread_id, size, cpuset);
        }
      /* Pick some ridiculous upper limit.  Is 8 million CPUs enough?  */
      while (ret == EINVAL && size < 1024 * 1024);

      if (ret == 0)
        {
          iattr->cpuset = cpuset;
          iattr->cpusetsize = size;
        }
      else
        {
          free (cpuset);
          if (ret == ENOSYS)
            {
              /* There is no such functionality.  */
              ret = 0;
              iattr->cpuset = NULL;
              iattr->cpusetsize = 0;
            }
        }
    }

  lll_unlock (thread->lock, LLL_PRIVATE);

  return ret;
}