1 | /* Load a shared object at runtime, relocate it, and run its initializer. |
2 | Copyright (C) 1996-2020 Free Software Foundation, Inc. |
3 | This file is part of the GNU C Library. |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License as published by the Free Software Foundation; either |
8 | version 2.1 of the License, or (at your option) any later version. |
9 | |
10 | The GNU C Library is distributed in the hope that it will be useful, |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | Lesser General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU Lesser General Public |
16 | License along with the GNU C Library; if not, see |
17 | <https://www.gnu.org/licenses/>. */ |
18 | |
19 | #include <assert.h> |
20 | #include <dlfcn.h> |
21 | #include <errno.h> |
22 | #include <libintl.h> |
23 | #include <stdio.h> |
24 | #include <stdlib.h> |
25 | #include <string.h> |
26 | #include <unistd.h> |
27 | #include <sys/mman.h> /* Check whether MAP_COPY is defined. */ |
28 | #include <sys/param.h> |
29 | #include <libc-lock.h> |
30 | #include <ldsodefs.h> |
31 | #include <sysdep-cancel.h> |
32 | #include <tls.h> |
33 | #include <stap-probe.h> |
34 | #include <atomic.h> |
35 | #include <libc-internal.h> |
36 | #include <array_length.h> |
37 | |
38 | #include <dl-dst.h> |
39 | #include <dl-prop.h> |
40 | |
41 | |
/* We must be careful not to leave the dynamic loader in an inconsistent
   state.  Thus we catch any error and re-raise it after cleaning up.  */
44 | |
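/* A rough sketch of the control flow, for orientation only: dlopen
   and dlmopen end up in _dl_open below, which takes GL(dl_load_lock)
   and runs dl_open_worker via _dl_catch_exception.  If the worker
   raises an error, _dl_open unloads the partially set up objects
   with _dl_close_worker and re-raises the exception to the caller.  */
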
45 | struct dl_open_args |
46 | { |
47 | const char *file; |
48 | int mode; |
49 | /* This is the caller of the dlopen() function. */ |
50 | const void *caller_dlopen; |
51 | struct link_map *map; |
52 | /* Namespace ID. */ |
53 | Lmid_t nsid; |
54 | |
55 | /* Original value of _ns_global_scope_pending_adds. Set by |
56 | dl_open_worker. Only valid if nsid is a real namespace |
57 | (non-negative). */ |
58 | unsigned int original_global_scope_pending_adds; |
59 | |
60 | /* Original parameters to the program and the current environment. */ |
61 | int argc; |
62 | char **argv; |
63 | char **env; |
64 | }; |
65 | |
66 | /* Called in case the global scope cannot be extended. */ |
67 | static void __attribute__ ((noreturn)) |
68 | add_to_global_resize_failure (struct link_map *new) |
69 | { |
70 | _dl_signal_error (ENOMEM, new->l_libname->name, NULL, |
N_ ("cannot extend global scope"));
72 | } |
73 | |
74 | /* Grow the global scope array for the namespace, so that all the new |
75 | global objects can be added later in add_to_global_update, without |
76 | risk of memory allocation failure. add_to_global_resize raises |
77 | exceptions for memory allocation errors. */ |
78 | static void |
79 | add_to_global_resize (struct link_map *new) |
80 | { |
81 | struct link_namespaces *ns = &GL (dl_ns)[new->l_ns]; |
82 | |
83 | /* Count the objects we have to put in the global scope. */ |
84 | unsigned int to_add = 0; |
85 | for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt) |
86 | if (new->l_searchlist.r_list[cnt]->l_global == 0) |
87 | ++to_add; |
88 | |
/* The symbols of the new object and its dependencies are to be
90 | introduced into the global scope that will be used to resolve |
91 | references from other dynamically-loaded objects. |
92 | |
93 | The global scope is the searchlist in the main link map. We |
94 | extend this list if necessary. There is one problem though: |
95 | since this structure was allocated very early (before the libc |
96 | is loaded) the memory it uses is allocated by the malloc()-stub |
97 | in the ld.so. When we come here these functions are not used |
98 | anymore. Instead the malloc() implementation of the libc is |
99 | used. But this means the block from the main map cannot be used |
in a realloc() call.  Therefore we allocate a completely new
array the first time we have to add something to the global scope.  */
102 | |
103 | if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add, |
104 | &ns->_ns_global_scope_pending_adds)) |
105 | add_to_global_resize_failure (new); |
106 | |
107 | unsigned int new_size = 0; /* 0 means no new allocation. */ |
108 | void *old_global = NULL; /* Old allocation if free-able. */ |
109 | |
110 | /* Minimum required element count for resizing. Adjusted below for |
111 | an exponential resizing policy. */ |
112 | size_t required_new_size; |
113 | if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist, |
114 | ns->_ns_global_scope_pending_adds, |
115 | &required_new_size)) |
116 | add_to_global_resize_failure (new); |
117 | |
118 | if (ns->_ns_global_scope_alloc == 0) |
119 | { |
120 | if (__builtin_add_overflow (required_new_size, 8, &new_size)) |
121 | add_to_global_resize_failure (new); |
122 | } |
123 | else if (required_new_size > ns->_ns_global_scope_alloc) |
124 | { |
125 | if (__builtin_mul_overflow (required_new_size, 2, &new_size)) |
126 | add_to_global_resize_failure (new); |
127 | |
128 | /* The old array was allocated with our malloc, not the minimal |
129 | malloc. */ |
130 | old_global = ns->_ns_main_searchlist->r_list; |
131 | } |
132 | |
133 | if (new_size > 0) |
134 | { |
135 | size_t allocation_size; |
136 | if (__builtin_mul_overflow (new_size, sizeof (struct link_map *), |
137 | &allocation_size)) |
138 | add_to_global_resize_failure (new); |
139 | struct link_map **new_global = malloc (allocation_size); |
140 | if (new_global == NULL) |
141 | add_to_global_resize_failure (new); |
142 | |
143 | /* Copy over the old entries. */ |
144 | memcpy (new_global, ns->_ns_main_searchlist->r_list, |
145 | ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *)); |
146 | |
147 | ns->_ns_global_scope_alloc = new_size; |
148 | ns->_ns_main_searchlist->r_list = new_global; |
149 | |
150 | if (!RTLD_SINGLE_THREAD_P) |
151 | THREAD_GSCOPE_WAIT (); |
152 | |
153 | free (old_global); |
154 | } |
155 | } |
156 | |
157 | /* Actually add the new global objects to the global scope. Must be |
158 | called after add_to_global_resize. This function cannot fail. */ |
159 | static void |
160 | add_to_global_update (struct link_map *new) |
161 | { |
162 | struct link_namespaces *ns = &GL (dl_ns)[new->l_ns]; |
163 | |
164 | /* Now add the new entries. */ |
165 | unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist; |
166 | for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt) |
167 | { |
168 | struct link_map *map = new->l_searchlist.r_list[cnt]; |
169 | |
170 | if (map->l_global == 0) |
171 | { |
172 | map->l_global = 1; |
173 | |
174 | /* The array has been resized by add_to_global_resize. */ |
175 | assert (new_nlist < ns->_ns_global_scope_alloc); |
176 | |
177 | ns->_ns_main_searchlist->r_list[new_nlist++] = map; |
178 | |
179 | /* We modify the global scope. Report this. */ |
180 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES)) |
181 | _dl_debug_printf ("\nadd %s [%lu] to global scope\n" , |
182 | map->l_name, map->l_ns); |
183 | } |
184 | } |
185 | |
186 | /* Some of the pending adds have been performed by the loop above. |
187 | Adjust the counter accordingly. */ |
188 | unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist; |
189 | assert (added <= ns->_ns_global_scope_pending_adds); |
190 | ns->_ns_global_scope_pending_adds -= added; |
191 | |
192 | atomic_write_barrier (); |
193 | ns->_ns_main_searchlist->r_nlist = new_nlist; |
194 | } |
195 | |
196 | /* Search link maps in all namespaces for the DSO that contains the object at |
197 | address ADDR. Returns the pointer to the link map of the matching DSO, or |
198 | NULL if a match is not found. */ |
199 | struct link_map * |
200 | _dl_find_dso_for_object (const ElfW(Addr) addr) |
201 | { |
202 | struct link_map *l; |
203 | |
/* Find the link map whose mapped address range contains ADDR. */
205 | for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns) |
206 | for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next) |
207 | if (addr >= l->l_map_start && addr < l->l_map_end |
208 | && (l->l_contiguous |
209 | || _dl_addr_inside_object (l, (ElfW(Addr)) addr))) |
210 | { |
211 | assert (ns == l->l_ns); |
212 | return l; |
213 | } |
214 | return NULL; |
215 | } |
216 | rtld_hidden_def (_dl_find_dso_for_object); |
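
/* Note: dl_open_worker below uses this to translate the caller's
   code address into the link map of the object that called dlopen,
   so that DSTs and RUNPATH/RPATH searches can be resolved relative
   to that caller.  */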
217 | |
/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}
228 | |
229 | /* Return the length of the scope for MAP. */ |
230 | static size_t |
231 | scope_size (struct link_map *map) |
232 | { |
233 | size_t cnt; |
234 | for (cnt = 0; map->l_scope[cnt] != NULL; ) |
235 | ++cnt; |
236 | return cnt; |
237 | } |
238 | |
239 | /* Resize the scopes of depended-upon objects, so that the new object |
240 | can be added later without further allocation of memory. This |
function can raise an exception due to malloc failure.  */
242 | static void |
243 | resize_scopes (struct link_map *new) |
244 | { |
245 | /* If the file is not loaded now as a dependency, add the search |
246 | list of the newly loaded object to the scope. */ |
247 | for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i) |
248 | { |
249 | struct link_map *imap = new->l_searchlist.r_list[i]; |
250 | |
251 | /* If the initializer has been called already, the object has |
252 | not been loaded here and now. */ |
253 | if (imap->l_init_called && imap->l_type == lt_loaded) |
254 | { |
255 | if (scope_has_map (imap, new)) |
256 | /* Avoid duplicates. */ |
257 | continue; |
258 | |
259 | size_t cnt = scope_size (imap); |
260 | if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max)) |
261 | { |
262 | /* The l_scope array is too small. Allocate a new one |
263 | dynamically. */ |
264 | size_t new_size; |
265 | struct r_scope_elem **newp; |
266 | |
267 | if (imap->l_scope != imap->l_scope_mem |
268 | && imap->l_scope_max < array_length (imap->l_scope_mem)) |
269 | { |
270 | /* If the current l_scope memory is not pointing to |
271 | the static memory in the structure, but the |
272 | static memory in the structure is large enough to |
273 | use for cnt + 1 scope entries, then switch to |
274 | using the static memory. */ |
275 | new_size = array_length (imap->l_scope_mem); |
276 | newp = imap->l_scope_mem; |
277 | } |
278 | else |
279 | { |
280 | new_size = imap->l_scope_max * 2; |
281 | newp = (struct r_scope_elem **) |
282 | malloc (new_size * sizeof (struct r_scope_elem *)); |
283 | if (newp == NULL) |
_dl_signal_error (ENOMEM, "dlopen", NULL,
N_("cannot create scope list"));
286 | } |
287 | |
288 | /* Copy the array and the terminating NULL. */ |
289 | memcpy (newp, imap->l_scope, |
290 | (cnt + 1) * sizeof (imap->l_scope[0])); |
291 | struct r_scope_elem **old = imap->l_scope; |
292 | |
293 | imap->l_scope = newp; |
294 | |
295 | if (old != imap->l_scope_mem) |
296 | _dl_scope_free (old); |
297 | |
298 | imap->l_scope_max = new_size; |
299 | } |
300 | } |
301 | } |
302 | } |
303 | |
304 | /* Second stage of resize_scopes: Add NEW to the scopes. Also print |
305 | debugging information about scopes if requested. |
306 | |
307 | This function cannot raise an exception because all required memory |
308 | has been allocated by a previous call to resize_scopes. */ |
309 | static void |
310 | update_scopes (struct link_map *new) |
311 | { |
312 | for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i) |
313 | { |
314 | struct link_map *imap = new->l_searchlist.r_list[i]; |
315 | int from_scope = 0; |
316 | |
317 | if (imap->l_init_called && imap->l_type == lt_loaded) |
318 | { |
319 | if (scope_has_map (imap, new)) |
320 | /* Avoid duplicates. */ |
321 | continue; |
322 | |
323 | size_t cnt = scope_size (imap); |
324 | /* Assert that resize_scopes has sufficiently enlarged the |
325 | array. */ |
326 | assert (cnt + 1 < imap->l_scope_max); |
327 | |
328 | /* First terminate the extended list. Otherwise a thread |
329 | might use the new last element and then use the garbage |
at offset CNT+1.  */
331 | imap->l_scope[cnt + 1] = NULL; |
332 | atomic_write_barrier (); |
333 | imap->l_scope[cnt] = &new->l_searchlist; |
334 | |
335 | from_scope = cnt; |
336 | } |
337 | |
338 | /* Print scope information. */ |
339 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES)) |
340 | _dl_show_scope (imap, from_scope); |
341 | } |
342 | } |
343 | |
344 | /* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate |
345 | space in GL (dl_tls_dtv_slotinfo_list). This can raise an |
346 | exception. The return value is true if any of the new objects use |
347 | TLS. */ |
348 | static bool |
349 | resize_tls_slotinfo (struct link_map *new) |
350 | { |
351 | bool any_tls = false; |
352 | for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i) |
353 | { |
354 | struct link_map *imap = new->l_searchlist.r_list[i]; |
355 | |
356 | /* Only add TLS memory if this object is loaded now and |
357 | therefore is not yet initialized. */ |
358 | if (! imap->l_init_called && imap->l_tls_blocksize > 0) |
359 | { |
360 | _dl_add_to_slotinfo (imap, false); |
361 | any_tls = true; |
362 | } |
363 | } |
364 | return any_tls; |
365 | } |
366 | |
367 | /* Second stage of TLS update, after resize_tls_slotinfo. This |
368 | function does not raise any exception. It should only be called if |
369 | resize_tls_slotinfo returned true. */ |
370 | static void |
371 | update_tls_slotinfo (struct link_map *new) |
372 | { |
373 | unsigned int first_static_tls = new->l_searchlist.r_nlist; |
374 | for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i) |
375 | { |
376 | struct link_map *imap = new->l_searchlist.r_list[i]; |
377 | |
378 | /* Only add TLS memory if this object is loaded now and |
379 | therefore is not yet initialized. */ |
380 | if (! imap->l_init_called && imap->l_tls_blocksize > 0) |
381 | { |
382 | _dl_add_to_slotinfo (imap, true); |
383 | |
384 | if (imap->l_need_tls_init |
385 | && first_static_tls == new->l_searchlist.r_nlist) |
386 | first_static_tls = i; |
387 | } |
388 | } |
389 | |
390 | if (__builtin_expect (++GL(dl_tls_generation) == 0, 0)) |
391 | _dl_fatal_printf (N_("\ |
TLS generation counter wrapped!  Please report this."));
393 | |
394 | /* We need a second pass for static tls data, because |
395 | _dl_update_slotinfo must not be run while calls to |
396 | _dl_add_to_slotinfo are still pending. */ |
397 | for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i) |
398 | { |
399 | struct link_map *imap = new->l_searchlist.r_list[i]; |
400 | |
401 | if (imap->l_need_tls_init |
402 | && ! imap->l_init_called |
403 | && imap->l_tls_blocksize > 0) |
404 | { |
405 | /* For static TLS we have to allocate the memory here and |
406 | now, but we can delay updating the DTV. */ |
407 | imap->l_need_tls_init = 0; |
408 | #ifdef SHARED |
409 | /* Update the slot information data for at least the |
410 | generation of the DSO we are allocating data for. */ |
411 | |
412 | /* FIXME: This can terminate the process on memory |
413 | allocation failure. It is not possible to raise |
414 | exceptions from this context; to fix this bug, |
415 | _dl_update_slotinfo would have to be split into two |
416 | operations, similar to resize_scopes and update_scopes |
417 | above. This is related to bug 16134. */ |
418 | _dl_update_slotinfo (imap->l_tls_modid); |
419 | #endif |
420 | |
421 | GL(dl_init_static_tls) (imap); |
422 | assert (imap->l_need_tls_init == 0); |
423 | } |
424 | } |
425 | } |
426 | |
/* Mark the objects as NODELETE if required.  This is delayed until
   the point where dlopen failure is no longer possible, so that
   _dl_close can still clean up objects if necessary.  */
430 | static void |
431 | activate_nodelete (struct link_map *new) |
432 | { |
433 | /* It is necessary to traverse the entire namespace. References to |
434 | objects in the global scope and unique symbol bindings can force |
435 | NODELETE status for objects outside the local scope. */ |
436 | for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL; |
437 | l = l->l_next) |
438 | if (l->l_nodelete_pending) |
439 | { |
440 | if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)) |
441 | _dl_debug_printf ("activating NODELETE for %s [%lu]\n" , |
442 | l->l_name, l->l_ns); |
443 | |
444 | /* The flag can already be true at this point, e.g. a signal |
445 | handler may have triggered lazy binding and set NODELETE |
446 | status immediately. */ |
447 | l->l_nodelete_active = true; |
448 | |
449 | /* This is just a debugging aid, to indicate that |
450 | activate_nodelete has run for this map. */ |
451 | l->l_nodelete_pending = false; |
452 | } |
453 | } |
454 | |
455 | /* struct dl_init_args and call_dl_init are used to call _dl_init with |
456 | exception handling disabled. */ |
457 | struct dl_init_args |
458 | { |
459 | struct link_map *new; |
460 | int argc; |
461 | char **argv; |
462 | char **env; |
463 | }; |
464 | |
465 | static void |
466 | call_dl_init (void *closure) |
467 | { |
468 | struct dl_init_args *args = closure; |
469 | _dl_init (args->new, args->argc, args->argv, args->env); |
470 | } |
471 | |
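/* dl_open_worker below proceeds roughly in these phases (an informal
   summary of the code that follows): map the object and its
   dependencies, check symbol versions, relocate, pre-allocate the
   scope, TLS slotinfo and global-scope updates, then past the
   "demarcation point" apply those updates (NODELETE flags, scopes,
   TLS slotinfo), run the initializers via _dl_init, and finally add
   the new objects to the global scope if RTLD_GLOBAL was requested.  */
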
472 | static void |
473 | dl_open_worker (void *a) |
474 | { |
475 | struct dl_open_args *args = a; |
476 | const char *file = args->file; |
477 | int mode = args->mode; |
478 | struct link_map *call_map = NULL; |
479 | |
480 | /* Determine the caller's map if necessary. This is needed in case |
481 | we have a DST, when we don't know the namespace ID we have to put |
482 | the new object in, or when the file name has no path in which |
483 | case we need to look along the RUNPATH/RPATH of the caller. */ |
484 | const char *dst = strchr (file, '$'); |
485 | if (dst != NULL || args->nsid == __LM_ID_CALLER |
486 | || strchr (file, '/') == NULL) |
487 | { |
488 | const void *caller_dlopen = args->caller_dlopen; |
489 | |
490 | /* We have to find out from which object the caller is calling. |
491 | By default we assume this is the main application. */ |
492 | call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded; |
493 | |
494 | struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen); |
495 | |
496 | if (l) |
497 | call_map = l; |
498 | |
499 | if (args->nsid == __LM_ID_CALLER) |
500 | args->nsid = call_map->l_ns; |
501 | } |
502 | |
503 | /* Retain the old value, so that it can be restored. */ |
504 | args->original_global_scope_pending_adds |
505 | = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds; |
506 | |
507 | /* One might be tempted to assert that we are RT_CONSISTENT at this point, but that |
508 | may not be true if this is a recursive call to dlopen. */ |
509 | _dl_debug_initialize (0, args->nsid); |
510 | |
511 | /* Load the named object. */ |
512 | struct link_map *new; |
513 | args->map = new = _dl_map_object (call_map, file, lt_loaded, 0, |
514 | mode | __RTLD_CALLMAP, args->nsid); |
515 | |
516 | /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is |
517 | set and the object is not already loaded. */ |
518 | if (new == NULL) |
519 | { |
520 | assert (mode & RTLD_NOLOAD); |
521 | return; |
522 | } |
523 | |
524 | if (__glibc_unlikely (mode & __RTLD_SPROF)) |
525 | /* This happens only if we load a DSO for 'sprof'. */ |
526 | return; |
527 | |
528 | /* This object is directly loaded. */ |
529 | ++new->l_direct_opencount; |
530 | |
531 | /* It was already open. */ |
532 | if (__glibc_unlikely (new->l_searchlist.r_list != NULL)) |
533 | { |
534 | /* Let the user know about the opencount. */ |
535 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES)) |
536 | _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n" , |
537 | new->l_name, new->l_ns, new->l_direct_opencount); |
538 | |
539 | /* If the user requested the object to be in the global |
540 | namespace but it is not so far, prepare to add it now. This |
can raise an exception due to a malloc failure.  */
542 | if ((mode & RTLD_GLOBAL) && new->l_global == 0) |
543 | add_to_global_resize (new); |
544 | |
/* Mark the object as not deletable if the RTLD_NODELETE flag
   was passed.  */
547 | if (__glibc_unlikely (mode & RTLD_NODELETE)) |
548 | { |
549 | if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES) |
550 | && !new->l_nodelete_active) |
551 | _dl_debug_printf ("marking %s [%lu] as NODELETE\n" , |
552 | new->l_name, new->l_ns); |
553 | new->l_nodelete_active = true; |
554 | } |
555 | |
556 | /* Finalize the addition to the global scope. */ |
557 | if ((mode & RTLD_GLOBAL) && new->l_global == 0) |
558 | add_to_global_update (new); |
559 | |
560 | assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT); |
561 | |
562 | return; |
563 | } |
564 | |
565 | /* Schedule NODELETE marking for the directly loaded object if |
566 | requested. */ |
567 | if (__glibc_unlikely (mode & RTLD_NODELETE)) |
568 | new->l_nodelete_pending = true; |
569 | |
570 | /* Load that object's dependencies. */ |
571 | _dl_map_object_deps (new, NULL, 0, 0, |
572 | mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT)); |
573 | |
574 | /* So far, so good. Now check the versions. */ |
575 | for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i) |
576 | if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL) |
577 | (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real, |
578 | 0, 0); |
579 | |
580 | #ifdef SHARED |
581 | /* Auditing checkpoint: we have added all objects. */ |
582 | if (__glibc_unlikely (GLRO(dl_naudit) > 0)) |
583 | { |
584 | struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded; |
585 | /* Do not call the functions for any auditing object. */ |
586 | if (head->l_auditing == 0) |
587 | { |
588 | struct audit_ifaces *afct = GLRO(dl_audit); |
589 | for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt) |
590 | { |
591 | if (afct->activity != NULL) |
592 | { |
593 | struct auditstate *state = link_map_audit_state (head, cnt); |
594 | afct->activity (&state->cookie, LA_ACT_CONSISTENT); |
595 | } |
596 | |
597 | afct = afct->next; |
598 | } |
599 | } |
600 | } |
601 | #endif |
602 | |
603 | /* Notify the debugger all new objects are now ready to go. */ |
604 | struct r_debug *r = _dl_debug_initialize (0, args->nsid); |
605 | r->r_state = RT_CONSISTENT; |
606 | _dl_debug_state (); |
607 | LIBC_PROBE (map_complete, 3, args->nsid, r, new); |
608 | |
609 | _dl_open_check (new); |
610 | |
611 | /* Print scope information. */ |
612 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES)) |
613 | _dl_show_scope (new, 0); |
614 | |
615 | /* Only do lazy relocation if `LD_BIND_NOW' is not set. */ |
616 | int reloc_mode = mode & __RTLD_AUDIT; |
617 | if (GLRO(dl_lazy)) |
618 | reloc_mode |= mode & RTLD_LAZY; |
619 | |
/* Sort the objects by dependency for the relocation process.  This
   allows IFUNC relocations to work and it also means that copy
   relocations of dependencies are overwritten when necessary.  */
623 | unsigned int nmaps = 0; |
624 | struct link_map *l = new; |
625 | do |
626 | { |
627 | if (! l->l_real->l_relocated) |
628 | ++nmaps; |
629 | l = l->l_next; |
630 | } |
631 | while (l != NULL); |
632 | struct link_map *maps[nmaps]; |
633 | nmaps = 0; |
634 | l = new; |
635 | do |
636 | { |
637 | if (! l->l_real->l_relocated) |
638 | maps[nmaps++] = l; |
639 | l = l->l_next; |
640 | } |
641 | while (l != NULL); |
642 | _dl_sort_maps (maps, nmaps, NULL, false); |
643 | |
644 | int relocation_in_progress = 0; |
645 | |
646 | /* Perform relocation. This can trigger lazy binding in IFUNC |
647 | resolvers. For NODELETE mappings, these dependencies are not |
648 | recorded because the flag has not been applied to the newly |
649 | loaded objects. This means that upon dlopen failure, these |
650 | NODELETE objects can be unloaded despite existing references to |
651 | them. However, such relocation dependencies in IFUNC resolvers |
652 | are undefined anyway, so this is not a problem. */ |
653 | |
654 | for (unsigned int i = nmaps; i-- > 0; ) |
655 | { |
656 | l = maps[i]; |
657 | |
658 | if (! relocation_in_progress) |
659 | { |
660 | /* Notify the debugger that relocations are about to happen. */ |
661 | LIBC_PROBE (reloc_start, 2, args->nsid, r); |
662 | relocation_in_progress = 1; |
663 | } |
664 | |
665 | #ifdef SHARED |
666 | if (__glibc_unlikely (GLRO(dl_profile) != NULL)) |
667 | { |
/* If this is the shared object which we want to profile, make
   sure the profiling is started.  We can find out whether this
   is necessary or not by observing the `_dl_profile_map'
   variable.  If it was NULL before but is not NULL afterwards
   we must start the profiling.  */
673 | struct link_map *old_profile_map = GL(dl_profile_map); |
674 | |
675 | _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1); |
676 | |
677 | if (old_profile_map == NULL && GL(dl_profile_map) != NULL) |
678 | { |
679 | /* We must prepare the profiling. */ |
680 | _dl_start_profile (); |
681 | |
682 | /* Prevent unloading the object. */ |
683 | GL(dl_profile_map)->l_nodelete_active = true; |
684 | } |
685 | } |
686 | else |
687 | #endif |
688 | _dl_relocate_object (l, l->l_scope, reloc_mode, 0); |
689 | } |
690 | |
691 | /* This only performs the memory allocations. The actual update of |
692 | the scopes happens below, after failure is impossible. */ |
693 | resize_scopes (new); |
694 | |
695 | /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data |
696 | structure. */ |
697 | bool any_tls = resize_tls_slotinfo (new); |
698 | |
/* Perform the necessary allocations for adding new global objects
   to the global scope below, via add_to_global_update.  */
701 | if (mode & RTLD_GLOBAL) |
702 | add_to_global_resize (new); |
703 | |
704 | /* Demarcation point: After this, no recoverable errors are allowed. |
705 | All memory allocations for new objects must have happened |
706 | before. */ |
707 | |
708 | /* Finalize the NODELETE status first. This comes before |
709 | update_scopes, so that lazy binding will not see pending NODELETE |
710 | state for newly loaded objects. There is a compiler barrier in |
711 | update_scopes which ensures that the changes from |
712 | activate_nodelete are visible before new objects show up in the |
713 | local scope. */ |
714 | activate_nodelete (new); |
715 | |
716 | /* Second stage after resize_scopes: Actually perform the scope |
717 | update. After this, dlsym and lazy binding can bind to new |
718 | objects. */ |
719 | update_scopes (new); |
720 | |
721 | /* FIXME: It is unclear whether the order here is correct. |
722 | Shouldn't new objects be made available for binding (and thus |
execution) only after their TLS data has been set up fully?
724 | Fixing bug 16134 will likely make this distinction less |
725 | important. */ |
726 | |
727 | /* Second stage after resize_tls_slotinfo: Update the slotinfo data |
728 | structures. */ |
729 | if (any_tls) |
730 | /* FIXME: This calls _dl_update_slotinfo, which aborts the process |
731 | on memory allocation failure. See bug 16134. */ |
732 | update_tls_slotinfo (new); |
733 | |
734 | /* Notify the debugger all new objects have been relocated. */ |
735 | if (relocation_in_progress) |
736 | LIBC_PROBE (reloc_complete, 3, args->nsid, r, new); |
737 | |
738 | #ifndef SHARED |
739 | DL_STATIC_INIT (new); |
740 | #endif |
741 | |
747 | /* Run the initializer functions of new objects. Temporarily |
748 | disable the exception handler, so that lazy binding failures are |
749 | fatal. */ |
750 | { |
751 | struct dl_init_args init_args = |
752 | { |
753 | .new = new, |
754 | .argc = args->argc, |
755 | .argv = args->argv, |
756 | .env = args->env |
757 | }; |
758 | _dl_catch_exception (NULL, call_dl_init, &init_args); |
759 | } |
760 | |
761 | /* Now we can make the new map available in the global scope. */ |
762 | if (mode & RTLD_GLOBAL) |
763 | add_to_global_update (new); |
764 | |
765 | #ifndef SHARED |
766 | /* We must be the static _dl_open in libc.a. A static program that |
767 | has loaded a dynamic object now has competition. */ |
768 | __libc_multiple_libcs = 1; |
769 | #endif |
770 | |
771 | /* Let the user know about the opencount. */ |
772 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES)) |
773 | _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n" , |
774 | new->l_name, new->l_ns, new->l_direct_opencount); |
775 | } |
776 | |
777 | void * |
778 | _dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid, |
779 | int argc, char *argv[], char *env[]) |
780 | { |
781 | if ((mode & RTLD_BINDING_MASK) == 0) |
782 | /* One of the flags must be set. */ |
_dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
784 | |
785 | /* Make sure we are alone. */ |
786 | __rtld_lock_lock_recursive (GL(dl_load_lock)); |
787 | |
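/* NSID comes straight from the caller: dlopen passes __LM_ID_CALLER,
   while dlmopen passes an existing namespace ID or LM_ID_NEWLM to
   request a fresh namespace.  An illustrative (hypothetical)
   user-level call that takes the LM_ID_NEWLM path below:

     void *handle = dlmopen (LM_ID_NEWLM, "libfoo.so", RTLD_NOW);  */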
788 | if (__glibc_unlikely (nsid == LM_ID_NEWLM)) |
789 | { |
790 | /* Find a new namespace. */ |
791 | for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid) |
792 | if (GL(dl_ns)[nsid]._ns_loaded == NULL) |
793 | break; |
794 | |
795 | if (__glibc_unlikely (nsid == DL_NNS)) |
796 | { |
797 | /* No more namespace available. */ |
798 | __rtld_lock_unlock_recursive (GL(dl_load_lock)); |
799 | |
800 | _dl_signal_error (EINVAL, file, NULL, N_("\ |
no more namespaces available for dlmopen()"));
802 | } |
803 | else if (nsid == GL(dl_nns)) |
804 | { |
805 | __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock); |
806 | ++GL(dl_nns); |
807 | } |
808 | |
809 | _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT; |
810 | } |
/* Never allow loading a DSO in a namespace which is empty.  Such
   direct placement only causes problems.  Also don't allow
813 | loading into a namespace used for auditing. */ |
814 | else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER) |
815 | && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns)) |
816 | /* This prevents the [NSID] index expressions from being |
817 | evaluated, so the compiler won't think that we are |
818 | accessing an invalid index here in the !SHARED case where |
819 | DL_NNS is 1 and so any NSID != 0 is invalid. */ |
820 | || DL_NNS == 1 |
821 | || GL(dl_ns)[nsid]._ns_nloaded == 0 |
822 | || GL(dl_ns)[nsid]._ns_loaded->l_auditing)) |
823 | _dl_signal_error (EINVAL, file, NULL, |
824 | N_("invalid target namespace in dlmopen()" )); |
825 | |
826 | struct dl_open_args args; |
827 | args.file = file; |
828 | args.mode = mode; |
829 | args.caller_dlopen = caller_dlopen; |
830 | args.map = NULL; |
831 | args.nsid = nsid; |
832 | args.argc = argc; |
833 | args.argv = argv; |
834 | args.env = env; |
835 | |
836 | struct dl_exception exception; |
837 | int errcode = _dl_catch_exception (&exception, dl_open_worker, &args); |
838 | |
839 | #if defined USE_LDCONFIG && !defined MAP_COPY |
840 | /* We must unmap the cache file. */ |
841 | _dl_unload_cache (); |
842 | #endif |
843 | |
844 | /* Do this for both the error and success cases. The old value has |
845 | only been determined if the namespace ID was assigned (i.e., it |
846 | is not __LM_ID_CALLER). In the success case, we actually may |
847 | have consumed more pending adds than planned (because the local |
848 | scopes overlap in case of a recursive dlopen, the inner dlopen |
849 | doing some of the globalization work of the outer dlopen), so the |
850 | old pending adds value is larger than absolutely necessary. |
851 | Since it is just a conservative upper bound, this is harmless. |
852 | The top-level dlopen call will restore the field to zero. */ |
853 | if (args.nsid >= 0) |
854 | GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds |
855 | = args.original_global_scope_pending_adds; |
856 | |
857 | /* See if an error occurred during loading. */ |
858 | if (__glibc_unlikely (exception.errstring != NULL)) |
859 | { |
860 | /* Remove the object from memory. It may be in an inconsistent |
861 | state if relocation failed, for example. */ |
862 | if (args.map) |
863 | { |
864 | /* Maybe some of the modules which were loaded use TLS. |
865 | Since it will be removed in the following _dl_close call |
866 | we have to mark the dtv array as having gaps to fill the |
867 | holes. This is a pessimistic assumption which won't hurt |
868 | if not true. There is no need to do this when we are |
869 | loading the auditing DSOs since TLS has not yet been set |
870 | up. */ |
871 | if ((mode & __RTLD_AUDIT) == 0) |
872 | GL(dl_tls_dtv_gaps) = true; |
873 | |
874 | _dl_close_worker (args.map, true); |
875 | |
876 | /* All l_nodelete_pending objects should have been deleted |
877 | at this point, which is why it is not necessary to reset |
878 | the flag here. */ |
879 | } |
880 | |
881 | assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT); |
882 | |
883 | /* Release the lock. */ |
884 | __rtld_lock_unlock_recursive (GL(dl_load_lock)); |
885 | |
886 | /* Reraise the error. */ |
887 | _dl_signal_exception (errcode, &exception, NULL); |
888 | } |
889 | |
890 | assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT); |
891 | |
892 | /* Release the lock. */ |
893 | __rtld_lock_unlock_recursive (GL(dl_load_lock)); |
894 | |
895 | return args.map; |
896 | } |
897 | |
898 | |
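/* Print the scope(s) of map L to the debug output, starting with
   scope index FROM.  This backs the LD_DEBUG=scopes output.  A rough
   sketch of the kind of output produced (object paths illustrative):

     object=/usr/lib/libfoo.so [0]
      scope 0: /usr/bin/app /usr/lib/libfoo.so /lib/libc.so.6  */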
void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}
922 | |