/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <caller.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>

#include <dl-dst.h>


/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};


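/* Add NEW and its dependencies to the global scope of NEW's namespace:
   grow the main search list if necessary, then mark every object that
   is not yet global and append it.  Returns 0 on success; on memory
   allocation failure an error is signalled with _dl_signal_error.  */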
static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses comes from the malloc() stub in ld.so.
     By the time we get here those functions are no longer used;
     instead the malloc() implementation of libc is used.  This means
     the block from the main map cannot be passed to realloc().
     Therefore we allocate a completely new array the first time we
     have to add something to the global scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
        = ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
        malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
        {
          ns->_ns_global_scope_alloc = 0;
        nomem:
          _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                            N_("cannot extend global scope"));
          return 1;
        }

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
        = memcpy (new_global, ns->_ns_main_searchlist->r_list,
                  (ns->_ns_main_searchlist->r_nlist
                   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
           > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
         main map.  */
      struct link_map **old_global
        = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
        malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
        goto nomem;

      memcpy (new_global, old_global,
              ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;

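      /* Before freeing the old array, wait until every thread has left
         its GSCOPE critical section: a concurrent symbol lookup may
         still be reading the old r_list.  */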
      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;
          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }
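  /* Make the new list entries visible to other threads before the
     updated element count is published below.  */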
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);

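/* The actual dlopen implementation.  Called through _dl_catch_exception
   from _dl_open below, with dl_load_lock already held, so any error
   signalled here unwinds back to _dl_open for cleanup.  */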
static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Check whether _dl_open() has been called from a valid DSO.  */
  if (__check_caller (args->caller_dl_open,
                      allow_libc|allow_libdl|allow_ldso) != 0)
    _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));

  /* Determine the caller's map if necessary.  This is needed if we
     have a DST, if we don't know the namespace ID in which to place
     the new object, or if the file name contains no path component,
     in which case we need to look along the RUNPATH/RPATH of the
     caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  /* Mark the object as not deletable if the RTLD_NODELETE flag was
     passed.  Do this early so that we don't skip marking the object if
     it was already loaded.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_flags_1 |= DF_1_NODELETE;

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
         namespace but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        (void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
                                     0, 0);

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__glibc_unlikely (GLRO(dl_naudit) > 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Sort the objects by dependency for the relocation process.  This
     allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten when necessary.  */
  unsigned int nmaps = 0;
  struct link_map *l = new;
  do
    {
      if (! l->l_real->l_relocated)
        ++nmaps;
      l = l->l_next;
    }
  while (l != NULL);
  struct link_map *maps[nmaps];
  nmaps = 0;
  l = new;
  do
    {
      if (! l->l_real->l_relocated)
        maps[nmaps++] = l;
      l = l->l_next;
    }
  while (l != NULL);
  _dl_sort_maps (maps, nmaps, NULL, false);

  int relocation_in_progress = 0;

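  /* Relocate from the end of MAPS towards the front, so that
     dependencies are relocated before the objects that use them.  */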
  for (unsigned int i = nmaps; i-- > 0; )
    {
      l = maps[i];

      if (! relocation_in_progress)
        {
          /* Notify the debugger that relocations are about to happen.  */
          LIBC_PROBE (reloc_start, 2, args->nsid, r);
          relocation_in_progress = 1;
        }

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
        {
          /* If this is the shared object we want to profile, make sure
             profiling is started.  We can find out whether this is
             necessary by observing the `_dl_profile_map' variable.
             If it was NULL before but is not NULL afterwards we must
             start the profiling.  */
          struct link_map *old_profile_map = GL(dl_profile_map);

          _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

          if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
            {
              /* We must prepare the profiling.  */
              _dl_start_profile ();

              /* Prevent unloading the object.  */
              GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
            }
        }
      else
#endif
        _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
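  /* ANY_TLS is set when one of the new objects defines TLS data, in
     which case the TLS generation counter is bumped below.
     FIRST_STATIC_TLS remembers the first search-list index that still
     needs its static TLS block set up in the second pass.  */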
  bool any_tls = false;
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          struct r_scope_elem **runp = imap->l_scope;
          size_t cnt = 0;

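          /* Walk IMAP's scope list looking for NEW's search list; if
             we reach the terminating NULL, CNT is the number of
             entries currently in the scope.  */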
          while (*runp != NULL)
            {
              if (*runp == &new->l_searchlist)
                break;
              ++cnt;
              ++runp;
            }

          if (*runp != NULL)
            /* Avoid duplicates.  */
            continue;

          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The 'l_scope' array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset IDX+1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          /* Print only new scope information.  */
          from_scope = cnt;
        }
      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      else if (! imap->l_init_called
               /* Only if the module defines thread local data.  */
               && __builtin_expect (imap->l_tls_blocksize > 0, 0))
        {
          /* Now that we know the object is loaded successfully add
             modules containing TLS data to the slot info table.  We
             might have to increase its size.  */
          _dl_add_to_slotinfo (imap);

          if (imap->l_need_tls_init
              && first_static_tls == new->l_searchlist.r_nlist)
            first_static_tls = i;

          /* We have to bump the generation counter.  */
          any_tls = true;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }

  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));

  /* We need a second pass for static TLS data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
          && ! imap->l_init_called
          && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for at least the
             generation of the DSO we are allocating data for.  */
          _dl_update_slotinfo (imap->l_tls_modid);
#endif

          GL(dl_init_static_tls) (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

#ifndef SHARED
  DL_STATIC_INIT (new);
#endif

  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object in the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}


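/* Entry point for dlopen and dlmopen: validate MODE, take the loader
   lock, resolve or create the target namespace NSID, and run
   dl_open_worker under the exception framework so that a failed load
   is rolled back before the error is re-raised.  */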
void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

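  /* LM_ID_NEWLM from dlmopen: pick the first namespace slot that has
     nothing loaded, growing the number of used namespaces when we run
     past the end, or fail if all DL_NNS slots are taken.  */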
  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespaces available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO into a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

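  /* Do the real work in dl_open_worker under the exception framework:
     any error signalled during loading or relocation is caught here so
     the partially loaded object can be removed before the error is
     re-raised to the caller.  */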
  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          /* Maybe some of the modules which were loaded use TLS.
             Since it will be removed in the following _dl_close call
             we have to mark the dtv array as having gaps to fill the
             holes.  This is a pessimistic assumption which won't hurt
             if not true.  There is no need to do this when we are
             loading the auditing DSOs since TLS has not yet been set
             up.  */
          if ((mode & __RTLD_AUDIT) == 0)
            GL(dl_tls_dtv_gaps) = true;

          _dl_close_worker (args.map, true);
        }

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}

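/* Print the scope lists of link map L, starting with scope element
   FROM, for the LD_DEBUG=scopes output.  */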
void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}