/* Inline functions for dynamic linking.
   Copyright (C) 1995-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

/* This macro is used as a callback from elf_machine_rel{a,} when a
   static TLS reloc is about to be performed.  Since (in dl-load.c) we
   permit dynamic loading of objects that might use such relocs, we
   have to check whether each use is actually doable.  If the object
   whose TLS segment the reference resolves to was allocated space in
   the static TLS block at startup, then it's ok.  Otherwise, we make
   an attempt to allocate it in surplus space on the fly.  If that
   can't be done, we fall back to the error that DF_STATIC_TLS is
   intended to produce.  */
#define HAVE_STATIC_TLS(map, sym_map)                                   \
    (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET        \
                       && ((sym_map)->l_tls_offset                     \
                           != FORCED_DYNAMIC_TLS_OFFSET), 1))

#define CHECK_STATIC_TLS(map, sym_map)                                  \
    do {                                                                \
      if (!HAVE_STATIC_TLS (map, sym_map))                              \
        _dl_allocate_static_tls (sym_map);                              \
    } while (0)

#define TRY_STATIC_TLS(map, sym_map)                                    \
    (__builtin_expect ((sym_map)->l_tls_offset                          \
                       != FORCED_DYNAMIC_TLS_OFFSET, 1)                 \
     && (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
         || _dl_try_allocate_static_tls (sym_map) == 0))

int _dl_try_allocate_static_tls (struct link_map *map) attribute_hidden;
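
/* Illustrative sketch (not part of this file): an architecture's
   elf_machine_rela in its dl-machine.h typically guards a static TLS
   relocation with CHECK_STATIC_TLS before writing the offset.  The
   reloc type below is a hypothetical placeholder for whatever the
   target ABI defines:

     case R_ARCH_TLS_TPOFF:        // hypothetical reloc type
       CHECK_STATIC_TLS (map, sym_map);
       *reloc_addr = sym->st_value + sym_map->l_tls_offset
                     + reloc->r_addend;
       break;

   CHECK_STATIC_TLS either succeeds (possibly allocating surplus static
   TLS space on the fly) or produces the DF_STATIC_TLS error described
   above.  */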

#include <elf.h>

#ifdef RESOLVE_MAP
/* We pass reloc_addr as a pointer to void, as opposed to a pointer to
   ElfW(Addr), because not all architectures can assume that the
   relocated address is properly aligned, whereas the compiler is
   entitled to assume that a pointer to a type is properly aligned for
   the type.  Even if we cast the pointer back to some other type with
   less strict alignment requirements, the compiler might still
   remember that the pointer was originally more aligned, thereby
   optimizing away alignment tests or using word instructions for
   copying memory, breaking the very code written to handle the
   unaligned cases.  */
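/* Illustrative sketch (an assumption about how a port might cope, not
   code from this file): a backend whose relocated fields may be
   misaligned should store through memcpy rather than through a typed
   pointer, so the compiler cannot reintroduce its alignment
   assumptions:

     static void
     store_reloc_value (void *reloc_addr, ElfW(Addr) value)
     {
       memcpy (reloc_addr, &value, sizeof value);  // alignment-safe store
     }

   Ports whose ABI guarantees aligned relocation targets simply cast
   reloc_addr back to ElfW(Addr) * and assign through it.  */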
# if ! ELF_MACHINE_NO_REL
auto inline void __attribute__((always_inline))
elf_machine_rel (struct link_map *map, const ElfW(Rel) *reloc,
                 const ElfW(Sym) *sym, const struct r_found_version *version,
                 void *const reloc_addr, int skip_ifunc);
auto inline void __attribute__((always_inline))
elf_machine_rel_relative (ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
                          void *const reloc_addr);
# endif
# if ! ELF_MACHINE_NO_RELA
auto inline void __attribute__((always_inline))
elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
                  const ElfW(Sym) *sym, const struct r_found_version *version,
                  void *const reloc_addr, int skip_ifunc);
auto inline void __attribute__((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                           void *const reloc_addr);
# endif
# if ELF_MACHINE_NO_RELA || defined ELF_MACHINE_PLT_REL
auto inline void __attribute__((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
                      int skip_ifunc);
# else
auto inline void __attribute__((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                      int skip_ifunc);
# endif
#endif

#include <dl-machine.h>

#include "get-dynamic-info.h"

#ifdef RESOLVE_MAP

# if defined RTLD_BOOTSTRAP || defined STATIC_PIE_BOOTSTRAP
#  define ELF_DURING_STARTUP (1)
# else
#  define ELF_DURING_STARTUP (0)
# endif

/* Get the definitions of `elf_dynamic_do_rel' and `elf_dynamic_do_rela'.
   These functions are almost identical, so we use cpp magic to avoid
   duplicating their code.  It cannot be done in a more general function
   because we must be able to completely inline.  */

/* On some machines, notably SPARC, DT_REL* includes DT_JMPREL in its
   range.  Note that according to the ELF spec, this is completely legal!

   We are guaranteed to have one of three situations.  Either DT_JMPREL
   comes immediately after DT_REL*, or there is overlap and DT_JMPREL
   consumes precisely the very end of the DT_REL*, or DT_JMPREL and DT_REL*
   are completely separate and there is a gap between them.  */
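
/* For illustration only (assumed layouts, not taken from any particular
   target), the three cases look roughly like this in terms of the
   address ranges handled by the macro below:

     adjacent:   [ DT_REL* ............ ][ DT_JMPREL ]
     overlap:    [ DT_REL* ......[ DT_JMPREL ]]        (JMPREL is the tail)
     separate:   [ DT_REL* ]   gap   [ DT_JMPREL ]

   In the overlap case the macro trims DT_JMPREL out of ranges[0]; in the
   adjacent case it merges the two when lazy processing is not wanted;
   otherwise DT_JMPREL is processed as a second, possibly lazy, range.  */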

# define _ELF_DYNAMIC_DO_RELOC(RELOC, reloc, map, do_lazy, skip_ifunc, test_rel) \
  do {                                                                        \
    struct { ElfW(Addr) start, size;                                          \
             __typeof (((ElfW(Dyn) *) 0)->d_un.d_val) nrelative; int lazy; }  \
      ranges[2] = { { 0, 0, 0, 0 }, { 0, 0, 0, 0 } };                         \
                                                                              \
    if ((map)->l_info[DT_##RELOC])                                            \
      {                                                                       \
        ranges[0].start = D_PTR ((map), l_info[DT_##RELOC]);                  \
        ranges[0].size = (map)->l_info[DT_##RELOC##SZ]->d_un.d_val;           \
        if (map->l_info[VERSYMIDX (DT_##RELOC##COUNT)] != NULL)               \
          ranges[0].nrelative                                                 \
            = map->l_info[VERSYMIDX (DT_##RELOC##COUNT)]->d_un.d_val;         \
      }                                                                       \
    if ((map)->l_info[DT_PLTREL]                                              \
        && (!test_rel || (map)->l_info[DT_PLTREL]->d_un.d_val == DT_##RELOC)) \
      {                                                                       \
        ElfW(Addr) start = D_PTR ((map), l_info[DT_JMPREL]);                  \
        ElfW(Addr) size = (map)->l_info[DT_PLTRELSZ]->d_un.d_val;             \
                                                                              \
        if (ranges[0].start + ranges[0].size == (start + size))               \
          ranges[0].size -= size;                                             \
        if (ELF_DURING_STARTUP                                                \
            || (!(do_lazy)                                                    \
                && (ranges[0].start + ranges[0].size) == start))              \
          {                                                                   \
            /* Combine processing the sections.  */                           \
            ranges[0].size += size;                                           \
          }                                                                   \
        else                                                                  \
          {                                                                   \
            ranges[1].start = start;                                          \
            ranges[1].size = size;                                            \
            ranges[1].lazy = (do_lazy);                                       \
          }                                                                   \
      }                                                                       \
                                                                              \
    if (ELF_DURING_STARTUP)                                                   \
      elf_dynamic_do_##reloc ((map), ranges[0].start, ranges[0].size,         \
                              ranges[0].nrelative, 0, skip_ifunc);            \
    else                                                                      \
      {                                                                       \
        int ranges_index;                                                     \
        for (ranges_index = 0; ranges_index < 2; ++ranges_index)              \
          elf_dynamic_do_##reloc ((map),                                      \
                                  ranges[ranges_index].start,                 \
                                  ranges[ranges_index].size,                  \
                                  ranges[ranges_index].nrelative,             \
                                  ranges[ranges_index].lazy,                  \
                                  skip_ifunc);                                \
      }                                                                       \
  } while (0)

# if ELF_MACHINE_NO_REL || ELF_MACHINE_NO_RELA
#  define _ELF_CHECK_REL 0
# else
#  define _ELF_CHECK_REL 1
# endif

# if ! ELF_MACHINE_NO_REL
#  include "do-rel.h"
#  define ELF_DYNAMIC_DO_REL(map, lazy, skip_ifunc) \
  _ELF_DYNAMIC_DO_RELOC (REL, Rel, map, lazy, skip_ifunc, _ELF_CHECK_REL)
# else
#  define ELF_DYNAMIC_DO_REL(map, lazy, skip_ifunc) /* Nothing to do.  */
# endif

# if ! ELF_MACHINE_NO_RELA
#  define DO_RELA
#  include "do-rel.h"
#  define ELF_DYNAMIC_DO_RELA(map, lazy, skip_ifunc) \
  _ELF_DYNAMIC_DO_RELOC (RELA, Rela, map, lazy, skip_ifunc, _ELF_CHECK_REL)
# else
#  define ELF_DYNAMIC_DO_RELA(map, lazy, skip_ifunc) /* Nothing to do.  */
# endif

/* This can't just be an inline function because GCC is too dumb
   to inline functions containing inlines themselves.  */
# define ELF_DYNAMIC_RELOCATE(map, lazy, consider_profile, skip_ifunc) \
  do {                                                                 \
    int edr_lazy = elf_machine_runtime_setup ((map), (lazy),           \
                                              (consider_profile));     \
    ELF_DYNAMIC_DO_REL ((map), edr_lazy, skip_ifunc);                  \
    ELF_DYNAMIC_DO_RELA ((map), edr_lazy, skip_ifunc);                 \
  } while (0)
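
/* Usage sketch (an assumption based on the usual callers, not code from
   this file): a translation unit such as dl-reloc.c defines RESOLVE_MAP
   to its symbol-lookup expression before including this header, and then
   invokes the top-level macro once per loaded object, e.g.

     ELF_DYNAMIC_RELOCATE (l, lazy, consider_profiling, skip_ifunc);

   which first runs elf_machine_runtime_setup and then applies the REL
   and/or RELA relocations for the link map `l'.  */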

#endif