/* Thread-local storage handling in the ELF dynamic linker.  x86_64 version.
   Copyright (C) 2004-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <tls.h>
#include "tlsdesc.h"

        .text

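     /* Throughout this file, %rax carries a pointer to a TLS
        descriptor.  As an editor's sketch (tlsdesc.h remains the
        authoritative definition), its layout is:

        struct tlsdesc
        {
          ptrdiff_t (*entry) (struct tlsdesc *on_rax);  // 0(%rax)
          void *arg;                                    // 8(%rax)
        };
        */
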
     /* This function is used to compute the TP offset for symbols in
        Static TLS, i.e., whose TP offset is the same for all
        threads.

        The incoming %rax points to the TLS descriptor, such that
        0(%rax) points to _dl_tlsdesc_return itself, and 8(%rax) holds
        the TP offset of the symbol corresponding to the object
        denoted by the argument.  */

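     /* In the style of the C rendition given for _dl_tlsdesc_dynamic
        below, this is roughly (an editor's sketch, not glibc source):

        ptrdiff_t
        _dl_tlsdesc_return (register struct tlsdesc *tdp asm ("%rax"))
        {
          return (ptrdiff_t) tdp->arg;
        }
        */
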
        .hidden _dl_tlsdesc_return
        .global _dl_tlsdesc_return
        .type   _dl_tlsdesc_return,@function
        cfi_startproc
        .align 16
_dl_tlsdesc_return:
        movq 8(%rax), %rax
        ret
        cfi_endproc
        .size _dl_tlsdesc_return, .-_dl_tlsdesc_return

     /* This function is used for undefined weak TLS symbols, for
        which the base address (i.e., disregarding any addend) should
        resolve to NULL.

        %rax points to the TLS descriptor, such that 0(%rax) points to
        _dl_tlsdesc_undefweak itself, and 8(%rax) holds the addend.
        We return the addend minus the TP, such that, when the caller
        adds TP, it gets the addend back.  If that's zero, as usual,
        that's most likely a NULL pointer.  */

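     /* Roughly, in C (an editor's sketch; __thread_pointer stands for
        the value stored at %fs:0):

        ptrdiff_t
        _dl_tlsdesc_undefweak (register struct tlsdesc *tdp asm ("%rax"))
        {
          return (ptrdiff_t) tdp->arg - (ptrdiff_t) __thread_pointer;
        }
        */
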
        .hidden _dl_tlsdesc_undefweak
        .global _dl_tlsdesc_undefweak
        .type   _dl_tlsdesc_undefweak,@function
        cfi_startproc
        .align 16
_dl_tlsdesc_undefweak:
        movq 8(%rax), %rax
        subq %fs:0, %rax
        ret
        cfi_endproc
        .size _dl_tlsdesc_undefweak, .-_dl_tlsdesc_undefweak

#ifdef SHARED
        .hidden _dl_tlsdesc_dynamic
        .global _dl_tlsdesc_dynamic
        .type   _dl_tlsdesc_dynamic,@function

     /* %rax points to the TLS descriptor, such that 0(%rax) points to
        _dl_tlsdesc_dynamic itself, and 8(%rax) points to a struct
        tlsdesc_dynamic_arg object.  It must return in %rax the offset
        between the thread pointer and the object denoted by the
        argument, without clobbering any registers.

        The assembly code that follows is a rendition of the following
        C code, hand-optimized a little bit.

ptrdiff_t
_dl_tlsdesc_dynamic (register struct tlsdesc *tdp asm ("%rax"))
{
  struct tlsdesc_dynamic_arg *td = tdp->arg;
  dtv_t *dtv = *(dtv_t **)((char *)__thread_pointer + DTV_OFFSET);
  if (__builtin_expect (td->gen_count <= dtv[0].counter
                        && (dtv[td->tlsinfo.ti_module].pointer.val
                            != TLS_DTV_UNALLOCATED),
                        1))
    return dtv[td->tlsinfo.ti_module].pointer.val + td->tlsinfo.ti_offset
      - __thread_pointer;

  return __tls_get_addr_internal (&td->tlsinfo) - __thread_pointer;
}
*/
        cfi_startproc
        .align 16
_dl_tlsdesc_dynamic:
        /* Preserve call-clobbered registers that we modify.
           We need two scratch regs anyway.  */
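        /* Editor's note: the slots at -8(%rsp) and -16(%rsp) sit in
           the 128-byte red zone the x86-64 psABI reserves below the
           stack pointer, so they are safe to use here because the
           fast path makes no calls.  */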
        movq %rsi, -16(%rsp)
        movq %fs:DTV_OFFSET, %rsi
        movq %rdi, -8(%rsp)
        movq TLSDESC_ARG(%rax), %rdi
        movq (%rsi), %rax
        cmpq %rax, TLSDESC_GEN_COUNT(%rdi)
        ja .Lslow
        movq TLSDESC_MODID(%rdi), %rax
        salq $4, %rax
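        /* Editor's note: each dtv_t entry is 16 bytes on x86-64, so
           the scaled module ID indexes the DTV held in %rsi.  */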
        movq (%rax,%rsi), %rax
        cmpq $-1, %rax
        je .Lslow
        addq TLSDESC_MODOFF(%rdi), %rax
.Lret:
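        /* Editor's note: subtracting %fs:0 (the TCB's self pointer,
           i.e. the thread pointer) turns the absolute address in
           %rax into a TP offset.  */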
        movq -16(%rsp), %rsi
        subq %fs:0, %rax
        movq -8(%rsp), %rdi
        ret
.Lslow:
        /* Besides rdi and rsi, saved above, save rdx, rcx, r8, r9,
           r10 and r11.  Also align the stack, which is off by 8 bytes
           at this point.  */
        subq $72, %rsp
        cfi_adjust_cfa_offset (72)
        movq %rdx, 8(%rsp)
        movq %rcx, 16(%rsp)
        movq %r8, 24(%rsp)
        movq %r9, 32(%rsp)
        movq %r10, 40(%rsp)
        movq %r11, 48(%rsp)
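        /* Editor's note: the %rsi and %rdi values stashed at -16(%rsp)
           and -8(%rsp) above now live at 56(%rsp) and 64(%rsp) of this
           frame, so the call below cannot clobber them.  */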
        /* %rdi already points to the tlsinfo data structure.  */
#ifdef NO_RTLD_HIDDEN
        call JUMPTARGET (__tls_get_addr)
#else
        call HIDDEN_JUMPTARGET (__tls_get_addr)
#endif
        movq 8(%rsp), %rdx
        movq 16(%rsp), %rcx
        movq 24(%rsp), %r8
        movq 32(%rsp), %r9
        movq 40(%rsp), %r10
        movq 48(%rsp), %r11
        addq $72, %rsp
        cfi_adjust_cfa_offset (-72)
        jmp .Lret
        cfi_endproc
        .size _dl_tlsdesc_dynamic, .-_dl_tlsdesc_dynamic
#endif /* SHARED */

     /* This function is a wrapper for a lazy resolver for TLS_DESC
        RELA relocations.  The incoming 0(%rsp) points to the caller's
        link map, pushed by the dynamic object's internal lazy TLS
        resolver front-end before tail-calling us.  We need to pop it
        ourselves.  %rax points to a TLS descriptor, such that 0(%rax)
        holds the address of the internal resolver front-end (unless
        some other thread beat us to resolving it) and 8(%rax) holds a
        pointer to the relocation.

        When the actual resolver returns, it will have adjusted the
        TLS descriptor such that we can tail-call it for it to return
        the TP offset of the symbol.  */

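     /* For reference, the fixup called below reaches C code roughly
        declared as follows (an editor's sketch; the authoritative
        prototype is in dl-tlsdesc.h):

        void _dl_tlsdesc_resolve_rela_fixup (struct tlsdesc volatile *td,
                                             struct link_map *l);

        with td passed in %rdi and l, the link map pushed by the PLT
        entry, passed in %rsi.  */
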
        .hidden _dl_tlsdesc_resolve_rela
        .global _dl_tlsdesc_resolve_rela
        .type   _dl_tlsdesc_resolve_rela,@function
        cfi_startproc
        .align 16
        /* The PLT entry will have pushed the link_map pointer.  */
_dl_tlsdesc_resolve_rela:
        cfi_adjust_cfa_offset (8)
        /* Save all call-clobbered registers.  Add 8 bytes for the
           push in the PLT entry to align the stack.  */
        subq $80, %rsp
        cfi_adjust_cfa_offset (80)
        movq %rax, (%rsp)
        movq %rdi, 8(%rsp)
        movq %rax, %rdi        /* Pass tlsdesc* in %rdi.  */
        movq %rsi, 16(%rsp)
        movq 80(%rsp), %rsi    /* Pass link_map* in %rsi.  */
        movq %r8, 24(%rsp)
        movq %r9, 32(%rsp)
        movq %r10, 40(%rsp)
        movq %r11, 48(%rsp)
        movq %rdx, 56(%rsp)
        movq %rcx, 64(%rsp)
        call _dl_tlsdesc_resolve_rela_fixup
        movq (%rsp), %rax
        movq 8(%rsp), %rdi
        movq 16(%rsp), %rsi
        movq 24(%rsp), %r8
        movq 32(%rsp), %r9
        movq 40(%rsp), %r10
        movq 48(%rsp), %r11
        movq 56(%rsp), %rdx
        movq 64(%rsp), %rcx
        addq $88, %rsp
        cfi_adjust_cfa_offset (-88)
        jmp *(%rax)
        cfi_endproc
        .size _dl_tlsdesc_resolve_rela, .-_dl_tlsdesc_resolve_rela

     /* This function is a placeholder for lazy resolution of TLS
        relocations.  Once some thread starts resolving a TLS
        relocation, it sets up the TLS descriptor to use this
        resolver, such that other threads that would attempt to
        resolve it concurrently may skip the call to the original lazy
        resolver and go straight to a condition wait.

        When the actual resolver returns, it will have adjusted the
        TLS descriptor such that we can tail-call it for it to return
        the TP offset of the symbol.  */

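     /* As above, the fixup called below is roughly (an editor's
        sketch; see dl-tlsdesc.h for the real declaration):

        void _dl_tlsdesc_resolve_hold_fixup (struct tlsdesc volatile *td,
                                             void *caller);

        where caller is this resolver's own address, which the fixup
        compares against the descriptor's entry to tell whether
        resolution has already completed.  */
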
        .hidden _dl_tlsdesc_resolve_hold
        .global _dl_tlsdesc_resolve_hold
        .type   _dl_tlsdesc_resolve_hold,@function
        cfi_startproc
        .align 16
_dl_tlsdesc_resolve_hold:
0:
        /* Save all call-clobbered registers.  */
        subq $72, %rsp
        cfi_adjust_cfa_offset (72)
        movq %rax, (%rsp)
        movq %rdi, 8(%rsp)
        movq %rax, %rdi        /* Pass tlsdesc* in %rdi.  */
        movq %rsi, 16(%rsp)
        /* Pass _dl_tlsdesc_resolve_hold's address in %rsi.  */
        leaq . - _dl_tlsdesc_resolve_hold(%rip), %rsi
        movq %r8, 24(%rsp)
        movq %r9, 32(%rsp)
        movq %r10, 40(%rsp)
        movq %r11, 48(%rsp)
        movq %rdx, 56(%rsp)
        movq %rcx, 64(%rsp)
        call _dl_tlsdesc_resolve_hold_fixup
1:
        movq (%rsp), %rax
        movq 8(%rsp), %rdi
        movq 16(%rsp), %rsi
        movq 24(%rsp), %r8
        movq 32(%rsp), %r9
        movq 40(%rsp), %r10
        movq 48(%rsp), %r11
        movq 56(%rsp), %rdx
        movq 64(%rsp), %rcx
        addq $72, %rsp
        cfi_adjust_cfa_offset (-72)
        jmp *(%rax)
        cfi_endproc
        .size _dl_tlsdesc_resolve_hold, .-_dl_tlsdesc_resolve_hold