1/* Save current context and install the given one.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Andreas Jaeger <aj@suse.de>, 2002.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; if not, see
18 <https://www.gnu.org/licenses/>. */
19
20#include <sysdep.h>
21#include <asm/prctl.h>
22
23#include "ucontext_i.h"
24
25
/* int __swapcontext (ucontext_t *oucp, const ucontext_t *ucp);

   Saves the machine context in oucp such that when it is activated,
   it appears as if __swapcontext() returned again, restores the
   machine context in ucp and thereby resumes execution in that
   context.

   This implementation is intended to be used for *synchronous* context
   switches only.  Therefore, it does not have to save anything
   other than the PRESERVED state.  */
36
ENTRY(__swapcontext)
	/* Save the preserved (callee-saved) registers, the registers used
	   for passing args, and the return address into *oucp (%rdi).  */
	movq	%rbx, oRBX(%rdi)
	movq	%rbp, oRBP(%rdi)
	movq	%r12, oR12(%rdi)
	movq	%r13, oR13(%rdi)
	movq	%r14, oR14(%rdi)
	movq	%r15, oR15(%rdi)

	movq	%rdi, oRDI(%rdi)
	movq	%rsi, oRSI(%rdi)
	movq	%rdx, oRDX(%rdi)
	movq	%rcx, oRCX(%rdi)
	movq	%r8, oR8(%rdi)
	movq	%r9, oR9(%rdi)

	/* The caller's return address becomes the saved RIP: activating
	   oucp later resumes as if __swapcontext had just returned.  */
	movq	(%rsp), %rcx
	movq	%rcx, oRIP(%rdi)
	leaq	8(%rsp), %rcx		/* Exclude the return address.  */
	movq	%rcx, oRSP(%rdi)

	/* We have separate floating-point register content memory on the
	   stack.  We use the __fpregs_mem block in the context.  Set the
	   links up correctly.  */
	leaq	oFPREGSMEM(%rdi), %rcx
	movq	%rcx, oFPREGS(%rdi)
	/* Save the floating-point environment (x87 control/status state
	   only; register contents are deliberately not saved -- see the
	   header comment about synchronous switches).  */
	fnstenv	(%rcx)
	stmxcsr	oMXCSR(%rdi)


	/* The syscall destroys some registers, save them: ucp (%rsi) in
	   the callee-saved %r12 (its original value was stored above),
	   and oucp (%rdi) in %r9, which SYSCALL does not clobber (SYSCALL
	   destroys %rcx and %r11).  */
	movq	%rsi, %r12
	movq	%rdi, %r9

	/* Save the current signal mask and install the new one with
	   rt_sigprocmask (SIG_SETMASK, &ucp->uc_sigmask,
	   &oucp->uc_sigmask, _NSIG/8).  */
	leaq	oSIGMASK(%rdi), %rdx
	leaq	oSIGMASK(%rsi), %rsi
	movl	$SIG_SETMASK, %edi
	movl	$_NSIG8,%r10d
	movl	$__NR_rt_sigprocmask, %eax
	syscall
	cmpq	$-4095, %rax		/* Check %rax for error.  */
	jae	SYSCALL_ERROR_LABEL	/* Jump to error handler if error.  */

	/* Restore destroyed register into RDX.  The choice is arbitrary,
	   but leaving RDI and RSI available for use later can avoid
	   shuffling values.  %rdx = ucp from here on.  */
	movq	%r12, %rdx

	/* Restore the floating-point context.  Not the registers, only the
	   rest.  */
	movq	oFPREGS(%rdx), %rcx
	fldenv	(%rcx)
	ldmxcsr	oMXCSR(%rdx)

	/* Load the new stack pointer and the preserved registers.  */
	movq	oRSP(%rdx), %rsp
	movq	oRBX(%rdx), %rbx
	movq	oRBP(%rdx), %rbp
	movq	oR12(%rdx), %r12
	movq	oR13(%rdx), %r13
	movq	oR14(%rdx), %r14
	movq	oR15(%rdx), %r15

#if SHSTK_ENABLED
	/* Check if shadow stack is enabled.  */
	testl	$X86_FEATURE_1_SHSTK, %fs:FEATURE_1_OFFSET
	jz	L(no_shstk)

	/* If no shadow-stack base has been recorded in the TCB yet
	   (SSP_BASE_OFFSET == 0), query and record it first.  */
	xorl	%eax, %eax
	cmpq	%fs:SSP_BASE_OFFSET, %rax
	jnz	L(shadow_stack_bound_recorded)

	/* Get the base address and size of the default shadow stack
	   which must be the current shadow stack since nothing has
	   been recorded yet.  arch_prctl (ARCH_CET_STATUS) writes
	   three 8-byte values to the buffer reserved on the stack.  */
	sub	$24, %RSP_LP
	mov	%RSP_LP, %RSI_LP
	movl	$ARCH_CET_STATUS, %edi
	movl	$__NR_arch_prctl, %eax
	syscall
	testq	%rax, %rax
	jz	L(continue_no_err)

	/* This should never happen.  */
	hlt

L(continue_no_err):
	/* Record the base of the current shadow stack (second field of
	   the ARCH_CET_STATUS result).  */
	movq	8(%rsp), %rax
	movq	%rax, %fs:SSP_BASE_OFFSET
	add	$24, %RSP_LP

L(shadow_stack_bound_recorded):
	/* If we unwind the stack, we can't undo stack unwinding.  Just
	   save the target shadow stack pointer as the current shadow
	   stack pointer in oucp (%r9 still holds oucp).  */
	movq	oSSP(%rdx), %rcx
	movq	%rcx, oSSP(%r9)

	/* Save the base of the current shadow stack.  */
	movq	%fs:SSP_BASE_OFFSET, %rax
	movq	%rax, (oSSP + 8)(%r9)

	/* If the base of the target shadow stack is the same as the
	   base of the current shadow stack, we unwind the shadow
	   stack.  Otherwise it is a stack switch and we look for a
	   restore token.  */
	movq	oSSP(%rdx), %rsi
	movq	%rsi, %rdi

	/* Get the base of the target shadow stack.  */
	movq	(oSSP + 8)(%rdx), %rcx
	cmpq	%fs:SSP_BASE_OFFSET, %rcx
	je	L(unwind_shadow_stack)

L(find_restore_token_loop):
	/* Look for a restore token: an 8-byte-aligned slot whose value
	   (low bits masked off) points just past itself.  */
	movq	-8(%rsi), %rax
	andq	$-8, %rax
	cmpq	%rsi, %rax
	je	L(restore_shadow_stack)

	/* Try the next slot.  */
	subq	$8, %rsi
	jmp	L(find_restore_token_loop)

L(restore_shadow_stack):
	/* The target shadow stack will be restored.  Save the current
	   shadow stack pointer.  */
	rdsspq	%rcx
	movq	%rcx, oSSP(%r9)

	/* Restore the target shadow stack.  */
	rstorssp	-8(%rsi)

	/* Save the restore token on the old shadow stack.  NB: This
	   restore token may be checked by setcontext or swapcontext
	   later.  */
	saveprevssp

	/* Record the new shadow stack base that was switched to.  */
	movq	(oSSP + 8)(%rdx), %rax
	movq	%rax, %fs:SSP_BASE_OFFSET

L(unwind_shadow_stack):
	/* Pop (current SSP - target SSP) / 8 shadow-stack entries,
	   at most 255 per INCSSPQ.  */
	rdsspq	%rcx
	subq	%rdi, %rcx
	je	L(skip_unwind_shadow_stack)
	negq	%rcx
	shrq	$3, %rcx
	movl	$255, %esi
L(loop):
	cmpq	%rsi, %rcx
	cmovb	%rcx, %rsi
	incsspq	%rsi
	subq	%rsi, %rcx
	ja	L(loop)

L(skip_unwind_shadow_stack):
	/* Setup registers used for passing args.  */
	movq	oRDI(%rdx), %rdi
	movq	oRSI(%rdx), %rsi
	movq	oRCX(%rdx), %rcx
	movq	oR8(%rdx), %r8
	movq	oR9(%rdx), %r9

	/* Get the return address set with getcontext.  */
	movq	oRIP(%rdx), %r10

	/* Setup finally %rdx.  */
	movq	oRDX(%rdx), %rdx

	/* Check if return address is valid for the case when setcontext
	   is invoked from __start_context with linked context: the
	   target RIP must match the top of the shadow stack for a
	   shadow-stack-checked RET to succeed.  */
	rdsspq	%rax
	cmpq	(%rax), %r10
	/* Clear rax to indicate success.  NB: Don't use xorl to keep
	   EFLAGS for jne.  */
	movl	$0, %eax
	jne	L(jmp)
	/* Return to the new context if return address valid.  */
	pushq	%r10
	ret

L(jmp):
	/* Jump to the new context directly.  */
	jmp	*%r10

L(no_shstk):
#endif
	/* The following ret should return to the address set with
	   getcontext.  Therefore push the address on the stack.  */
	movq	oRIP(%rdx), %rcx
	pushq	%rcx

	/* Setup registers used for passing args.  */
	movq	oRDI(%rdx), %rdi
	movq	oRSI(%rdx), %rsi
	movq	oRCX(%rdx), %rcx
	movq	oR8(%rdx), %r8
	movq	oR9(%rdx), %r9

	/* Setup finally %rdx.  */
	movq	oRDX(%rdx), %rdx

	/* Clear rax to indicate success.  */
	xorl	%eax, %eax
	ret
PSEUDO_END(__swapcontext)
250
251weak_alias (__swapcontext, swapcontext)
252