/* Function exp vectorized with AVX-512. KNL and SKX versions.
   Copyright (C) 2014-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_d_exp_data.h"
#include "svml_d_wrapper_impl.h"

	.text
ENTRY (_ZGVeN8v_exp_knl)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
#else
/*
   ALGORITHM DESCRIPTION:

   Argument representation:
   N = rint(X*2^k/ln2) = 2^k*M+j
   X = N*ln2/2^k + r = M*ln2 + ln2*(j/2^k) + r
   then -ln2/2^(k+1) < r < ln2/2^(k+1)
   Alternatively:
   N = trunc(X*2^k/ln2)
   then 0 < r < ln2/2^k

   Result calculation:
   exp(X) = exp(M*ln2 + ln2*(j/2^k) + r)
   = 2^M * 2^(j/2^k) * exp(r)
   2^M is calculated by bit manipulation
   2^(j/2^k) is stored in table
   exp(r) is approximated by polynomial

   The table lookup is skipped if k = 0.  */
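
/* Illustrative only: a scalar C sketch of the reduction and
   reconstruction performed below.  The names (InvLn2, Shifter, Ln2hi,
   Ln2lo, b0..b2, T) and the bit-cast helpers as_uint64/as_double are
   hypothetical stand-ins for the __svml_dexp_data fields, and K = 10
   is inferred from the 52-K = 42 shift used below; this sketches the
   scheme, not the exact table layout.

     double   dM = x * InvLn2 + Shifter; // InvLn2 = 2^K/ln2; adding
     double   dN = dM - Shifter;         //   Shifter rounds, so
                                         //   dN = rint(x*2^K/ln2)
     double   r  = x - dN * Ln2hi;       // ln2/2^K split into hi+lo
     r           = r - dN * Ln2lo;       //   parts keeps r accurate
     uint64_t lM = as_uint64 (dM);       // raw IEEE-754 bits of dM
     uint64_t j  = lM & ((1ULL << K) - 1);          // low K bits: index
     uint64_t e  = (lM & ~((1ULL << K) - 1)) << (52 - K); // M at bit 52
     double   p  = b0 + r * (b0 + r * (b1 + r * b2));     // ~exp(r)
     double   t  = T[j] * p;             // 2^(j/2^K) * exp(r)
     return as_double (as_uint64 (t) + e);  // scale result by 2^M
*/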

	pushq     %rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq      %rsp, %rbp
	cfi_def_cfa_register (%rbp)
	andq      $-64, %rsp
	subq      $1280, %rsp
	movq      __svml_dexp_data@GOTPCREL(%rip), %rax

/* dR = X - dN*dbLn2hi, dbLn2hi is 52-8-k hi bits of ln2/2^k */
	vmovaps   %zmm0, %zmm8

/* iAbsX = (int)(lX>>32), lX = *(longlong*)&X */
	vpsrlq    $32, %zmm0, %zmm1

/* iAbsX = iAbsX&iAbsMask */
	movl      $255, %edx
	vpmovqd   %zmm1, %ymm2
	kmovw     %edx, %k2

/* iRangeMask = (iAbsX>iDomainRange) */
	movl      $-1, %ecx

/* table lookup for dT[j] = 2^(j/2^k) */
	vpxord    %zmm11, %zmm11, %zmm11
	vmovups   __dbInvLn2(%rax), %zmm5
	vmovups   __dbLn2hi(%rax), %zmm7
	kxnorw    %k3, %k3, %k3

/* dM = X*dbInvLn2+dbShifter, dbInvLn2 = 2^k/Ln2 */
	vfmadd213pd __dbShifter(%rax), %zmm0, %zmm5
	vmovups   __dPC2(%rax), %zmm12

/* dN = dM-dbShifter, dN = rint(X*2^k/Ln2) */
	vsubpd    __dbShifter(%rax), %zmm5, %zmm9
	vmovups   __lIndexMask(%rax), %zmm4
	vfnmadd231pd %zmm9, %zmm7, %zmm8
	vpandd    __iAbsMask(%rax), %zmm2, %zmm2{%k2}

/* lIndex = (*(longlong*)&dM)&lIndexMask, lIndex is the lower K bits of lM */
	vpandq    %zmm4, %zmm5, %zmm10
	vgatherqpd (%rax,%zmm10,8), %zmm11{%k3}
	vpcmpgtd  __iDomainRange(%rax), %zmm2, %k1{%k2}

/* lM = (*(longlong*)&dM)&(~lIndexMask) */
	vpandnq   %zmm5, %zmm4, %zmm6
	vpbroadcastd %ecx, %zmm3{%k1}{z}

/* lM = lM<<(52-K), 2^M */
	vpsllq    $42, %zmm6, %zmm14

/* dR = dR - dN*dbLn2lo, dbLn2lo is 40..94 bits of lo part of ln2/2^k */
	vfnmadd132pd __dbLn2lo(%rax), %zmm8, %zmm9

/* Mask = iRangeMask?1:0, set mask for overflow/underflow */
	vptestmd  %zmm3, %zmm3, %k0{%k2}

/* exp(r) = b0+r*(b0+r*(b1+r*b2)) */
	vfmadd213pd __dPC1(%rax), %zmm9, %zmm12
	kmovw     %k0, %ecx
	movzbl    %cl, %ecx
	vfmadd213pd __dPC0(%rax), %zmm9, %zmm12
	vfmadd213pd __dPC0(%rax), %zmm9, %zmm12
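
/* Note: __dPC0 is applied twice deliberately.  The first FMA forms
   b0 + r*(b1 + r*b2); the second multiplies by r and adds b0 again,
   giving b0 + r*(b0 + r*(b1 + r*b2)), so b0 serves as both the
   constant and the linear coefficient of the formula above.  */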

/* 2^(j/2^k) * exp(r) */
	vmulpd    %zmm12, %zmm11, %zmm13

/* multiply by 2^M through integer add */
	vpaddq    %zmm14, %zmm13, %zmm1
	testl     %ecx, %ecx
	jne       .LBL_1_3

.LBL_1_2:
	cfi_remember_state
	vmovaps   %zmm1, %zmm0
	movq      %rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq      %rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret

.LBL_1_3:
	cfi_restore_state
	vmovups   %zmm0, 1152(%rsp)
	vmovups   %zmm1, 1216(%rsp)
	je        .LBL_1_2

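/* At least one lane is out of range: preserve %k4-%k7, %zmm16-%zmm31
   and the GPRs used below, then recompute the flagged lanes with the
   scalar exp.  */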
	xorb      %dl, %dl
	kmovw     %k4, 1048(%rsp)
	xorl      %eax, %eax
	kmovw     %k5, 1040(%rsp)
	kmovw     %k6, 1032(%rsp)
	kmovw     %k7, 1024(%rsp)
	vmovups   %zmm16, 960(%rsp)
	vmovups   %zmm17, 896(%rsp)
	vmovups   %zmm18, 832(%rsp)
	vmovups   %zmm19, 768(%rsp)
	vmovups   %zmm20, 704(%rsp)
	vmovups   %zmm21, 640(%rsp)
	vmovups   %zmm22, 576(%rsp)
	vmovups   %zmm23, 512(%rsp)
	vmovups   %zmm24, 448(%rsp)
	vmovups   %zmm25, 384(%rsp)
	vmovups   %zmm26, 320(%rsp)
	vmovups   %zmm27, 256(%rsp)
	vmovups   %zmm28, 192(%rsp)
	vmovups   %zmm29, 128(%rsp)
	vmovups   %zmm30, 64(%rsp)
	vmovups   %zmm31, (%rsp)
	movq      %rsi, 1064(%rsp)
	movq      %rdi, 1056(%rsp)
	movq      %r12, 1096(%rsp)
	cfi_offset_rel_rsp (12, 1096)
	movb      %dl, %r12b
	movq      %r13, 1088(%rsp)
	cfi_offset_rel_rsp (13, 1088)
	movl      %ecx, %r13d
	movq      %r14, 1080(%rsp)
	cfi_offset_rel_rsp (14, 1080)
	movl      %eax, %r14d
	movq      %r15, 1072(%rsp)
	cfi_offset_rel_rsp (15, 1072)
	cfi_remember_state

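/* Lane fix-up loop: %r13d holds the out-of-range bit mask from the
   range check and %r14d the current lane index; each iteration tests
   two lanes and routes flagged ones through the scalar exp.  */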
.LBL_1_6:
	btl       %r14d, %r13d
	jc        .LBL_1_12

.LBL_1_7:
	lea       1(%r14), %esi
	btl       %esi, %r13d
	jc        .LBL_1_10

.LBL_1_8:
	addb      $1, %r12b
	addl      $2, %r14d
	cmpb      $16, %r12b
	jb        .LBL_1_6

	kmovw     1048(%rsp), %k4
	movq      1064(%rsp), %rsi
	kmovw     1040(%rsp), %k5
	movq      1056(%rsp), %rdi
	kmovw     1032(%rsp), %k6
	movq      1096(%rsp), %r12
	cfi_restore (%r12)
	movq      1088(%rsp), %r13
	cfi_restore (%r13)
	kmovw     1024(%rsp), %k7
	vmovups   960(%rsp), %zmm16
	vmovups   896(%rsp), %zmm17
	vmovups   832(%rsp), %zmm18
	vmovups   768(%rsp), %zmm19
	vmovups   704(%rsp), %zmm20
	vmovups   640(%rsp), %zmm21
	vmovups   576(%rsp), %zmm22
	vmovups   512(%rsp), %zmm23
	vmovups   448(%rsp), %zmm24
	vmovups   384(%rsp), %zmm25
	vmovups   320(%rsp), %zmm26
	vmovups   256(%rsp), %zmm27
	vmovups   192(%rsp), %zmm28
	vmovups   128(%rsp), %zmm29
	vmovups   64(%rsp), %zmm30
	vmovups   (%rsp), %zmm31
	movq      1080(%rsp), %r14
	cfi_restore (%r14)
	movq      1072(%rsp), %r15
	cfi_restore (%r15)
	vmovups   1216(%rsp), %zmm1
	jmp       .LBL_1_2

.LBL_1_10:
	cfi_restore_state
	movzbl    %r12b, %r15d
	shlq      $4, %r15
	vmovsd    1160(%rsp,%r15), %xmm0
	call      JUMPTARGET(exp)
	vmovsd    %xmm0, 1224(%rsp,%r15)
	jmp       .LBL_1_8

.LBL_1_12:
	movzbl    %r12b, %r15d
	shlq      $4, %r15
	vmovsd    1152(%rsp,%r15), %xmm0
	call      JUMPTARGET(exp)
	vmovsd    %xmm0, 1216(%rsp,%r15)
	jmp       .LBL_1_7
#endif
END (_ZGVeN8v_exp_knl)

ENTRY (_ZGVeN8v_exp_skx)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
#else
/*
   ALGORITHM DESCRIPTION:

   Argument representation:
   N = rint(X*2^k/ln2) = 2^k*M+j
   X = N*ln2/2^k + r = M*ln2 + ln2*(j/2^k) + r
   then -ln2/2^(k+1) < r < ln2/2^(k+1)
   Alternatively:
   N = trunc(X*2^k/ln2)
   then 0 < r < ln2/2^k

   Result calculation:
   exp(X) = exp(M*ln2 + ln2*(j/2^k) + r)
   = 2^M * 2^(j/2^k) * exp(r)
   2^M is calculated by bit manipulation
   2^(j/2^k) is stored in table
   exp(r) is approximated by polynomial

   The table lookup is skipped if k = 0.  */
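
/* The algorithm is identical to the KNL variant above; this version
   differs only in instruction selection: out-of-range lanes are
   detected with a ymm compare plus vmovmskps instead of AVX-512 mask
   registers, and the table gather uses dword indices (vgatherdpd)
   rather than qword indices (vgatherqpd).  */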

	pushq     %rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq      %rsp, %rbp
	cfi_def_cfa_register (%rbp)
	andq      $-64, %rsp
	subq      $1280, %rsp
	movq      __svml_dexp_data@GOTPCREL(%rip), %rax

/* table lookup for dT[j] = 2^(j/2^k) */
	kxnorw    %k1, %k1, %k1

/* iAbsX = (int)(lX>>32), lX = *(longlong*)&X */
	vpsrlq    $32, %zmm0, %zmm1
	vmovups   __dbInvLn2(%rax), %zmm7
	vmovups   __dbShifter(%rax), %zmm5
	vmovups   __lIndexMask(%rax), %zmm6
	vmovups   __dbLn2hi(%rax), %zmm9
	vmovups   __dPC0(%rax), %zmm12

/* dM = X*dbInvLn2+dbShifter, dbInvLn2 = 2^k/Ln2 */
	vfmadd213pd %zmm5, %zmm0, %zmm7
	vpmovqd   %zmm1, %ymm2

/* dN = dM-dbShifter, dN = rint(X*2^k/Ln2) */
	vsubpd    %zmm5, %zmm7, %zmm11

/* iAbsX = iAbsX&iAbsMask */
	vpand     __iAbsMask(%rax), %ymm2, %ymm3

/* dR = X - dN*dbLn2hi, dbLn2hi is 52-8-k hi bits of ln2/2^k */
	vmovaps   %zmm0, %zmm10
	vfnmadd231pd %zmm11, %zmm9, %zmm10
	vmovups   __dPC2(%rax), %zmm9

/* dR = dR - dN*dbLn2lo, dbLn2lo is 40..94 bits of lo part of ln2/2^k */
	vfnmadd132pd __dbLn2lo(%rax), %zmm10, %zmm11

/* exp(r) = b0+r*(b0+r*(b1+r*b2)) */
	vfmadd213pd __dPC1(%rax), %zmm11, %zmm9
	vfmadd213pd %zmm12, %zmm11, %zmm9
	vfmadd213pd %zmm12, %zmm11, %zmm9

/* iRangeMask = (iAbsX>iDomainRange) */
	vpcmpgtd  __iDomainRange(%rax), %ymm3, %ymm4

/* Mask = iRangeMask?1:0, set mask for overflow/underflow */
	vmovmskps %ymm4, %ecx

/* lIndex = (*(longlong*)&dM)&lIndexMask, lIndex is the lower K bits of lM */
	vpandq    %zmm6, %zmm7, %zmm13
	vpmovqd   %zmm13, %ymm14
	vpxord    %zmm15, %zmm15, %zmm15
	vgatherdpd (%rax,%ymm14,8), %zmm15{%k1}

/* 2^(j/2^k) * exp(r) */
	vmulpd    %zmm9, %zmm15, %zmm10

/* lM = (*(longlong*)&dM)&(~lIndexMask) */
	vpandnq   %zmm7, %zmm6, %zmm8

/* lM = lM<<(52-K), 2^M */
	vpsllq    $42, %zmm8, %zmm1

/* multiply by 2^M through integer add */
	vpaddq    %zmm1, %zmm10, %zmm1
	testl     %ecx, %ecx
	jne       .LBL_2_3

.LBL_2_2:
	cfi_remember_state
	vmovaps   %zmm1, %zmm0
	movq      %rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq      %rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret

.LBL_2_3:
	cfi_restore_state
	vmovups   %zmm0, 1152(%rsp)
	vmovups   %zmm1, 1216(%rsp)
	je        .LBL_2_2

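/* Spill the same register set as in the KNL variant above and enter
   the lane fix-up loop.  */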
	xorb      %dl, %dl
	xorl      %eax, %eax
	kmovw     %k4, 1048(%rsp)
	kmovw     %k5, 1040(%rsp)
	kmovw     %k6, 1032(%rsp)
	kmovw     %k7, 1024(%rsp)
	vmovups   %zmm16, 960(%rsp)
	vmovups   %zmm17, 896(%rsp)
	vmovups   %zmm18, 832(%rsp)
	vmovups   %zmm19, 768(%rsp)
	vmovups   %zmm20, 704(%rsp)
	vmovups   %zmm21, 640(%rsp)
	vmovups   %zmm22, 576(%rsp)
	vmovups   %zmm23, 512(%rsp)
	vmovups   %zmm24, 448(%rsp)
	vmovups   %zmm25, 384(%rsp)
	vmovups   %zmm26, 320(%rsp)
	vmovups   %zmm27, 256(%rsp)
	vmovups   %zmm28, 192(%rsp)
	vmovups   %zmm29, 128(%rsp)
	vmovups   %zmm30, 64(%rsp)
	vmovups   %zmm31, (%rsp)
	movq      %rsi, 1064(%rsp)
	movq      %rdi, 1056(%rsp)
	movq      %r12, 1096(%rsp)
	cfi_offset_rel_rsp (12, 1096)
	movb      %dl, %r12b
	movq      %r13, 1088(%rsp)
	cfi_offset_rel_rsp (13, 1088)
	movl      %ecx, %r13d
	movq      %r14, 1080(%rsp)
	cfi_offset_rel_rsp (14, 1080)
	movl      %eax, %r14d
	movq      %r15, 1072(%rsp)
	cfi_offset_rel_rsp (15, 1072)
	cfi_remember_state

.LBL_2_6:
	btl       %r14d, %r13d
	jc        .LBL_2_12

.LBL_2_7:
	lea       1(%r14), %esi
	btl       %esi, %r13d
	jc        .LBL_2_10

.LBL_2_8:
	incb      %r12b
	addl      $2, %r14d
	cmpb      $16, %r12b
	jb        .LBL_2_6

	kmovw     1048(%rsp), %k4
	kmovw     1040(%rsp), %k5
	kmovw     1032(%rsp), %k6
	kmovw     1024(%rsp), %k7
	vmovups   960(%rsp), %zmm16
	vmovups   896(%rsp), %zmm17
	vmovups   832(%rsp), %zmm18
	vmovups   768(%rsp), %zmm19
	vmovups   704(%rsp), %zmm20
	vmovups   640(%rsp), %zmm21
	vmovups   576(%rsp), %zmm22
	vmovups   512(%rsp), %zmm23
	vmovups   448(%rsp), %zmm24
	vmovups   384(%rsp), %zmm25
	vmovups   320(%rsp), %zmm26
	vmovups   256(%rsp), %zmm27
	vmovups   192(%rsp), %zmm28
	vmovups   128(%rsp), %zmm29
	vmovups   64(%rsp), %zmm30
	vmovups   (%rsp), %zmm31
	vmovups   1216(%rsp), %zmm1
	movq      1064(%rsp), %rsi
	movq      1056(%rsp), %rdi
	movq      1096(%rsp), %r12
	cfi_restore (%r12)
	movq      1088(%rsp), %r13
	cfi_restore (%r13)
	movq      1080(%rsp), %r14
	cfi_restore (%r14)
	movq      1072(%rsp), %r15
	cfi_restore (%r15)
	jmp       .LBL_2_2

.LBL_2_10:
	cfi_restore_state
	movzbl    %r12b, %r15d
	shlq      $4, %r15
	vmovsd    1160(%rsp,%r15), %xmm0
	vzeroupper
	vmovsd    1160(%rsp,%r15), %xmm0
	call      JUMPTARGET(exp)
	vmovsd    %xmm0, 1224(%rsp,%r15)
	jmp       .LBL_2_8

.LBL_2_12:
	movzbl    %r12b, %r15d
	shlq      $4, %r15
	vmovsd    1152(%rsp,%r15), %xmm0
	vzeroupper
	vmovsd    1152(%rsp,%r15), %xmm0
	call      JUMPTARGET(exp)
	vmovsd    %xmm0, 1216(%rsp,%r15)
	jmp       .LBL_2_7

#endif
END (_ZGVeN8v_exp_skx)