/* Function pow vectorized with AVX-512. KNL and SKX versions.
   Copyright (C) 2014-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_d_pow_data.h"
#include "svml_d_wrapper_impl.h"
/* ALGORITHM DESCRIPTION:

     1) Calculating log2|x|
        Here we use the following formula.
        Let |x|=2^k1*X1, where k1 is integer, 1<=X1<2.
        Let C ~= 1/ln(2),
        Rcp1 ~= 1/X1, X2=Rcp1*X1,
        Rcp2 ~= 1/X2, X3=Rcp2*X2,
        Rcp3 ~= 1/X3, Rcp3C ~= C/X3.
        Then
          log2|x| = k1 + log2(1/Rcp1) + log2(1/Rcp2) + log2(C/Rcp3C) +
                    log2(X1*Rcp1*Rcp2*Rcp3C/C),
        where X1*Rcp1*Rcp2*Rcp3C = C*(1+q), q is very small.

        The values of Rcp1, log2(1/Rcp1), Rcp2, log2(1/Rcp2),
        Rcp3C, log2(C/Rcp3C) are taken from tables.
        Values of Rcp1, Rcp2, Rcp3C are such that RcpC=Rcp1*Rcp2*Rcp3C
        is exactly represented in target precision.

          log2(X1*Rcp1*Rcp2*Rcp3C/C) = log2(1+q) = ln(1+q)/ln2 =
          = 1/(ln2)*q - 1/(2ln2)*q^2 + 1/(3ln2)*q^3 - ... =
          = 1/(C*ln2)*cq - 1/(2*C^2*ln2)*cq^2 + 1/(3*C^3*ln2)*cq^3 - ... =
          = (1 + a1)*cq + a2*cq^2 + a3*cq^3 + ...,
        where cq = X1*Rcp1*Rcp2*Rcp3C-C,
          a1 = 1/(C*ln(2)) - 1 is small,
          a2 = 1/(2*C^2*ln2),
          a3 = 1/(3*C^3*ln2),
          ...
        We get three parts of the log2 result: HH+HL+HLL ~= log2|x|.

     2) Calculation of y*(HH+HL+HLL).
        Split y into YHi+YLo.
        Get high PH and medium PL parts of y*log2|x|.
        Get low PLL part of y*log2|x|.
        Now we have PH+PL+PLL ~= y*log2|x|.

     3) Calculation of 2^(PH+PL+PLL).
        The mathematical idea of computing 2^(PH+PL+PLL) is as follows.
        Let's represent PH+PL+PLL in the form N + j/2^expK + Z,
        where expK=7 in this implementation, N and j are integers,
        0<=j<=2^expK-1, |Z|<2^(-expK-1).
        Hence 2^(PH+PL+PLL) ~= 2^N * 2^(j/2^expK) * 2^Z,
        where 2^(j/2^expK) is stored in a table, and
        2^Z ~= 1 + B1*Z + B2*Z^2 + ... + B5*Z^5.

        We compute 2^(PH+PL+PLL) as follows.
        Break PH into PHH + PHL, where PHH = N + j/2^expK.
        Z = PHL + PL + PLL
        Exp2Poly = B1*Z + B2*Z^2 + ... + B5*Z^5
        Get 2^(j/2^expK) from the table in the form THI+TLO.
        Now we have 2^(PH+PL+PLL) ~= 2^N * (THI + TLO) * (1 + Exp2Poly).

        Get the significand of 2^(PH+PL+PLL) in the form ResHi+ResLo:
          ResHi := THI
          ResLo := THI * Exp2Poly + TLO

        Get the exponent ERes of the result:
          Res := ResHi + ResLo;
          Result := ex(Res) + N.  */
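
/* For reference, the same three-step scheme as a scalar C sketch.
   This is an illustration only, under simplifying assumptions: it
   calls the libm log2/exp2 routines instead of the table-driven
   reciprocal and 2^(j/2^expK) lookups used by the vector code below,
   it ignores special cases (x <= 0, NaN, overflow/underflow), and
   the helper names split and pow_sketch are made up for this sketch.

   #include <math.h>

   // Veltkamp split: a == *hi + *lo exactly; *hi keeps the top ~26
   // significand bits (assumes round-to-nearest and no overflow).
   static void
   split (double a, double *hi, double *lo)
   {
     double t = a * (0x1p27 + 1.0);
     *hi = t - (t - a);
     *lo = a - *hi;
   }

   static double
   pow_sketch (double x, double y)
   {
     double l, lhi, llo, yhi, ylo, ph, pl, n, z;

     // Step 1: log2|x|, split into high and low parts; this stands
     // in for the table-driven HH + HL + HLL computation above.
     l = log2 (fabs (x));
     split (l, &lhi, &llo);

     // Step 2: y * log2|x| as a high part PH plus a correction PL.
     split (y, &yhi, &ylo);
     ph = yhi * lhi;
     pl = yhi * llo + ylo * l;

     // Step 3: 2^(PH+PL) = 2^N * 2^Z with N an integer and |Z| small.
     // The vector code reads 2^(j/2^expK) from a table and evaluates
     // a degree-5 polynomial for 2^Z; here exp2 covers both at once.
     n = nearbyint (ph + pl);
     z = (ph - n) + pl;
     return scalbn (exp2 (z), (int) n);
   }  */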

        .text
ENTRY (_ZGVeN8vv_pow_knl)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
#else
        pushq %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq $-64, %rsp
        subq $1344, %rsp
        vpsrlq $32, %zmm0, %zmm13
        vmovaps %zmm1, %zmm12
        movq __svml_dpow_data@GOTPCREL(%rip), %rax
        movl $255, %edx
        vpmovqd %zmm13, %ymm10
        vpsrlq $32, %zmm12, %zmm14
        kmovw %edx, %k1
        movl $-1, %ecx
        vpmovqd %zmm14, %ymm15

/* x1 = x; Hi(x1) = (Hi(x1)&0x000fffff)|0x3ff00000 */
        vmovups _dbOne(%rax), %zmm6

/* i = (((Hi(x) & 0x000ffe00) + 0x00000200) >> 10); -> i = (b1..b11 + 1) / 2 */
        vmovaps %zmm10, %zmm5

/* k = Hi(x); k = k - 0x3fe7fe00; k = k >> 20 */
        vpsubd _i3fe7fe00(%rax), %zmm10, %zmm14{%k1}
        vpandd _iIndexMask(%rax), %zmm10, %zmm5{%k1}
        vpsrad $20, %zmm14, %zmm14{%k1}
        vpxord %zmm9, %zmm9, %zmm9
        vpaddd _HIDELTA(%rax), %zmm10, %zmm3{%k1}
        vpaddd _iIndexAdd(%rax), %zmm5, %zmm5{%k1}
        vpxord %zmm7, %zmm7, %zmm7
        vpaddd _i2p20_2p19(%rax), %zmm14, %zmm14{%k1}
        vpcmpd $1, _LORANGE(%rax), %zmm3, %k2{%k1}
        vpsrld $10, %zmm5, %zmm5{%k1}
        vpandd _ABSMASK(%rax), %zmm15, %zmm2{%k1}
        vpbroadcastd %ecx, %zmm1{%k2}{z}

/* Index for reciprocal table */
        vpslld $3, %zmm5, %zmm8{%k1}
        kxnorw %k2, %k2, %k2
        vgatherdpd 11712(%rax,%ymm8), %zmm9{%k2}
        vpmovzxdq %ymm14, %zmm10

/* Index for log2 table */
        vpslld $4, %zmm5, %zmm13{%k1}
        kxnorw %k2, %k2, %k2
        vpsllq $32, %zmm10, %zmm3
        vpxord %zmm8, %zmm8, %zmm8
        vpcmpd $5, _INF(%rax), %zmm2, %k3{%k1}
        vpbroadcastd %ecx, %zmm4{%k3}{z}
        vpternlogq $248, _iMantissaMask(%rax), %zmm0, %zmm6
        kxnorw %k3, %k3, %k3
        vpternlogq $168, _iffffffff00000000(%rax), %zmm10, %zmm3

/* x1Hi=x1; Lo(x1Hi)&=0xf8000000; x1Lo = x1-x1Hi */
        vpandq _iHighMask(%rax), %zmm6, %zmm2
        vgatherdpd 19976(%rax,%ymm13), %zmm8{%k2}
        vpord %zmm4, %zmm1, %zmm11{%k1}
        vsubpd _db2p20_2p19(%rax), %zmm3, %zmm1
        vsubpd %zmm2, %zmm6, %zmm5

/* r1 = x1*rcp1 */
        vmulpd %zmm9, %zmm6, %zmm6
        vgatherdpd 19968(%rax,%ymm13), %zmm7{%k3}

/* cq = c+r1 */
        vaddpd _LHN(%rax), %zmm6, %zmm4

/* E = -r1+__fence(x1Hi*rcp1) */
        vfmsub213pd %zmm6, %zmm9, %zmm2

/* T = k + L1hi */
        vaddpd %zmm7, %zmm1, %zmm7

/* E=E+x1Lo*rcp1 */
        vfmadd213pd %zmm2, %zmm9, %zmm5

/* T_Rh = T + cq */
        vaddpd %zmm4, %zmm7, %zmm3

/* Rl = T-T_Rh; -> -Rh */
        vsubpd %zmm3, %zmm7, %zmm9

/* Rl=Rl+cq */
        vaddpd %zmm9, %zmm4, %zmm6

/* T_Rh_Eh = T_Rh + E */
        vaddpd %zmm5, %zmm3, %zmm9

/* HLL = T_Rh - T_Rh_Eh; -> -Eh */
        vsubpd %zmm9, %zmm3, %zmm2

/* cq = cq + E; */
        vaddpd %zmm5, %zmm4, %zmm4

/* HLL+=E; -> El */
        vaddpd %zmm2, %zmm5, %zmm1
        vmovups _clv_2(%rax), %zmm5

/* HLL = HLL + (((((((a7)*cq+a6)*cq+a5)*cq+a4)*cq+a3)*cq+a2)*cq+a1)*cq */
        vfmadd213pd _clv_3(%rax), %zmm4, %zmm5

/* HLL+=Rl */
        vaddpd %zmm6, %zmm1, %zmm7

/* 2^(y*(HH+HL+HLL)) starts here:
   yH = y; Lo(yH)&=0xf8000000
*/
        vpandq _iHighMask(%rax), %zmm12, %zmm6

/* yL = y-yH */
        vsubpd %zmm6, %zmm12, %zmm2
        vfmadd213pd _clv_4(%rax), %zmm4, %zmm5

/* HLL+=L1lo */
        vaddpd %zmm8, %zmm7, %zmm8
        vfmadd213pd _clv_5(%rax), %zmm4, %zmm5
        vfmadd213pd _clv_6(%rax), %zmm4, %zmm5
        vfmadd213pd _clv_7(%rax), %zmm4, %zmm5
        vfmadd213pd %zmm8, %zmm4, %zmm5

/* T_Rh_Eh_HLLhi = T_Rh_Eh + HLL */
        vaddpd %zmm5, %zmm9, %zmm13

/* HLLhi = T_Rh_Eh_HLLhi - T_Rh_Eh */
        vsubpd %zmm9, %zmm13, %zmm10

/* HLL = HLL - HLLhi */
        vsubpd %zmm10, %zmm5, %zmm3

/* HH = T_Rh_Eh_HLLhi; Lo(HH)&=0xf8000000 */
        vpandq _iHighMask(%rax), %zmm13, %zmm5

/* pH = yH*HH */
        vmulpd %zmm5, %zmm6, %zmm1

/* HL = T_Rh_Eh_HLLhi-HH */
        vsubpd %zmm5, %zmm13, %zmm4
        vpsrlq $32, %zmm1, %zmm14

/* pLL = y*HLL;
   pHH = pH + *(double*)&db2p45_2p44
*/
        vaddpd _db2p45_2p44(%rax), %zmm1, %zmm10
        vpmovqd %zmm14, %ymm15
        vpandd _ABSMASK(%rax), %zmm15, %zmm14{%k1}
        vpcmpd $5, _DOMAINRANGE(%rax), %zmm14, %k3{%k1}

/* T1 = ((double*)exp2_tbl)[ 2*j ] */
        vpxord %zmm14, %zmm14, %zmm14
        vpbroadcastd %ecx, %zmm13{%k3}{z}
        vpord %zmm13, %zmm11, %zmm11{%k1}
        vptestmd %zmm11, %zmm11, %k0{%k1}

/* pL=yL*HL+yH*HL; pL+=yL*HH */
        vmulpd %zmm4, %zmm2, %zmm11
        kmovw %k0, %ecx
        vfmadd213pd %zmm11, %zmm4, %zmm6

/* pHH = pHH - *(double*)&db2p45_2p44 */
        vsubpd _db2p45_2p44(%rax), %zmm10, %zmm11
        vpmovqd %zmm10, %ymm4
        movzbl %cl, %ecx

/* _n = Lo(pHH);
   _n = _n & 0xffffff80;
   _n = _n >> 7;
   Hi(_2n) = (0x3ff+_n)<<20; Lo(_2n) = 0; -> 2^n
*/
        vpslld $13, %zmm4, %zmm7{%k1}

/* j = Lo(pHH)&0x0000007f */
        vpandd _jIndexMask(%rax), %zmm4, %zmm9{%k1}
        vfmadd213pd %zmm6, %zmm5, %zmm2

/* pHL = pH - pHH */
        vsubpd %zmm11, %zmm1, %zmm1
        vpaddd _iOne(%rax), %zmm7, %zmm7{%k1}

/* t=pL+pLL; t+=pHL */
        vfmadd231pd %zmm12, %zmm3, %zmm2
        vpslld $4, %zmm9, %zmm9{%k1}
        kxnorw %k1, %k1, %k1
        vgatherdpd 36416(%rax,%ymm9), %zmm14{%k1}
        vpmovzxdq %ymm7, %zmm8
        vaddpd %zmm1, %zmm2, %zmm2
        vmovups _cev_1(%rax), %zmm1
        vpsllq $32, %zmm8, %zmm13
        vpternlogq $168, _ifff0000000000000(%rax), %zmm8, %zmm13
        vfmadd213pd _cev_2(%rax), %zmm2, %zmm1
        vmulpd %zmm14, %zmm13, %zmm15
        vfmadd213pd _cev_3(%rax), %zmm2, %zmm1
        vmulpd %zmm2, %zmm15, %zmm3
        vfmadd213pd _cev_4(%rax), %zmm2, %zmm1
        vfmadd213pd _cev_5(%rax), %zmm2, %zmm1
        vfmadd213pd %zmm15, %zmm3, %zmm1
        testl %ecx, %ecx
        jne .LBL_1_3

.LBL_1_2:
        cfi_remember_state
        vmovaps %zmm1, %zmm0
        movq %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_1_3:
        cfi_restore_state
        vmovups %zmm0, 1152(%rsp)
        vmovups %zmm12, 1216(%rsp)
        vmovups %zmm1, 1280(%rsp)
        je .LBL_1_2

        xorb %dl, %dl
        kmovw %k4, 1048(%rsp)
        xorl %eax, %eax
        kmovw %k5, 1040(%rsp)
        kmovw %k6, 1032(%rsp)
        kmovw %k7, 1024(%rsp)
        vmovups %zmm16, 960(%rsp)
        vmovups %zmm17, 896(%rsp)
        vmovups %zmm18, 832(%rsp)
        vmovups %zmm19, 768(%rsp)
        vmovups %zmm20, 704(%rsp)
        vmovups %zmm21, 640(%rsp)
        vmovups %zmm22, 576(%rsp)
        vmovups %zmm23, 512(%rsp)
        vmovups %zmm24, 448(%rsp)
        vmovups %zmm25, 384(%rsp)
        vmovups %zmm26, 320(%rsp)
        vmovups %zmm27, 256(%rsp)
        vmovups %zmm28, 192(%rsp)
        vmovups %zmm29, 128(%rsp)
        vmovups %zmm30, 64(%rsp)
        vmovups %zmm31, (%rsp)
        movq %rsi, 1064(%rsp)
        movq %rdi, 1056(%rsp)
        movq %r12, 1096(%rsp)
        cfi_offset_rel_rsp (12, 1096)
        movb %dl, %r12b
        movq %r13, 1088(%rsp)
        cfi_offset_rel_rsp (13, 1088)
        movl %ecx, %r13d
        movq %r14, 1080(%rsp)
        cfi_offset_rel_rsp (14, 1080)
        movl %eax, %r14d
        movq %r15, 1072(%rsp)
        cfi_offset_rel_rsp (15, 1072)
        cfi_remember_state

.LBL_1_6:
        btl %r14d, %r13d
        jc .LBL_1_12

.LBL_1_7:
        lea 1(%r14), %esi
        btl %esi, %r13d
        jc .LBL_1_10

.LBL_1_8:
        addb $1, %r12b
        addl $2, %r14d
        cmpb $16, %r12b
        jb .LBL_1_6

        kmovw 1048(%rsp), %k4
        movq 1064(%rsp), %rsi
        kmovw 1040(%rsp), %k5
        movq 1056(%rsp), %rdi
        kmovw 1032(%rsp), %k6
        movq 1096(%rsp), %r12
        cfi_restore (%r12)
        movq 1088(%rsp), %r13
        cfi_restore (%r13)
        kmovw 1024(%rsp), %k7
        vmovups 960(%rsp), %zmm16
        vmovups 896(%rsp), %zmm17
        vmovups 832(%rsp), %zmm18
        vmovups 768(%rsp), %zmm19
        vmovups 704(%rsp), %zmm20
        vmovups 640(%rsp), %zmm21
        vmovups 576(%rsp), %zmm22
        vmovups 512(%rsp), %zmm23
        vmovups 448(%rsp), %zmm24
        vmovups 384(%rsp), %zmm25
        vmovups 320(%rsp), %zmm26
        vmovups 256(%rsp), %zmm27
        vmovups 192(%rsp), %zmm28
        vmovups 128(%rsp), %zmm29
        vmovups 64(%rsp), %zmm30
        vmovups (%rsp), %zmm31
        movq 1080(%rsp), %r14
        cfi_restore (%r14)
        movq 1072(%rsp), %r15
        cfi_restore (%r15)
        vmovups 1280(%rsp), %zmm1
        jmp .LBL_1_2

.LBL_1_10:
        cfi_restore_state
        movzbl %r12b, %r15d
        shlq $4, %r15
        vmovsd 1160(%rsp,%r15), %xmm0
        vmovsd 1224(%rsp,%r15), %xmm1
        call JUMPTARGET(pow)
        vmovsd %xmm0, 1288(%rsp,%r15)
        jmp .LBL_1_8

.LBL_1_12:
        movzbl %r12b, %r15d
        shlq $4, %r15
        vmovsd 1152(%rsp,%r15), %xmm0
        vmovsd 1216(%rsp,%r15), %xmm1
        call JUMPTARGET(pow)
        vmovsd %xmm0, 1280(%rsp,%r15)
        jmp .LBL_1_7

#endif
END (_ZGVeN8vv_pow_knl)

ENTRY (_ZGVeN8vv_pow_skx)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
#else
        pushq %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq $-64, %rsp
        subq $1344, %rsp
        vpsrlq $32, %zmm0, %zmm10
        kxnorw %k1, %k1, %k1
        kxnorw %k2, %k2, %k2
        kxnorw %k3, %k3, %k3
        vpmovqd %zmm10, %ymm7
        movq __svml_dpow_data@GOTPCREL(%rip), %rax
        vmovaps %zmm1, %zmm6
        vpsrlq $32, %zmm6, %zmm13

/* i = (((Hi(x) & 0x000ffe00) + 0x00000200) >> 10); -> i = (b1..b11 + 1) / 2 */
        vpand _iIndexMask(%rax), %ymm7, %ymm15
        vpaddd _HIDELTA(%rax), %ymm7, %ymm2

/* k = Hi(x); k = k - 0x3fe7fe00; k = k >> 20 */
        vpsubd _i3fe7fe00(%rax), %ymm7, %ymm7
        vmovdqu _ABSMASK(%rax), %ymm4
        vmovdqu _LORANGE(%rax), %ymm3

/* x1 = x; Hi(x1) = (Hi(x1)&0x000fffff)|0x3ff00000 */
        vmovups _dbOne(%rax), %zmm11
        vmovdqu _INF(%rax), %ymm5
        vpaddd _iIndexAdd(%rax), %ymm15, %ymm12
        vpmovqd %zmm13, %ymm14
        vpternlogq $248, _iMantissaMask(%rax), %zmm0, %zmm11
        vpsrld $10, %ymm12, %ymm10
        vpsrad $20, %ymm7, %ymm13

/* Index for reciprocal table */
        vpslld $3, %ymm10, %ymm8

/* Index for log2 table */
        vpslld $4, %ymm10, %ymm1
        vpcmpgtd %ymm2, %ymm3, %ymm3
        vpand %ymm4, %ymm14, %ymm2
        vpaddd _i2p20_2p19(%rax), %ymm13, %ymm14
        vpmovzxdq %ymm14, %zmm15
        vpsllq $32, %zmm15, %zmm7
        vpternlogq $168, _iffffffff00000000(%rax), %zmm15, %zmm7
        vsubpd _db2p20_2p19(%rax), %zmm7, %zmm13
        vpxord %zmm9, %zmm9, %zmm9
        vgatherdpd 11712(%rax,%ymm8), %zmm9{%k1}

/* T1 = ((double*)exp2_tbl)[ 2*j ] */
        kxnorw %k1, %k1, %k1
        vpxord %zmm12, %zmm12, %zmm12
        vpxord %zmm8, %zmm8, %zmm8
        vgatherdpd 19968(%rax,%ymm1), %zmm12{%k2}
        vgatherdpd 19976(%rax,%ymm1), %zmm8{%k3}
        vmovups _iHighMask(%rax), %zmm1

/* x1Hi=x1; Lo(x1Hi)&=0xf8000000; x1Lo = x1-x1Hi */
        vandpd %zmm1, %zmm11, %zmm10
        vsubpd %zmm10, %zmm11, %zmm15

/* r1 = x1*rcp1 */
        vmulpd %zmm9, %zmm11, %zmm11

/* E = -r1+__fence(x1Hi*rcp1) */
        vfmsub213pd %zmm11, %zmm9, %zmm10

/* cq = c+r1 */
        vaddpd _LHN(%rax), %zmm11, %zmm14

/* E=E+x1Lo*rcp1 */
        vfmadd213pd %zmm10, %zmm9, %zmm15

/* T = k + L1hi */
        vaddpd %zmm12, %zmm13, %zmm9

/* T_Rh = T + cq */
        vaddpd %zmm14, %zmm9, %zmm11

/* T_Rh_Eh = T_Rh + E */
        vaddpd %zmm15, %zmm11, %zmm13

/* Rl = T-T_Rh; -> -Rh */
        vsubpd %zmm11, %zmm9, %zmm12

/* HLL = T_Rh - T_Rh_Eh; -> -Eh */
        vsubpd %zmm13, %zmm11, %zmm9

/* Rl=Rl+cq */
        vaddpd %zmm12, %zmm14, %zmm10

/* HLL+=E; -> El */
        vaddpd %zmm9, %zmm15, %zmm7

/* HLL+=Rl */
        vaddpd %zmm10, %zmm7, %zmm12

/* 2^(y*(HH+HL+HLL)) starts here:
   yH = y; Lo(yH)&=0xf8000000
*/
        vandpd %zmm1, %zmm6, %zmm7

/* HLL+=L1lo */
        vaddpd %zmm8, %zmm12, %zmm12

/* cq = cq + E */
        vaddpd %zmm15, %zmm14, %zmm8
        vmovups _clv_2(%rax), %zmm14

/* HLL = HLL + (((((((a7)*cq+a6)*cq+a5)*cq+a4)*cq+a3)*cq+a2)*cq+a1)*cq */
        vfmadd213pd _clv_3(%rax), %zmm8, %zmm14
        vfmadd213pd _clv_4(%rax), %zmm8, %zmm14
        vfmadd213pd _clv_5(%rax), %zmm8, %zmm14
        vfmadd213pd _clv_6(%rax), %zmm8, %zmm14
        vfmadd213pd _clv_7(%rax), %zmm8, %zmm14
        vfmadd213pd %zmm12, %zmm8, %zmm14

/* yL = y-yH */
        vsubpd %zmm7, %zmm6, %zmm8

/* T_Rh_Eh_HLLhi = T_Rh_Eh + HLL */
        vaddpd %zmm14, %zmm13, %zmm15

/* HH = T_Rh_Eh_HLLhi; Lo(HH)&=0xf8000000 */
        vandpd %zmm1, %zmm15, %zmm11

/* HLLhi = T_Rh_Eh_HLLhi - T_Rh_Eh */
        vsubpd %zmm13, %zmm15, %zmm13

/* pH = yH*HH */
        vmulpd %zmm11, %zmm7, %zmm9

/* HLL = HLL - HLLhi */
        vsubpd %zmm13, %zmm14, %zmm12

/* HL = T_Rh_Eh_HLLhi-HH */
        vsubpd %zmm11, %zmm15, %zmm10
        vpsrlq $32, %zmm9, %zmm1
        vmovdqu _DOMAINRANGE(%rax), %ymm13
        vpmovqd %zmm1, %ymm1
        vpand %ymm4, %ymm1, %ymm1
        vpcmpgtd %ymm5, %ymm2, %ymm4
        vpcmpeqd %ymm5, %ymm2, %ymm5
        vpternlogd $254, %ymm5, %ymm4, %ymm3
        vpcmpgtd %ymm13, %ymm1, %ymm2
        vpcmpeqd %ymm13, %ymm1, %ymm4
        vpternlogd $254, %ymm4, %ymm2, %ymm3

/* pLL = y*HLL */
        vmovups _db2p45_2p44(%rax), %zmm2

/* pHH = pH + *(double*)&db2p45_2p44 */
        vaddpd %zmm2, %zmm9, %zmm1
        vpmovqd %zmm1, %ymm5

/* j = Lo(pHH)&0x0000007f */
        vpand _jIndexMask(%rax), %ymm5, %ymm14
        vpslld $4, %ymm14, %ymm15
        vmovmskps %ymm3, %ecx

/* pL=yL*HL+yH*HL; pL+=yL*HH */
        vmulpd %zmm10, %zmm8, %zmm3
        vfmadd213pd %zmm3, %zmm10, %zmm7
        vfmadd213pd %zmm7, %zmm11, %zmm8

/* _n = Lo(pHH)
   _n = _n & 0xffffff80
   _n = _n >> 7
   Hi(_2n) = (0x3ff+_n)<<20; Lo(_2n) = 0; -> 2^n
*/
        vpslld $13, %ymm5, %ymm7

/* t=pL+pLL; t+=pHL */
        vfmadd231pd %zmm6, %zmm12, %zmm8
        vpaddd _iOne(%rax), %ymm7, %ymm10
        vpmovzxdq %ymm10, %zmm11
        vpsllq $32, %zmm11, %zmm3
        vpternlogq $168, _ifff0000000000000(%rax), %zmm11, %zmm3

/* pHH = pHH - *(double*)&db2p45_2p44 */
        vsubpd %zmm2, %zmm1, %zmm11
        vmovups _cev_1(%rax), %zmm2

/* pHL = pH - pHH */
        vsubpd %zmm11, %zmm9, %zmm9
        vaddpd %zmm9, %zmm8, %zmm8
        vfmadd213pd _cev_2(%rax), %zmm8, %zmm2
        vfmadd213pd _cev_3(%rax), %zmm8, %zmm2
        vfmadd213pd _cev_4(%rax), %zmm8, %zmm2
        vfmadd213pd _cev_5(%rax), %zmm8, %zmm2
        vpxord %zmm4, %zmm4, %zmm4
        vgatherdpd 36416(%rax,%ymm15), %zmm4{%k1}
        vmulpd %zmm4, %zmm3, %zmm1
        vmulpd %zmm8, %zmm1, %zmm12
        vfmadd213pd %zmm1, %zmm12, %zmm2
        testl %ecx, %ecx
        jne .LBL_2_3

.LBL_2_2:
        cfi_remember_state
        vmovaps %zmm2, %zmm0
        movq %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_2_3:
        cfi_restore_state
        vmovups %zmm0, 1152(%rsp)
        vmovups %zmm6, 1216(%rsp)
        vmovups %zmm2, 1280(%rsp)
        je .LBL_2_2

        xorb %dl, %dl
        xorl %eax, %eax
        kmovw %k4, 1048(%rsp)
        kmovw %k5, 1040(%rsp)
        kmovw %k6, 1032(%rsp)
        kmovw %k7, 1024(%rsp)
        vmovups %zmm16, 960(%rsp)
        vmovups %zmm17, 896(%rsp)
        vmovups %zmm18, 832(%rsp)
        vmovups %zmm19, 768(%rsp)
        vmovups %zmm20, 704(%rsp)
        vmovups %zmm21, 640(%rsp)
        vmovups %zmm22, 576(%rsp)
        vmovups %zmm23, 512(%rsp)
        vmovups %zmm24, 448(%rsp)
        vmovups %zmm25, 384(%rsp)
        vmovups %zmm26, 320(%rsp)
        vmovups %zmm27, 256(%rsp)
        vmovups %zmm28, 192(%rsp)
        vmovups %zmm29, 128(%rsp)
        vmovups %zmm30, 64(%rsp)
        vmovups %zmm31, (%rsp)
        movq %rsi, 1064(%rsp)
        movq %rdi, 1056(%rsp)
        movq %r12, 1096(%rsp)
        cfi_offset_rel_rsp (12, 1096)
        movb %dl, %r12b
        movq %r13, 1088(%rsp)
        cfi_offset_rel_rsp (13, 1088)
        movl %ecx, %r13d
        movq %r14, 1080(%rsp)
        cfi_offset_rel_rsp (14, 1080)
        movl %eax, %r14d
        movq %r15, 1072(%rsp)
        cfi_offset_rel_rsp (15, 1072)
        cfi_remember_state

.LBL_2_6:
        btl %r14d, %r13d
        jc .LBL_2_12

.LBL_2_7:
        lea 1(%r14), %esi
        btl %esi, %r13d
        jc .LBL_2_10

.LBL_2_8:
        incb %r12b
        addl $2, %r14d
        cmpb $16, %r12b
        jb .LBL_2_6

        kmovw 1048(%rsp), %k4
        kmovw 1040(%rsp), %k5
        kmovw 1032(%rsp), %k6
        kmovw 1024(%rsp), %k7
        vmovups 960(%rsp), %zmm16
        vmovups 896(%rsp), %zmm17
        vmovups 832(%rsp), %zmm18
        vmovups 768(%rsp), %zmm19
        vmovups 704(%rsp), %zmm20
        vmovups 640(%rsp), %zmm21
        vmovups 576(%rsp), %zmm22
        vmovups 512(%rsp), %zmm23
        vmovups 448(%rsp), %zmm24
        vmovups 384(%rsp), %zmm25
        vmovups 320(%rsp), %zmm26
        vmovups 256(%rsp), %zmm27
        vmovups 192(%rsp), %zmm28
        vmovups 128(%rsp), %zmm29
        vmovups 64(%rsp), %zmm30
        vmovups (%rsp), %zmm31
        vmovups 1280(%rsp), %zmm2
        movq 1064(%rsp), %rsi
        movq 1056(%rsp), %rdi
        movq 1096(%rsp), %r12
        cfi_restore (%r12)
        movq 1088(%rsp), %r13
        cfi_restore (%r13)
        movq 1080(%rsp), %r14
        cfi_restore (%r14)
        movq 1072(%rsp), %r15
        cfi_restore (%r15)
        jmp .LBL_2_2

.LBL_2_10:
        cfi_restore_state
        movzbl %r12b, %r15d
        shlq $4, %r15
        vmovsd 1224(%rsp,%r15), %xmm1
        vzeroupper
        vmovsd 1160(%rsp,%r15), %xmm0

        call JUMPTARGET(pow)

        vmovsd %xmm0, 1288(%rsp,%r15)
        jmp .LBL_2_8

.LBL_2_12:
        movzbl %r12b, %r15d
        shlq $4, %r15
        vmovsd 1216(%rsp,%r15), %xmm1
        vzeroupper
        vmovsd 1152(%rsp,%r15), %xmm0

        call JUMPTARGET(pow)

        vmovsd %xmm0, 1280(%rsp,%r15)
        jmp .LBL_2_7

#endif
END (_ZGVeN8vv_pow_skx)