/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2020 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses/>.
 */
/************************************************************************/
/*  MODULE_NAME: atnat2.c                                               */
/*                                                                      */
/*  FUNCTIONS: uatan2                                                   */
/*             atan2Mp                                                  */
/*             signArctan2                                              */
/*             normalized                                               */
/*                                                                      */
/*  FILES NEEDED: dla.h endian.h mpa.h mydefs.h atnat2.h                */
/*                mpatan.c mpatan2.c mpsqrt.c                           */
/*                uatan.tbl                                             */
/*                                                                      */
/* An ultimate atan2() routine. Given two IEEE double machine numbers y,*/
/* x it computes the correctly rounded (to nearest) value of atan2(y,x).*/
/*                                                                      */
/* Assumption: Machine arithmetic operations are performed in           */
/* round to nearest mode of IEEE 754 standard.                          */
/*                                                                      */
/************************************************************************/

#include <dla.h>
#include "mpa.h"
#include "MathLib.h"
#include "uatan.tbl"
#include "atnat2.h"
#include <fenv.h>
#include <float.h>
#include <math.h>
#include <math-barriers.h>
#include <math_private.h>
#include <fenv_private.h>
#include <stap-probe.h>
#include <libm-alias-finite.h>

#ifndef SECTION
# define SECTION
#endif

/************************************************************************/
/* An ultimate atan2 routine. Given two IEEE double machine numbers y,x */
/* it computes the correctly rounded (to nearest) value of atan2(y,x).  */
/* Assumption: Machine arithmetic operations are performed in           */
/* round to nearest mode of IEEE 754 standard.                          */
/************************************************************************/
static double atan2Mp (double, double, const int[]);
  /* Fix the sign and return after stage 1 or stage 2 */
static double
signArctan2 (double y, double z)
{
  return copysign (z, y);
}

static double normalized (double, double, double, double);
void __mpatan2 (mp_no *, mp_no *, mp_no *, int);

double
SECTION
__ieee754_atan2 (double y, double x)
{
  int i, de, ux, dx, uy, dy;
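  /* Precisions tried, in increasing order, by the stage-3 multi-precision
     fallback atan2Mp below.  */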
  static const int pr[MM] = { 6, 8, 10, 20, 32 };
  double ax, ay, u, du, u9, ua, v, vv, dv, t1, t2, t3, t7, t8,
         z, zz, cor, s1, ss1, s2, ss2;
#ifndef DLA_FMS
  double t4, t5, t6;
#endif
  number num;

  static const int ep = 59768832,   /*  57*16**5  */
                   em = -59768832;  /* -57*16**5  */

  /* x=NaN or y=NaN */
  num.d = x;
  ux = num.i[HIGH_HALF];
  dx = num.i[LOW_HALF];
  if ((ux & 0x7ff00000) == 0x7ff00000)
    {
      if (((ux & 0x000fffff) | dx) != 0x00000000)
        return x + y;
    }
  num.d = y;
  uy = num.i[HIGH_HALF];
  dy = num.i[LOW_HALF];
  if ((uy & 0x7ff00000) == 0x7ff00000)
    {
      if (((uy & 0x000fffff) | dy) != 0x00000000)
        return y + y;
    }

  /* y=+-0 */
  if (uy == 0x00000000)
    {
      if (dy == 0x00000000)
        {
          if ((ux & 0x80000000) == 0x00000000)
            return 0;
          else
            return opi.d;
        }
    }
  else if (uy == 0x80000000)
    {
      if (dy == 0x00000000)
        {
          if ((ux & 0x80000000) == 0x00000000)
            return -0.0;
          else
            return mopi.d;
        }
    }

  /* x=+-0 */
  if (x == 0)
    {
      if ((uy & 0x80000000) == 0x00000000)
        return hpi.d;
      else
        return mhpi.d;
    }

  /* x=+-INF */
  if (ux == 0x7ff00000)
    {
      if (dx == 0x00000000)
        {
          if (uy == 0x7ff00000)
            {
              if (dy == 0x00000000)
                return qpi.d;
            }
          else if (uy == 0xfff00000)
            {
              if (dy == 0x00000000)
                return mqpi.d;
            }
          else
            {
              if ((uy & 0x80000000) == 0x00000000)
                return 0;
              else
                return -0.0;
            }
        }
    }
  else if (ux == 0xfff00000)
    {
      if (dx == 0x00000000)
        {
          if (uy == 0x7ff00000)
            {
              if (dy == 0x00000000)
                return tqpi.d;
            }
          else if (uy == 0xfff00000)
            {
              if (dy == 0x00000000)
                return mtqpi.d;
            }
          else
            {
              if ((uy & 0x80000000) == 0x00000000)
                return opi.d;
              else
                return mopi.d;
            }
        }
    }

  /* y=+-INF */
  if (uy == 0x7ff00000)
    {
      if (dy == 0x00000000)
        return hpi.d;
    }
  else if (uy == 0xfff00000)
    {
      if (dy == 0x00000000)
        return mhpi.d;
    }

  SET_RESTORE_ROUND (FE_TONEAREST);
  /* either x/y or y/x is very close to zero */
  ax = (x < 0) ? -x : x;
  ay = (y < 0) ? -y : y;
  de = (uy & 0x7ff00000) - (ux & 0x7ff00000);
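  /* de is the difference of the biased exponent fields of y and x (the
     exponent starts at bit 20 of the high word).  A difference of at least
     57 binary exponents (ep = 57*2^20) means one of y/x and x/y is far too
     small to affect the rounded result, so atan2 collapses to +-pi/2, +-pi
     or y/x itself below.  */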
  if (de >= ep)
    {
      return ((y > 0) ? hpi.d : mhpi.d);
    }
  else if (de <= em)
    {
      if (x > 0)
        {
          double ret;
          if ((z = ay / ax) < TWOM1022)
            ret = normalized (ax, ay, y, z);
          else
            ret = signArctan2 (y, z);
          if (fabs (ret) < DBL_MIN)
            {
              double vret = ret ? ret : DBL_MIN;
              double force_underflow = vret * vret;
              math_force_eval (force_underflow);
            }
          return ret;
        }
      else
        {
          return ((y > 0) ? opi.d : mopi.d);
        }
    }

  /* if either x or y is extremely close to zero, scale abs(x), abs(y). */
  if (ax < twom500.d || ay < twom500.d)
    {
      ax *= two500.d;
      ay *= two500.d;
    }

  /* Likewise for large x and y. */
  if (ax > two500.d || ay > two500.d)
    {
      ax *= twom500.d;
      ay *= twom500.d;
    }

  /* x,y which are neither special nor extreme */
  if (ay < ax)
    {
      u = ay / ax;
      EMULV (ax, u, v, vv, t1, t2, t3, t4, t5);
      du = ((ay - v) - vv) / ax;
    }
  else
    {
      u = ax / ay;
      EMULV (ay, u, v, vv, t1, t2, t3, t4, t5);
      du = ((ax - v) - vv) / ay;
    }
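  /* u is now the rounded quotient of the smaller magnitude by the larger one
     and du its correction term: EMULV forms the exact product v + vv of the
     divisor and u, so u + du approximates the quotient to roughly
     double-double accuracy.  */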

  if (x > 0)
    {
      /* (i) x>0, abs(y)< abs(x): atan(ay/ax) */
      if (ay < ax)
        {
          if (u < inv16.d)
            {
              v = u * u;

              zz = du + u * v * (d3.d
                                 + v * (d5.d
                                        + v * (d7.d
                                               + v * (d9.d
                                                      + v * (d11.d
                                                             + v * d13.d)))));

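              /* Round-trip test used throughout: if adding and subtracting
                 the error bound (here u1.d * u) leaves the rounded sum
                 unchanged, z is already the correctly rounded result;
                 otherwise fall through to the next, more accurate stage.  */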
              if ((z = u + (zz - u1.d * u)) == u + (zz + u1.d * u))
                return signArctan2 (y, z);

              MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
              s1 = v * (f11.d + v * (f13.d
                                     + v * (f15.d + v * (f17.d + v * f19.d))));
              ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              MUL2 (u, du, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (u, du, s2, ss2, s1, ss1, t1, t2);

              if ((z = s1 + (ss1 - u5.d * s1)) == s1 + (ss1 + u5.d * s1))
                return signArctan2 (y, z);

              return atan2Mp (x, y, pr);
            }

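          /* Adding and subtracting 2^52 rounds TWO8 * u (i.e. 256 * u) to the
             nearest integer, so i indexes the cij/hij tables; subtracting 16
             maps u in [1/16, 1] onto the start of the tables.  */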
          i = (TWO52 + TWO8 * u) - TWO52;
          i -= 16;
          t3 = u - cij[i][0].d;
          EADD (t3, du, v, dv);
          t1 = cij[i][1].d;
          t2 = cij[i][2].d;
          zz = v * t2 + (dv * t2
                         + v * v * (cij[i][3].d
                                    + v * (cij[i][4].d
                                           + v * (cij[i][5].d
                                                  + v * cij[i][6].d))));
          if (i < 112)
            {
              if (i < 48)
                u9 = u91.d;     /* u < 1/4        */
              else
                u9 = u92.d;     /* 1/4 <= u < 1/2 */
            }
          else
            {
              if (i < 176)
                u9 = u93.d;     /* 1/2 <= u < 3/4 */
              else
                u9 = u94.d;     /* 3/4 <= u <= 1  */
            }
          if ((z = t1 + (zz - u9 * t1)) == t1 + (zz + u9 * t1))
            return signArctan2 (y, z);

          t1 = u - hij[i][0].d;
          EADD (t1, du, v, vv);
          s1 = v * (hij[i][11].d
                    + v * (hij[i][12].d
                           + v * (hij[i][13].d
                                  + v * (hij[i][14].d
                                         + v * hij[i][15].d))));
          ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);

          if ((z = s2 + (ss2 - ub.d * s2)) == s2 + (ss2 + ub.d * s2))
            return signArctan2 (y, z);
          return atan2Mp (x, y, pr);
        }

      /* (ii) x>0, abs(x)<=abs(y): pi/2-atan(ax/ay) */
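      /* hpi.d + hpi1.d holds pi/2 as a double-double (high and low part), and
         opi.d + opi1.d likewise holds pi further below, so the reductions
         against pi/2 and pi keep extra precision.  */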
      if (u < inv16.d)
        {
          v = u * u;
          zz = u * v * (d3.d
                        + v * (d5.d
                               + v * (d7.d
                                      + v * (d9.d
                                             + v * (d11.d
                                                    + v * d13.d)))));
          ESUB (hpi.d, u, t2, cor);
          t3 = ((hpi1.d + cor) - du) - zz;
          if ((z = t2 + (t3 - u2.d)) == t2 + (t3 + u2.d))
            return signArctan2 (y, z);

          MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
          s1 = v * (f11.d
                    + v * (f13.d
                           + v * (f15.d + v * (f17.d + v * f19.d))));
          ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (u, du, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (u, du, s2, ss2, s1, ss1, t1, t2);
          SUB2 (hpi.d, hpi1.d, s1, ss1, s2, ss2, t1, t2);

          if ((z = s2 + (ss2 - u6.d)) == s2 + (ss2 + u6.d))
            return signArctan2 (y, z);
          return atan2Mp (x, y, pr);
        }

      i = (TWO52 + TWO8 * u) - TWO52;
      i -= 16;
      v = (u - cij[i][0].d) + du;

      zz = hpi1.d - v * (cij[i][2].d
                         + v * (cij[i][3].d
                                + v * (cij[i][4].d
                                       + v * (cij[i][5].d
                                              + v * cij[i][6].d))));
      t1 = hpi.d - cij[i][1].d;
      if (i < 112)
        ua = ua1.d;     /* w < 1/2  */
      else
        ua = ua2.d;     /* w >= 1/2 */
      if ((z = t1 + (zz - ua)) == t1 + (zz + ua))
        return signArctan2 (y, z);

      t1 = u - hij[i][0].d;
      EADD (t1, du, v, vv);

      s1 = v * (hij[i][11].d
                + v * (hij[i][12].d
                       + v * (hij[i][13].d
                              + v * (hij[i][14].d
                                     + v * hij[i][15].d))));

      ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
      SUB2 (hpi.d, hpi1.d, s2, ss2, s1, ss1, t1, t2);

      if ((z = s1 + (ss1 - uc.d)) == s1 + (ss1 + uc.d))
        return signArctan2 (y, z);
      return atan2Mp (x, y, pr);
    }

  /* (iii) x<0, abs(x)< abs(y): pi/2+atan(ax/ay) */
  if (ax < ay)
    {
      if (u < inv16.d)
        {
          v = u * u;
          zz = u * v * (d3.d
                        + v * (d5.d
                               + v * (d7.d
                                      + v * (d9.d
                                             + v * (d11.d + v * d13.d)))));
          EADD (hpi.d, u, t2, cor);
          t3 = ((hpi1.d + cor) + du) + zz;
          if ((z = t2 + (t3 - u3.d)) == t2 + (t3 + u3.d))
            return signArctan2 (y, z);

          MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
          s1 = v * (f11.d
                    + v * (f13.d + v * (f15.d + v * (f17.d + v * f19.d))));
          ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (u, du, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (u, du, s2, ss2, s1, ss1, t1, t2);
          ADD2 (hpi.d, hpi1.d, s1, ss1, s2, ss2, t1, t2);

          if ((z = s2 + (ss2 - u7.d)) == s2 + (ss2 + u7.d))
            return signArctan2 (y, z);
          return atan2Mp (x, y, pr);
        }

      i = (TWO52 + TWO8 * u) - TWO52;
      i -= 16;
      v = (u - cij[i][0].d) + du;
      zz = hpi1.d + v * (cij[i][2].d
                         + v * (cij[i][3].d
                                + v * (cij[i][4].d
                                       + v * (cij[i][5].d
                                              + v * cij[i][6].d))));
      t1 = hpi.d + cij[i][1].d;
      if (i < 112)
        ua = ua1.d;     /* w < 1/2  */
      else
        ua = ua2.d;     /* w >= 1/2 */
      if ((z = t1 + (zz - ua)) == t1 + (zz + ua))
        return signArctan2 (y, z);

      t1 = u - hij[i][0].d;
      EADD (t1, du, v, vv);
      s1 = v * (hij[i][11].d
                + v * (hij[i][12].d
                       + v * (hij[i][13].d
                              + v * (hij[i][14].d
                                     + v * hij[i][15].d))));
      ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
      ADD2 (hpi.d, hpi1.d, s2, ss2, s1, ss1, t1, t2);

      if ((z = s1 + (ss1 - uc.d)) == s1 + (ss1 + uc.d))
        return signArctan2 (y, z);
      return atan2Mp (x, y, pr);
    }

  /* (iv) x<0, abs(y)<=abs(x): pi-atan(ay/ax) */
  if (u < inv16.d)
    {
      v = u * u;
      zz = u * v * (d3.d
                    + v * (d5.d
                           + v * (d7.d
                                  + v * (d9.d + v * (d11.d + v * d13.d)))));
      ESUB (opi.d, u, t2, cor);
      t3 = ((opi1.d + cor) - du) - zz;
      if ((z = t2 + (t3 - u4.d)) == t2 + (t3 + u4.d))
        return signArctan2 (y, z);

      MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
      s1 = v * (f11.d + v * (f13.d + v * (f15.d + v * (f17.d + v * f19.d))));
      ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
      MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (u, du, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (u, du, s2, ss2, s1, ss1, t1, t2);
      SUB2 (opi.d, opi1.d, s1, ss1, s2, ss2, t1, t2);

      if ((z = s2 + (ss2 - u8.d)) == s2 + (ss2 + u8.d))
        return signArctan2 (y, z);
      return atan2Mp (x, y, pr);
    }

  i = (TWO52 + TWO8 * u) - TWO52;
  i -= 16;
  v = (u - cij[i][0].d) + du;
  zz = opi1.d - v * (cij[i][2].d
                     + v * (cij[i][3].d
                            + v * (cij[i][4].d
                                   + v * (cij[i][5].d + v * cij[i][6].d))));
  t1 = opi.d - cij[i][1].d;
  if (i < 112)
    ua = ua1.d;     /* w < 1/2  */
  else
    ua = ua2.d;     /* w >= 1/2 */
  if ((z = t1 + (zz - ua)) == t1 + (zz + ua))
    return signArctan2 (y, z);

  t1 = u - hij[i][0].d;

  EADD (t1, du, v, vv);

  s1 = v * (hij[i][11].d
            + v * (hij[i][12].d
                   + v * (hij[i][13].d
                          + v * (hij[i][14].d + v * hij[i][15].d))));

  ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
  MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
  SUB2 (opi.d, opi1.d, s2, ss2, s1, ss1, t1, t2);

  if ((z = s1 + (ss1 - uc.d)) == s1 + (ss1 + uc.d))
    return signArctan2 (y, z);
  return atan2Mp (x, y, pr);
}

#ifndef __ieee754_atan2
libm_alias_finite (__ieee754_atan2, __atan2)
#endif

/* Treat the case where y/x is denormalized (subnormal).  */
static double
SECTION
normalized (double ax, double ay, double y, double z)
{
  int p;
  mp_no mpx, mpy, mpz, mperr, mpz2, mpt1;
  p = 6;
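  /* Recompute ay/ax in multi-precision (6 mpa digits), lower it by the error
     bound ue, and convert back to double before fixing the sign.  */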
  __dbl_mp (ax, &mpx, p);
  __dbl_mp (ay, &mpy, p);
  __dvd (&mpy, &mpx, &mpz, p);
  __dbl_mp (ue.d, &mpt1, p);
  __mul (&mpz, &mpt1, &mperr, p);
  __sub (&mpz, &mperr, &mpz2, p);
  __mp_dbl (&mpz2, &z, p);
  return signArctan2 (y, z);
}

/* Stage 3: Perform a multi-precision computation */
static double
SECTION
atan2Mp (double x, double y, const int pr[])
{
  double z1, z2;
  int i, p;
  mp_no mpx, mpy, mpz, mpz1, mpz2, mperr, mpt1;
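  /* Try each precision in pr[] in turn: compute atan2 (y, x) in
     multi-precision, widen it by the error bound ud[i] in both directions,
     and accept the value once the lower and upper bounds round to the same
     double.  */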
  for (i = 0; i < MM; i++)
    {
      p = pr[i];
      __dbl_mp (x, &mpx, p);
      __dbl_mp (y, &mpy, p);
      __mpatan2 (&mpy, &mpx, &mpz, p);
      __dbl_mp (ud[i].d, &mpt1, p);
      __mul (&mpz, &mpt1, &mperr, p);
      __add (&mpz, &mperr, &mpz1, p);
      __sub (&mpz, &mperr, &mpz2, p);
      __mp_dbl (&mpz1, &z1, p);
      __mp_dbl (&mpz2, &z2, p);
      if (z1 == z2)
        {
          LIBC_PROBE (slowatan2, 4, &p, &x, &y, &z1);
          return z1;
        }
    }
  LIBC_PROBE (slowatan2_inexact, 4, &p, &x, &y, &z1);
  return z1;    /* if exact computation was impossible */
}