/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2019 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/*********************************************************************/
/* MODULE_NAME: utan.c                                               */
/*                                                                   */
/* FUNCTIONS: utan                                                   */
/*            tanMp                                                  */
/*                                                                   */
/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h                */
/*               branred.c sincos32.c mptan.c                        */
/*               utan.tbl                                            */
/*                                                                   */
/* An ultimate tan routine.  Given an IEEE double machine number x,  */
/* it computes the correctly rounded (to nearest) value of tan(x).   */
/* Assumption: machine arithmetic operations are performed in the    */
/* round-to-nearest mode of the IEEE 754 standard.                   */
/*                                                                   */
/*********************************************************************/
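
/* Overview: a fast double-precision stage is tried first; whenever its
   result cannot be certified as correctly rounded, the computation is
   retried in double-double arithmetic, and as a last resort tanMp ()
   recomputes everything in multiple precision.  Argument reduction
   writes x = n * pi/2 + y with |y| <= pi/4, so that tan (x) equals
   tan (y) when n is even and -cot (y) when n is odd.  */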

#include <errno.h>
#include <float.h>
#include "endian.h"
#include <dla.h>
#include "mpa.h"
#include "MathLib.h"
#include <math.h>
#include <math_private.h>
#include <fenv_private.h>
#include <math-underflow.h>
#include <libm-alias-double.h>
#include <fenv.h>
#include <stap-probe.h>

#ifndef SECTION
# define SECTION
#endif
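
/* SECTION is empty by default; alternative builds (e.g. multiarch
   variants) can presumably define it to place this implementation in a
   dedicated ELF section.  */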

static double tanMp (double);
void __mptan (double, mp_no *, int);

double
SECTION
__tan (double x)
{
#include "utan.h"
#include "utan.tbl"

  int ux, i, n;
  double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
    s, sy, t, t1, t2, t3, t4, t7, t8, t9, t10, w, x2, xn, xx2, y, ya,
    yya, z0, z, zz, z2, zz2;
#ifndef DLA_FMS
  double t5, t6;
#endif
  int p;
  number num, v;
  mp_no mpa, mpt1, mpt2;

  double retval;

  int __branred (double, double *, double *);
  int __mpranred (double, mp_no *, int);

  SET_RESTORE_ROUND_53BIT (FE_TONEAREST);
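
  /* SET_RESTORE_ROUND_53BIT forces round-to-nearest with 53-bit
     precision for the rest of the function and restores the caller's
     mode on return; on i386 this also disables x87 extended precision,
     which the error bounds used below depend on.  */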

  /* x=+-INF, x=NaN */
  num.d = x;
  ux = num.i[HIGH_HALF];
  if ((ux & 0x7ff00000) == 0x7ff00000)
    {
      if ((ux & 0x7fffffff) == 0x7ff00000)
        __set_errno (EDOM);
      retval = x - x;
      goto ret;
    }
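
  /* In the branch above, x - x yields NaN for both infinities and NaNs;
     errno is set to EDOM only for exact +-Inf, where tan is a domain
     error.  */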

  w = (x < 0.0) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
  if (w <= g1.d)
    {
      math_check_force_underflow_nonneg (w);
      retval = x;
      goto ret;
    }
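
  /* In case (I), tan (x) = x + x^3/3 + ...; for |x| <= g1 (about
     1.259e-8) the cubic term is below half an ulp of x, so x itself is
     the correctly rounded result, and the check above raises the
     underflow exception when required for tiny arguments.  */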

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w <= g2.d)
    {
      /* First stage */
      x2 = x * x;

      t2 = d9.d + x2 * d11.d;
      t2 = d7.d + x2 * t2;
      t2 = d5.d + x2 * t2;
      t2 = d3.d + x2 * t2;
      t2 *= x * x2;

      if ((y = x + (t2 - u1.d * t2)) == x + (t2 + u1.d * t2))
        {
          retval = y;
          goto ret;
        }
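
      /* The comparison above is Ziv's rounding test: t2 carries a
         relative error bounded by u1, so if x + (t2 - u1 * t2) and
         x + (t2 + u1 * t2) round to the same double, that value is the
         correctly rounded tan (x); otherwise fall through to the more
         accurate second stage.  */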

      /* Second stage */
      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

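      /* The second stage reevaluates the series in double-double
         arithmetic: EMULV, ADD2, MUL2, SUB2 and DIV2 (from dla.h) keep
         each quantity as an unevaluated sum of two doubles (such as
         c1 + cc1), roughly doubling the working precision.  */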
      EMULV (x, x, x2, xx2, t1, t2, t3, t4, t5);
      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
      if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
        {
          retval = y;
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w <= g3.d)
    {
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * w));
      z = w - xfg[i][0].d;
      z2 = z * z;
      s = (x < 0.0) ? -1 : 1;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;
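      /* fi = tan (x_i) and gi = 1 / tan (x_i) for the table point
         x_i = xfg[i][0] nearest to w, and pz approximates tan (z) for
         the small offset z = w - x_i.  The addition formula then gives
           tan (x_i + z) = fi + pz * (gi + fi) / (gi - pz),
         which is evaluated next.  */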
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
        {
          retval = (s * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua3.d + t3 * ub3.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (s * y);
          goto ret;
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      EMULV (z, z, z2, zz2, t1, t2, t3, t4, t5);
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
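
      /* The block above evaluates tan (x_i + z) = (fi + tan z)
         / (1 - fi * tan z) in double-double arithmetic: c2 holds the
         numerator, c1 the denominator and c3 their quotient.  */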

      if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
        {
          retval = (s * y);
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w <= g4.d)
    {
      /* Range reduction by algorithm i */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * mp3.d;
      a = t1 - da;
      da = (t1 - a) - da;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }
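
      /* Algorithm i: adding toint (1.5 * 2^52) forces the low word of t
         to hold the integer nearest x * 2/pi, so xn = t - toint is that
         integer and n is its parity.  pi/2 is split into mp1 + mp2 + mp3
         so that a + da represents x - xn * pi/2 to beyond double
         precision; ya, yya and sy record its magnitude and sign.  */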

      /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

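          /* For odd n the reduced result is -cot (y); since cot (y) =
             1 / tan (y), DIV2 below forms the quotient 1 / (a + t2) in
             double-double arithmetic.  For even n, a + t2 already
             approximates tan (y).  */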
          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }
          /* Second stage */
          /* Range reduction by algorithm ii */
          t = (x * hpinv.d + toint.d);
          xn = t - toint.d;
          v.d = t;
          t1 = (x - xn * mp1.d) - xn * mp2.d;
          n = v.i[LOW_HALF] & 0x00000001;
          da = xn * pp3.d;
          t = t1 - da;
          da = (t1 - t) - da;
          t1 = xn * pp4.d;
          a = t - t1;
          da = ((t - a) - t1) + da;
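
          /* Algorithm ii repeats the reduction with a longer split of
             pi/2 (mp1, mp2, pp3, pp4), recovering extra low-order bits
             of the reduced argument for the second stage.  */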

          /* Second stage */
          EADD (a, da, t1, t2);
          a = t1;
          da = t2;
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
                {
                  retval = y;
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */

      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;
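
      /* As in case (III), fi = tan (x_i) and gi = 1 / tan (x_i).  For
         odd n the result follows from
           cot (x_i + z) = gi - pz * (fi + gi) / (fi + pz),
         negated and given the sign sy of the reduced argument.  */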

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua10.d + t3 * ub10.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua9.d + t3 * ub9.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w <= g5.d)
    {
      /* Range reduction by algorithm ii */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * pp3.d;
      t = t1 - da;
      da = (t1 - t) - da;
      t1 = xn * pp4.d;
      a = t - t1;
      da = ((t - a) - t1) + da;
      EADD (a, da, t1, t2);
      a = t1;
      da = t2;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }

          /* Second stage */
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
                {
                  retval = (y);
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua18.d + t3 * ub18.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua17.d + t3 * ub17.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
  n = (__branred (x, &a, &da)) & 0x00000001;
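  /* __branred performs the large-argument reduction (a Payne-Hanek
     style scheme using a precomputed expansion of 2/pi), returning the
     quadrant count, whose parity is n, and the reduced argument as the
     pair a + da.  */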
  EADD (a, da, t1, t2);
  a = t1;
  da = t2;
  if (a < 0.0)
    {
      ya = -a;
      yya = -da;
      sy = -1;
    }
  else
    {
      ya = a;
      yya = da;
      sy = 1;
    }

  /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
  if (ya <= gy1.d)
    {
      retval = tanMp (x);
      goto ret;
    }

  /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
  if (ya <= gy2.d)
    {
      a2 = a * a;
      t2 = d9.d + a2 * d11.d;
      t2 = d7.d + a2 * t2;
      t2 = d5.d + a2 * t2;
      t2 = d3.d + a2 * t2;
      t2 = da + a * a2 * t2;
      if (n)
        {
          /* First stage -cot */
          EADD (a, t2, b, db);
          DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* First stage tan */
          if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
            {
              retval = y;
              goto ret;
            }
        }

      /* Second stage */
      /* Reduction by algorithm iv */
      p = 10;
      n = (__mpranred (x, &mpa, p)) & 0x00000001;
      __mp_dbl (&mpa, &a, p);
      __dbl_mp (a, &mpt1, p);
      __sub (&mpa, &mpt1, &mpt2, p);
      __mp_dbl (&mpt2, &da, p);
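
      /* Algorithm iv: __mpranred reduces x in multiple precision
         (p = 10 digits); the result is split into a leading double a
         and a residual da by converting to double, subtracting, and
         converting the remainder back.  */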

      MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

      if (n)
        {
          /* Second stage -cot */
          DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* Second stage tan */
          if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
            {
              retval = y;
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d + TWO8 * ya));
  z = (z0 = (ya - xfg[i][0].d)) + yya;
  z2 = z * z;
  pz = z + z * z2 * (e0.d + z2 * e1.d);
  fi = xfg[i][1].d;
  gi = xfg[i][2].d;

  if (n)
    {
      /* -cot */
      t2 = pz * (fi + gi) / (fi + pz);
      if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
        {
          retval = (-sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = gi * ua26.d + t3 * ub26.d;
      if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
        {
          retval = (sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua25.d + t3 * ub25.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (sy * y);
          goto ret;
        }
    }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD (z0, yya, z, zz);
  MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
  c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
  ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

  ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
  MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
  SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

  if (n)
    {
      /* -cot */
      DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
        {
          retval = (sy * y);
          goto ret;
        }
    }
  retval = tanMp (x);
  goto ret;

ret:
  return retval;
}

/* Multiple precision stage.  Convert x to a multiple precision number,
   compute tan (x) by the __mptan () routine, and convert the result
   back to double.  */
static double
SECTION
tanMp (double x)
{
  int p;
  double y;
  mp_no mpy;
  p = 32;
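  /* 32 multiple-precision digits give far more precision than a double
     result needs, so the final rounding to double is reliable; the
     LIBC_PROBE below is a SystemTap probe marking this slow path.  */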
  __mptan (x, &mpy, p);
  __mp_dbl (&mpy, &y, p);
  LIBC_PROBE (slowtan, 2, &x, &y);
  return y;
}

#ifndef __tan
libm_alias_double (__tan, tan)
#endif