/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2018 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/*********************************************************************/
/*  MODULE_NAME: utan.c                                              */
/*                                                                   */
/*  FUNCTIONS: utan                                                  */
/*             tanMp                                                 */
/*                                                                   */
/*  FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h               */
/*                branred.c sincos32.c mptan.c                       */
/*                utan.tbl                                           */
/*                                                                   */
/*  An ultimate tan routine. Given an IEEE double machine number x   */
/*  it computes the correctly rounded (to nearest) value of tan(x).  */
/*  Assumption: Machine arithmetic operations are performed in       */
/*  round to nearest mode of IEEE 754 standard.                      */
/*                                                                   */
/*********************************************************************/

#include <errno.h>
#include <float.h>
#include "endian.h"
#include <dla.h>
#include "mpa.h"
#include "MathLib.h"
#include <math.h>
#include <math_private.h>
#include <libm-alias-double.h>
#include <fenv.h>
#include <stap-probe.h>

#ifndef SECTION
# define SECTION
#endif

static double tanMp (double);
void __mptan (double, mp_no *, int);

double
SECTION
__tan (double x)
{
#include "utan.h"
#include "utan.tbl"

  int ux, i, n;
  double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
         s, sy, t, t1, t2, t3, t4, t7, t8, t9, t10, w, x2, xn, xx2, y, ya,
         yya, z0, z, zz, z2, zz2;
#ifndef DLA_FMS
  double t5, t6;
#endif
  int p;
  number num, v;
  mp_no mpa, mpt1, mpt2;

  double retval;

  int __branred (double, double *, double *);
  int __mpranred (double, mp_no *, int);

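  /* All of the error bounds below assume round-to-nearest arithmetic
     on a 53-bit significand, as stated in the module header; force
     that mode (restoring the caller's on exit) so extended-precision
     or differently-rounded hardware cannot invalidate them.  */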
  SET_RESTORE_ROUND_53BIT (FE_TONEAREST);

  /* x=+-INF, x=NaN */
  num.d = x;
  ux = num.i[HIGH_HALF];
  if ((ux & 0x7ff00000) == 0x7ff00000)
    {
      if ((ux & 0x7fffffff) == 0x7ff00000)
        __set_errno (EDOM);
      retval = x - x;
      goto ret;
    }

  w = (x < 0.0) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
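  /* Below this bound tan(x) = x + x^3/3 + ... differs from x by less
     than half an ulp, so x itself is the correctly rounded result;
     only a possible underflow exception remains to be raised.  */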
  if (w <= g1.d)
    {
      math_check_force_underflow_nonneg (w);
      retval = x;
      goto ret;
    }

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w <= g2.d)
    {
      /* First stage */
      x2 = x * x;

      t2 = d9.d + x2 * d11.d;
      t2 = d7.d + x2 * t2;
      t2 = d5.d + x2 * t2;
      t2 = d3.d + x2 * t2;
      t2 *= x * x2;

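      /* Rounding test in Ziv's style: u1.d bounds the error of the
         stage above.  If nudging the correction t2 down and up by that
         bound yields the same rounded sum, y is provably the correctly
         rounded tan(x); otherwise a more precise stage is needed.  */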
      if ((y = x + (t2 - u1.d * t2)) == x + (t2 + u1.d * t2))
        {
          retval = y;
          goto ret;
        }

      /* Second stage */
      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

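      /* The second stage repeats the evaluation in double-double
         arithmetic from dla.h: every value is an unevaluated sum of
         two doubles (c1 + cc1 etc.).  EMULV forms an exact product,
         and ADD2/SUB2/MUL2/DIV2 combine such pairs with roughly twice
         the precision of plain double operations.  */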
      EMULV (x, x, x2, xx2, t1, t2, t3, t4, t5);
      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
      if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
        {
          retval = y;
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w <= g3.d)
    {
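      /* Table-driven evaluation: utan.tbl stores, for grid points xi
         near w, the values fi = tan(xi) and gi = 1/tan(xi).  With
         z = w - xi and pz ~ tan(z), the addition formula
         tan(xi + z) = (fi + pz) / (1 - fi * pz) is evaluated in the
         algebraically equivalent form fi + pz * (gi + fi) / (gi - pz)
         (multiply numerator and denominator by gi to check).  */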
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * w));
      z = w - xfg[i][0].d;
      z2 = z * z;
      s = (x < 0.0) ? -1 : 1;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
        {
          retval = (s * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua3.d + t3 * ub3.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (s * y);
          goto ret;
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      EMULV (z, z, z2, zz2, t1, t2, t3, t4, t5);
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);

      if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
        {
          retval = (s * y);
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w <= g4.d)
    {
      /* Range reduction by algorithm i */
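      /* Adding the big constant toint.d and subtracting it again
         rounds x * hpinv.d (hpinv ~ 2/pi) to the integer xn, so that
         x = xn * pi/2 + y with |y| <= pi/4.  pi/2 enters as the split
         mp1 + mp2 + mp3 to keep the products with xn exact (Cody-Waite
         style), leaving the reduced argument as the pair a + da; the
         parity bit n of xn selects between tan and -cot below.  */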
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * mp3.d;
      a = t1 - da;
      da = (t1 - a) - da;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
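      /* A reduced argument this small means x lies very close to a
         multiple of pi/2, where the reduction above has cancelled away
         too many bits to certify a correctly rounded result; fall back
         to the multiple precision stage directly.  */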
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
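              /* For odd n, x = y + (2k+1) * pi/2, so
                 tan(x) = -cot(y) = -1/tan(y); hence the reciprocal via
                 DIV2 and the negated result.  */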
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }
          /* Second stage */
          /* Range reduction by algorithm ii */
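          /* Same scheme as algorithm i, but pi/2 is now split into the
             four parts mp1 + mp2 + pp3 + pp4, making the reduced
             argument a + da accurate enough for the double-double
             second stage.  */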
          t = (x * hpinv.d + toint.d);
          xn = t - toint.d;
          v.d = t;
          t1 = (x - xn * mp1.d) - xn * mp2.d;
          n = v.i[LOW_HALF] & 0x00000001;
          da = xn * pp3.d;
          t = t1 - da;
          da = (t1 - t) - da;
          t1 = xn * pp4.d;
          a = t - t1;
          da = ((t - a) - t1) + da;

          /* Second stage */
          EADD (a, da, t1, t2);
          a = t1;
          da = t2;
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
                {
                  retval = y;
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */

      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua10.d + t3 * ub10.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua9.d + t3 * ub9.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w <= g5.d)
    {
      /* Range reduction by algorithm ii */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * pp3.d;
      t = t1 - da;
      da = (t1 - t) - da;
      t1 = xn * pp4.d;
      a = t - t1;
      da = ((t - a) - t1) + da;
      EADD (a, da, t1, t2);
      a = t1;
      da = t2;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }

          /* Second stage */
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
                {
                  retval = (y);
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua18.d + t3 * ub18.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua17.d + t3 * ub17.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
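  /* __branred (branred.c) reduces huge arguments using an extended
     precision representation of 2/pi, returning the quadrant number
     and the reduced argument as the pair a + da; it stays accurate
     even when x is within an ulp of a large multiple of pi/2.  */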
  n = (__branred (x, &a, &da)) & 0x00000001;
  EADD (a, da, t1, t2);
  a = t1;
  da = t2;
  if (a < 0.0)
    {
      ya = -a;
      yya = -da;
      sy = -1;
    }
  else
    {
      ya = a;
      yya = da;
      sy = 1;
    }

  /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
  if (ya <= gy1.d)
    {
      retval = tanMp (x);
      goto ret;
    }

  /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
  if (ya <= gy2.d)
    {
      a2 = a * a;
      t2 = d9.d + a2 * d11.d;
      t2 = d7.d + a2 * t2;
      t2 = d5.d + a2 * t2;
      t2 = d3.d + a2 * t2;
      t2 = da + a * a2 * t2;
      if (n)
        {
          /* First stage -cot */
          EADD (a, t2, b, db);
          DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* First stage tan */
          if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
            {
              retval = y;
              goto ret;
            }
        }

      /* Second stage */
      /* Reduction by algorithm iv */
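      /* Redo the reduction entirely in multiple precision with
         __mpranred, then split the result into a leading double a and
         a tail da by converting to double and subtracting back.  */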
      p = 10;
      n = (__mpranred (x, &mpa, p)) & 0x00000001;
      __mp_dbl (&mpa, &a, p);
      __dbl_mp (a, &mpt1, p);
      __sub (&mpa, &mpt1, &mpt2, p);
      __mp_dbl (&mpt2, &da, p);

      MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

      if (n)
        {
          /* Second stage -cot */
          DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* Second stage tan */
          if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
            {
              retval = y;
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d + TWO8 * ya));
  z = (z0 = (ya - xfg[i][0].d)) + yya;
  z2 = z * z;
  pz = z + z * z2 * (e0.d + z2 * e1.d);
  fi = xfg[i][1].d;
  gi = xfg[i][2].d;

  if (n)
    {
      /* -cot */
      t2 = pz * (fi + gi) / (fi + pz);
      if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
        {
          retval = (-sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = gi * ua26.d + t3 * ub26.d;
      if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
        {
          retval = (sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua25.d + t3 * ub25.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (sy * y);
          goto ret;
        }
    }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD (z0, yya, z, zz);
  MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
  c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
  ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

  ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
  MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
  SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

  if (n)
    {
      /* -cot */
      DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
        {
          retval = (sy * y);
          goto ret;
        }
    }
  retval = tanMp (x);
  goto ret;

ret:
  return retval;
}

/* Multiple precision stage.  Convert x to a multiple precision number,
   compute tan (x) by the __mptan () routine and convert the result
   back to double.  */
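/* Final stage of the correctly-rounding (Ziv style) strategy: reached
   only for the rare arguments on which every faster double and
   double-double rounding test above has failed.  */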
static double
SECTION
tanMp (double x)
{
  int p;
  double y;
  mp_no mpy;
  p = 32;
  __mptan (x, &mpy, p);
  __mp_dbl (&mpy, &y, p);
  LIBC_PROBE (slowtan, 2, &x, &y);
  return y;
}

#ifndef __tan
libm_alias_double (__tan, tan)
#endif