/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2016 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/*********************************************************************/
/* MODULE_NAME: utan.c                                               */
/*                                                                   */
/* FUNCTIONS: utan                                                   */
/*            tanMp                                                  */
/*                                                                   */
/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h                */
/*               branred.c sincos32.c mptan.c                        */
/*               utan.tbl                                            */
/*                                                                   */
/* An ultimate tan routine.  Given an IEEE double machine number x   */
/* it computes the correctly rounded (to nearest) value of tan(x).   */
/* Assumption: Machine arithmetic operations are performed in        */
/* round-to-nearest mode of the IEEE 754 standard.                   */
/*                                                                   */
/*********************************************************************/
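
/* Strategy: for each range of |x| handled below, a fast first stage
   computes tan(x) together with an error bound (the u*.d constants); the
   result is accepted only when perturbing it by that bound in either
   direction rounds to the same double, which proves correct rounding.
   Otherwise a slower double-double second stage is tried, and if its test
   fails too, the multiple-precision fallback tanMp() decides.  */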

#include <errno.h>
#include <float.h>
#include "endian.h"
#include <dla.h>
#include "mpa.h"
#include "MathLib.h"
#include <math.h>
#include <math_private.h>
#include <fenv.h>
#include <stap-probe.h>

#ifndef SECTION
# define SECTION
#endif

static double tanMp (double);
void __mptan (double, mp_no *, int);

double
SECTION
tan (double x)
{
#include "utan.h"
#include "utan.tbl"

  int ux, i, n;
  double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
    s, sy, t, t1, t2, t3, t4, t7, t8, t9, t10, w, x2, xn, xx2, y, ya,
    yya, z0, z, zz, z2, zz2;
#ifndef DLA_FMS
  double t5, t6;
#endif
  int p;
  number num, v;
  mp_no mpa, mpt1, mpt2;

  double retval;

  int __branred (double, double *, double *);
  int __mpranred (double, mp_no *, int);

  SET_RESTORE_ROUND_53BIT (FE_TONEAREST);

  /* x=+-INF, x=NaN */
  num.d = x;
  ux = num.i[HIGH_HALF];
  if ((ux & 0x7ff00000) == 0x7ff00000)
    {
      if ((ux & 0x7fffffff) == 0x7ff00000)
        __set_errno (EDOM);
      retval = x - x;
      goto ret;
    }

  w = (x < 0.0) ? -x : x;

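  /* Here tan(x) = x + x^3/3 + O(x^5) and, with |x| <= g1 (about 1.259e-8),
     x^2/3 < 2^-53, so the cubic term is below half an ulp of x; g1 is
     presumably chosen so that x itself is the correctly rounded result.
     The check below also forces the underflow exception for tiny x.  */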
  /* (I) The case abs(x) <= 1.259e-8 */
  if (w <= g1.d)
    {
      math_check_force_underflow_nonneg (w);
      retval = x;
      goto ret;
    }

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w <= g2.d)
    {
      /* First stage */
      x2 = x * x;

      t2 = d9.d + x2 * d11.d;
      t2 = d7.d + x2 * t2;
      t2 = d5.d + x2 * t2;
      t2 = d3.d + x2 * t2;
      t2 *= x * x2;

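      /* Rounding test: u1.d bounds the error of t2.  If x + (t2 - u1*t2)
         and x + (t2 + u1*t2) round to the same double, the error cannot
         affect the rounding of x + t2, so y is correctly rounded.  The
         same idiom recurs below with the other u*.d bounds.  */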
      if ((y = x + (t2 - u1.d * t2)) == x + (t2 + u1.d * t2))
        {
          retval = y;
          goto ret;
        }

      /* Second stage */
      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

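      /* Second-stage arithmetic uses the double-double ("double length")
         macros from dla.h: EMULV forms the exact product x*x as a head/tail
         pair (x2, xx2), and ADD2/MUL2 keep a head and correction word
         (c1, cc1) while evaluating the polynomial.  */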
      EMULV (x, x, x2, xx2, t1, t2, t3, t4, t5);
      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
      if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
        {
          retval = y;
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w <= g3.d)
    {
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * w));
      z = w - xfg[i][0].d;
      z2 = z * z;
      s = (x < 0.0) ? -1 : 1;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;
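      /* Table step: writing xi = xfg[i][0], fi = tan(xi) and gi = cot(xi)
         (fi * gi = 1, as the identity below requires), the addition formula
         tan(xi + z) = fi + pz * (gi + fi) / (gi - pz) recovers tan(w) from
         pz ~ tan(z) for the small remainder z; the ua3/ub3 retry below
         presumably uses a sharper two-term error bound.  */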
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
        {
          retval = (s * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua3.d + t3 * ub3.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (s * y);
          goto ret;
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      EMULV (z, z, z2, zz2, t1, t2, t3, t4, t5);
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);

      if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
        {
          retval = (s * y);
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w <= g4.d)
    {
      /* Range reduction by algorithm i */
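      /* xn becomes the integer nearest x * (2/pi): adding and subtracting
         the big constant toint.d rounds x * hpinv.d to an integer, where
         hpinv.d is presumably 2/pi.  pi/2 is split over mp1 + mp2 + mp3,
         so a + da approximates x - xn * pi/2, with da the exact tail.  */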
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * mp3.d;
      a = t1 - da;
      da = (t1 - a) - da;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

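          /* tan (x) = tan (a + n * pi/2): for even n this is tan (a), for
             odd n it is -cot (a) = -1 / tan (a), formed below as a
             double-double reciprocal with DIV2.  */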
          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }
          /* Second stage */
          /* Range reduction by algorithm ii */
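          /* Like algorithm i, but pi/2 is carried to two more terms
             (pp3, pp4): the cancellation in x - xn * pi/2 grows with |x|,
             so extra bits of the reduced argument are presumably needed
             here.  */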
          t = (x * hpinv.d + toint.d);
          xn = t - toint.d;
          v.d = t;
          t1 = (x - xn * mp1.d) - xn * mp2.d;
          n = v.i[LOW_HALF] & 0x00000001;
          da = xn * pp3.d;
          t = t1 - da;
          da = (t1 - t) - da;
          t1 = xn * pp4.d;
          a = t - t1;
          da = ((t - a) - t1) + da;

          /* Second stage */
          EADD (a, da, t1, t2);
          a = t1;
          da = t2;
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
                {
                  retval = y;
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */

      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

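      /* Same table step as in case (III); for the -cot branch the matching
         identity is cot (xi + z) = gi - pz * (fi + gi) / (fi + pz), again
         with fi = tan (xi) and gi = cot (xi); sy restores the sign lost in
         the reduction.  */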
      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua10.d + t3 * ub10.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua9.d + t3 * ub9.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w <= g5.d)
    {
      /* Range reduction by algorithm ii */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * pp3.d;
      t = t1 - da;
      da = (t1 - t) - da;
      t1 = xn * pp4.d;
      a = t - t1;
      da = ((t - a) - t1) + da;
      EADD (a, da, t1, t2);
      a = t1;
      da = t2;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }

          /* Second stage */
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
                {
                  retval = (y);
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua18.d + t3 * ub18.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua17.d + t3 * ub17.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
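  /* __branred reduces huge arguments modulo pi/2, presumably multiplying by
     a stored multi-word expansion of 2/pi in the style of Payne-Hanek
     reduction, and returns the quadrant number; its parity again selects
     between tan and -cot.  */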
  n = (__branred (x, &a, &da)) & 0x00000001;
  EADD (a, da, t1, t2);
  a = t1;
  da = t2;
  if (a < 0.0)
    {
      ya = -a;
      yya = -da;
      sy = -1;
    }
  else
    {
      ya = a;
      yya = da;
      sy = 1;
    }

  /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
  if (ya <= gy1.d)
    {
      retval = tanMp (x);
      goto ret;
    }

  /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
  if (ya <= gy2.d)
    {
      a2 = a * a;
      t2 = d9.d + a2 * d11.d;
      t2 = d7.d + a2 * t2;
      t2 = d5.d + a2 * t2;
      t2 = d3.d + a2 * t2;
      t2 = da + a * a2 * t2;
      if (n)
        {
          /* First stage -cot */
          EADD (a, t2, b, db);
          DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* First stage tan */
          if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
            {
              retval = y;
              goto ret;
            }
        }

      /* Second stage */
      /* Reduction by algorithm iv */
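      /* Multiple-precision reduction: __mpranred returns the quadrant and
         leaves the reduced argument in mpa.  Rounding mpa to the double a,
         converting a back, and subtracting in multiple precision yields the
         tail da, so (a, da) is a double-double reduced argument.  */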
      p = 10;
      n = (__mpranred (x, &mpa, p)) & 0x00000001;
      __mp_dbl (&mpa, &a, p);
      __dbl_mp (a, &mpt1, p);
      __sub (&mpa, &mpt1, &mpt2, p);
      __mp_dbl (&mpt2, &da, p);

      MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

      if (n)
        {
          /* Second stage -cot */
          DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* Second stage tan */
          if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
            {
              retval = y;
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d + TWO8 * ya));
  z = (z0 = (ya - xfg[i][0].d)) + yya;
  z2 = z * z;
  pz = z + z * z2 * (e0.d + z2 * e1.d);
  fi = xfg[i][1].d;
  gi = xfg[i][2].d;

  if (n)
    {
      /* -cot */
      t2 = pz * (fi + gi) / (fi + pz);
      if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
        {
          retval = (-sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = gi * ua26.d + t3 * ub26.d;
      if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
        {
          retval = (sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua25.d + t3 * ub25.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (sy * y);
          goto ret;
        }
    }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD (z0, yya, z, zz);
  MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
  c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
  ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

  ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
  MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
  SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

  if (n)
    {
      /* -cot */
      DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
        {
          retval = (sy * y);
          goto ret;
        }
    }
  retval = tanMp (x);
  goto ret;

ret:
  return retval;
}

/* Multiple-precision stage.  Convert x to a multiple-precision number,
   compute tan(x) with the __mptan() routine, and convert the result back
   to double.  */
static double
SECTION
tanMp (double x)
{
  int p;
  double y;
  mp_no mpy;
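  /* Assuming the radix-2^24 digits of mpa.h, p = 32 gives roughly 768 bits
     of working precision.  The LIBC_PROBE below marks this slow path for
     SystemTap.  */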
  p = 32;
  __mptan (x, &mpy, p);
  __mp_dbl (&mpy, &y, p);
  LIBC_PROBE (slowtan, 2, &x, &y);
  return y;
}

#ifdef NO_LONG_DOUBLE
weak_alias (tan, tanl)
#endif