author     Szabolcs Nagy <szabolcs.nagy@arm.com>    2018-06-13 17:57:20 +0100
committer  Szabolcs Nagy <szabolcs.nagy@arm.com>    2018-09-19 10:04:51 +0100
commit     424c4f60ed6190e2ea0e72e0873bf3ebcbbf5448 (patch)
tree       52fbd60de3d3b1e99208b3018cf79ee8a230a878
parent     dab9c3488e86d5304f3e4b778933760374494a82 (diff)
download   glibc-424c4f60ed6190e2ea0e72e0873bf3ebcbbf5448.tar.xz
           glibc-424c4f60ed6190e2ea0e72e0873bf3ebcbbf5448.zip
Add new pow implementation
The algorithm is exp(y * log(x)), where log(x) is computed with about
1.3*2^-68 relative error (1.5*2^-68 without fma), returning the result
in two doubles, and the exp part uses the same algorithm (and lookup
tables) as exp, but takes the input as two doubles and a sign (to
handle negative bases with odd integer exponents).  The __exp1 internal
symbol is no longer necessary.

There is a separate code path when fma is not available, but the
worst-case error is about 0.54 ULP in both cases.  The lookup table and
constants for log are 4168 bytes.  The .rodata+.text size is decreased
by 37908 bytes on aarch64.  The non-nearest rounding error is less than
1 ULP.

Improvements on Cortex-A72 compared to current glibc master:
pow throughput: 2.40x in [0.01 11.1]x[0.01 11.1]
pow latency:    1.84x in [0.01 11.1]x[0.01 11.1]

Tested on
aarch64-linux-gnu (defined __FP_FAST_FMA, TOINT_INTRINSICS),
arm-linux-gnueabihf (!defined __FP_FAST_FMA, !TOINT_INTRINSICS),
x86_64-linux-gnu (!defined __FP_FAST_FMA, !TOINT_INTRINSICS) and
powerpc64le-linux-gnu (defined __FP_FAST_FMA, !TOINT_INTRINSICS) targets.

	* NEWS: Mention pow improvements.
	* math/Makefile (type-double-routines): Add e_pow_log_data.
	* sysdeps/generic/math_private.h (__exp1): Remove.
	* sysdeps/i386/fpu/e_pow_log_data.c: New file.
	* sysdeps/ia64/fpu/e_pow_log_data.c: New file.
	* sysdeps/ieee754/dbl-64/Makefile (CFLAGS-e_pow.c): Allow fma
	contraction.
	* sysdeps/ieee754/dbl-64/e_exp.c (__exp1): Remove.
	(exp_inline): Remove.
	(__ieee754_exp): Only single double input is handled.
	* sysdeps/ieee754/dbl-64/e_pow.c: Rewrite.
	* sysdeps/ieee754/dbl-64/e_pow_log_data.c: New file.
	* sysdeps/ieee754/dbl-64/math_config.h (issignaling_inline): Define.
	(__pow_log_data): Define.
	* sysdeps/ieee754/dbl-64/upow.h: Remove.
	* sysdeps/ieee754/dbl-64/upow.tbl: Remove.
	* sysdeps/m68k/m680x0/fpu/e_pow_log_data.c: New file.
	* sysdeps/x86_64/fpu/multiarch/Makefile (CFLAGS-e_pow-fma.c): Allow fma
	contraction.
	(CFLAGS-e_pow-fma4.c): Likewise.
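For readers skimming the patch, the core idea can be sketched outside glibc
as follows.  This is a minimal, standalone illustration, not the code added
below: libm's log () and exp () stand in for the patch's log_inline and
exp_inline with their lookup tables, so only the handling of y * log(x) as
two doubles is demonstrated, not the 0.54 ULP accuracy.  Names such as
pow_sketch and the suggested file name are purely for this illustration.

/* Sketch only: pow (x, y) as exp (y * log (x)), with the product
   y * log (x) carried as a double-double (ehi + elo) so the exp step
   sees it to better than double precision.  Build with e.g.
   "cc pow_sketch.c -lm".  */
#include <math.h>
#include <stdio.h>

static double
pow_sketch (double x, double y)
{
  double lhi = log (x);   /* Stand-in for the hi part of log_inline.  */
  double llo = 0.0;       /* Zero here because libm's log () has no tail;
			     in the patch log_inline supplies a real one.  */
  double ehi = y * lhi;
  /* fma recovers the rounding error of y * lhi exactly; the tail terms
     are folded in afterwards, so exp effectively receives the product
     as two doubles, as the commit message describes.  */
  double elo = fma (y, lhi, -ehi) + y * llo;
  /* For tiny |elo|, exp (ehi + elo) ~= exp (ehi) * (1 + elo).  */
  double e = exp (ehi);
  return e + e * elo;
}

int
main (void)
{
  double x = 1.0000001, y = 12345678.0;
  printf ("sketch: %.17g\n", pow_sketch (x, y));
  printf ("libm:   %.17g\n", pow (x, y));
  return 0;
}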
-rw-r--r--  ChangeLog                                     23
-rw-r--r--  NEWS                                           2
-rw-r--r--  math/Makefile                                  2
-rw-r--r--  sysdeps/generic/math_private.h                 1
-rw-r--r--  sysdeps/i386/fpu/e_pow_log_data.c              1
-rw-r--r--  sysdeps/ia64/fpu/e_pow_log_data.c              1
-rw-r--r--  sysdeps/ieee754/dbl-64/Makefile                1
-rw-r--r--  sysdeps/ieee754/dbl-64/e_exp.c                35
-rw-r--r--  sysdeps/ieee754/dbl-64/e_pow.c               658
-rw-r--r--  sysdeps/ieee754/dbl-64/e_pow_log_data.c      195
-rw-r--r--  sysdeps/ieee754/dbl-64/math_config.h          22
-rw-r--r--  sysdeps/ieee754/dbl-64/upow.h                 76
-rw-r--r--  sysdeps/ieee754/dbl-64/upow.tbl            10188
-rw-r--r--  sysdeps/m68k/m680x0/fpu/e_pow_log_data.c       1
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/Makefile          4
15 files changed, 594 insertions, 10616 deletions
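As a rough cross-check of the 0.54 ULP figure, the error formula quoted in
the header comment of the new e_pow.c (further below) combines the stated
exp and log errors; 1024*Ln2 ~= 709.78 bounds |y * log(x)| for finite
results, so a relative error in log is scaled by at most that factor before
reaching exp:

    ulperr_pow ~= ulperr_exp + 1024*Ln2 * relerr_log * 2^53
               ~= 0.509 + 709.78 * 1.3*2^-68 * 2^53
               ~= 0.509 + 0.028
               ~= 0.54 ULP   (and likewise ~0.54 ULP without fma)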
diff --git a/ChangeLog b/ChangeLog
index 0be5afdaa0..57ba532bd6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,26 @@
+2018-09-19 Szabolcs Nagy <szabolcs.nagy@arm.com>
+
+ * NEWS: Mention pow improvements.
+ * math/Makefile (type-double-routines): Add e_pow_log_data.
+ * sysdeps/generic/math_private.h (__exp1): Remove.
+ * sysdeps/i386/fpu/e_pow_log_data.c: New file.
+ * sysdeps/ia64/fpu/e_pow_log_data.c: New file.
+ * sysdeps/ieee754/dbl-64/Makefile (CFLAGS-e_pow.c): Allow fma
+ contraction.
+ * sysdeps/ieee754/dbl-64/e_exp.c (__exp1): Remove.
+ (exp_inline): Remove.
+ (__ieee754_exp): Only single double input is handled.
+ * sysdeps/ieee754/dbl-64/e_pow.c: Rewrite.
+ * sysdeps/ieee754/dbl-64/e_pow_log_data.c: New file.
+ * sysdeps/ieee754/dbl-64/math_config.h (issignaling_inline): Define.
+ (__pow_log_data): Define.
+ * sysdeps/ieee754/dbl-64/upow.h: Remove.
+ * sysdeps/ieee754/dbl-64/upow.tbl: Remove.
+ * sysdeps/m68k/m680x0/fpu/e_pow_log_data.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/Makefile (CFLAGS-e_pow-fma.c): Allow fma
+ contraction.
+ (CFLAGS-e_pow-fma4.c): Likewise.
+
2018-09-18 Paul Eggert <eggert@cs.ucla.edu>
Simplify tzfile fstat failure code
diff --git a/NEWS b/NEWS
index f76ada94d3..53d7bd09b3 100644
--- a/NEWS
+++ b/NEWS
@@ -16,7 +16,7 @@ Major new features:
to set the install root if you wish to install into a non-default
configured location.
-* Optimized generic exp, exp2, log, log2, sinf, cosf, sincosf and tanf.
+* Optimized generic exp, exp2, log, log2, pow, sinf, cosf, sincosf and tanf.
* The reallocarray function is now declared under _DEFAULT_SOURCE, not just
for _GNU_SOURCE, to match BSD environments.
diff --git a/math/Makefile b/math/Makefile
index 2537b2a9ad..750492b381 100644
--- a/math/Makefile
+++ b/math/Makefile
@@ -128,7 +128,7 @@ type-double-suffix :=
type-double-routines := branred doasin dosincos mpa mpatan2 \
k_rem_pio2 mpatan mpsqrt mptan sincos32 \
sincostab math_err e_exp_data e_log_data \
- e_log2_data
+ e_log2_data e_pow_log_data
# float support
type-float-suffix := f
diff --git a/sysdeps/generic/math_private.h b/sysdeps/generic/math_private.h
index c79b65fa6e..d91b929562 100644
--- a/sysdeps/generic/math_private.h
+++ b/sysdeps/generic/math_private.h
@@ -225,7 +225,6 @@ do { \
/* Prototypes for functions of the IBM Accurate Mathematical Library. */
-extern double __exp1 (double __x, double __xx);
extern double __sin (double __x);
extern double __cos (double __x);
extern int __branred (double __x, double *__a, double *__aa);
diff --git a/sysdeps/i386/fpu/e_pow_log_data.c b/sysdeps/i386/fpu/e_pow_log_data.c
new file mode 100644
index 0000000000..1cc8931700
--- /dev/null
+++ b/sysdeps/i386/fpu/e_pow_log_data.c
@@ -0,0 +1 @@
+/* Not needed. */
diff --git a/sysdeps/ia64/fpu/e_pow_log_data.c b/sysdeps/ia64/fpu/e_pow_log_data.c
new file mode 100644
index 0000000000..1cc8931700
--- /dev/null
+++ b/sysdeps/ia64/fpu/e_pow_log_data.c
@@ -0,0 +1 @@
+/* Not needed. */
diff --git a/sysdeps/ieee754/dbl-64/Makefile b/sysdeps/ieee754/dbl-64/Makefile
index c965982fa5..78530b5966 100644
--- a/sysdeps/ieee754/dbl-64/Makefile
+++ b/sysdeps/ieee754/dbl-64/Makefile
@@ -2,5 +2,4 @@ ifeq ($(subdir),math)
# branred depends on precise IEEE double rounding
CFLAGS-branred.c += $(config-cflags-nofma)
CFLAGS-e_sqrt.c += $(config-cflags-nofma)
-CFLAGS-e_pow.c += $(config-cflags-nofma)
endif
diff --git a/sysdeps/ieee754/dbl-64/e_exp.c b/sysdeps/ieee754/dbl-64/e_exp.c
index 209f20b972..37fdafcfa0 100644
--- a/sysdeps/ieee754/dbl-64/e_exp.c
+++ b/sysdeps/ieee754/dbl-64/e_exp.c
@@ -85,10 +85,13 @@ top12 (double x)
return asuint64 (x) >> 52;
}
-/* Computes exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.
- If hastail is 0 then xtail is assumed to be 0 too. */
-static inline double
-exp_inline (double x, double xtail, int hastail)
+#ifndef SECTION
+# define SECTION
+#endif
+
+double
+SECTION
+__ieee754_exp (double x)
{
uint32_t abstop;
uint64_t ki, idx, top, sbits;
@@ -131,9 +134,6 @@ exp_inline (double x, double xtail, int hastail)
kd -= Shift;
#endif
r = x + kd * NegLn2hiN + kd * NegLn2loN;
- /* The code assumes 2^-200 < |xtail| < 2^-8/N. */
- if (hastail)
- r += xtail;
/* 2^(k/N) ~= scale * (1 + tail). */
idx = 2 * (ki % N);
top = ki << (52 - EXP_TABLE_BITS);
@@ -149,29 +149,10 @@ exp_inline (double x, double xtail, int hastail)
if (__glibc_unlikely (abstop == 0))
return specialcase (tmp, sbits, ki);
scale = asdouble (sbits);
- /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
+ /* Note: tmp == 0 or |tmp| > 2^-65 and scale > 2^-739, so there
is no spurious underflow here even without fma. */
return scale + scale * tmp;
}
-
-#ifndef SECTION
-# define SECTION
-#endif
-
-double
-SECTION
-__ieee754_exp (double x)
-{
- return exp_inline (x, 0, 0);
-}
#ifndef __ieee754_exp
strong_alias (__ieee754_exp, __exp_finite)
#endif
-
-/* Compute e^(x+xx). */
-double
-SECTION
-__exp1 (double x, double xx)
-{
- return exp_inline (x, xx, 1);
-}
diff --git a/sysdeps/ieee754/dbl-64/e_pow.c b/sysdeps/ieee754/dbl-64/e_pow.c
index 9bf29e5cb3..ba38bfefcb 100644
--- a/sysdeps/ieee754/dbl-64/e_pow.c
+++ b/sysdeps/ieee754/dbl-64/e_pow.c
@@ -1,360 +1,380 @@
-/*
- * IBM Accurate Mathematical Library
- * written by International Business Machines Corp.
- * Copyright (C) 2001-2018 Free Software Foundation, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-/***************************************************************************/
-/* MODULE_NAME: upow.c */
-/* */
-/* FUNCTIONS: upow */
-/* log1 */
-/* checkint */
-/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h */
-/* root.tbl uexp.tbl upow.tbl */
-/* An ultimate power routine. Given two IEEE double machine numbers y,x */
-/* it computes the correctly rounded (to nearest) value of x^y. */
-/* Assumption: Machine arithmetic operations are performed in */
-/* round to nearest mode of IEEE 754 standard. */
-/* */
-/***************************************************************************/
+/* Double-precision x^y function.
+ Copyright (C) 2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
#include <math.h>
-#include "endian.h"
-#include "upow.h"
-#include <dla.h>
-#include "mydefs.h"
-#include "MathLib.h"
-#include "upow.tbl"
-#include <math_private.h>
-#include <fenv_private.h>
-#include <math-underflow.h>
-#include <fenv.h>
+#include <stdint.h>
+#include <math-barriers.h>
+#include <math-narrow-eval.h>
+#include "math_config.h"
-#ifndef SECTION
-# define SECTION
-#endif
+/*
+Worst-case error: 0.54 ULP (~= ulperr_exp + 1024*Ln2*relerr_log*2^53)
+relerr_log: 1.3 * 2^-68 (Relative error of log, 1.5 * 2^-68 without fma)
+ulperr_exp: 0.509 ULP (ULP error of exp, 0.511 ULP without fma)
+*/
-static const double huge = 1.0e300, tiny = 1.0e-300;
+#define T __pow_log_data.tab
+#define A __pow_log_data.poly
+#define Ln2hi __pow_log_data.ln2hi
+#define Ln2lo __pow_log_data.ln2lo
+#define N (1 << POW_LOG_TABLE_BITS)
+#define OFF 0x3fe6955500000000
-double __exp1 (double x, double xx);
-static double log1 (double x, double *delta);
-static int checkint (double x);
+/* Top 12 bits of a double (sign and exponent bits). */
+static inline uint32_t
+top12 (double x)
+{
+ return asuint64 (x) >> 52;
+}
-/* An ultimate power routine. Given two IEEE double machine numbers y, x it
- computes the correctly rounded (to nearest) value of X^y. */
-double
-SECTION
-__ieee754_pow (double x, double y)
+/* Compute y+TAIL = log(x) where the rounded result is y and TAIL has about
+ 15 bits of additional precision. IX is the bit representation of x, but
+ normalized in the subnormal range using the sign bit for the exponent. */
+static inline double_t
+log_inline (uint64_t ix, double_t *tail)
{
- double z, a, aa, t, a1, a2, y1, y2;
- mynumber u, v;
- int k;
- int4 qx, qy;
- v.x = y;
- u.x = x;
- if (v.i[LOW_HALF] == 0)
- { /* of y */
- qx = u.i[HIGH_HALF] & 0x7fffffff;
- /* Is x a NaN? */
- if ((((qx == 0x7ff00000) && (u.i[LOW_HALF] != 0)) || (qx > 0x7ff00000))
- && (y != 0 || issignaling (x)))
- return x + x;
- if (y == 1.0)
- return x;
- if (y == 2.0)
- return x * x;
- if (y == -1.0)
- return 1.0 / x;
- if (y == 0)
- return 1.0;
- }
- /* else */
- if (((u.i[HIGH_HALF] > 0 && u.i[HIGH_HALF] < 0x7ff00000) || /* x>0 and not x->0 */
- (u.i[HIGH_HALF] == 0 && u.i[LOW_HALF] != 0)) &&
- /* 2^-1023< x<= 2^-1023 * 0x1.0000ffffffff */
- (v.i[HIGH_HALF] & 0x7fffffff) < 0x4ff00000)
- { /* if y<-1 or y>1 */
- double retval;
+ /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */
+ double_t z, r, y, invc, logc, logctail, kd, hi, t1, t2, lo, lo1, lo2, p;
+ uint64_t iz, tmp;
+ int k, i;
- {
- SET_RESTORE_ROUND (FE_TONEAREST);
+ /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
+ The range is split into N subintervals.
+ The ith subinterval contains z and c is near its center. */
+ tmp = ix - OFF;
+ i = (tmp >> (52 - POW_LOG_TABLE_BITS)) % N;
+ k = (int64_t) tmp >> 52; /* arithmetic shift */
+ iz = ix - (tmp & 0xfffULL << 52);
+ z = asdouble (iz);
+ kd = (double_t) k;
- /* Avoid internal underflow for tiny y. The exact value of y does
- not matter if |y| <= 2**-64. */
- if (fabs (y) < 0x1p-64)
- y = y < 0 ? -0x1p-64 : 0x1p-64;
- z = log1 (x, &aa); /* x^y =e^(y log (X)) */
- t = y * CN;
- y1 = t - (t - y);
- y2 = y - y1;
- t = z * CN;
- a1 = t - (t - z);
- a2 = (z - a1) + aa;
- a = y1 * a1;
- aa = y2 * a1 + y * a2;
- a1 = a + aa;
- a2 = (a - a1) + aa;
+ /* log(x) = k*Ln2 + log(c) + log1p(z/c-1). */
+ invc = T[i].invc;
+ logc = T[i].logc;
+ logctail = T[i].logctail;
- /* Maximum relative error RElog of log1 is 1.0e-21 (69.7 bits).
- Maximum relative error REexp of __exp1 is 1.0e-18 (59.8 bits).
- We actually compute exp ((1 + RElog) * log (x) * y) * (1 + REexp).
- Since RElog/REexp are tiny and log (x) * y is at most log (DBL_MAX),
- this is equivalent to pow (x, y) * (1 + 710 * RElog + REexp).
- So the relative error is 710 * 1.0e-21 + 1.0e-18 = 1.7e-18
- (59 bits). The worst-case ULP error is 0.515. */
+ /* Note: 1/c is j/N or j/N/2 where j is an integer in [N,2N) and
+ |z/c - 1| < 1/N, so r = z/c - 1 is exactly representable. */
+#ifdef __FP_FAST_FMA
+ r = __builtin_fma (z, invc, -1.0);
+#else
+ /* Split z such that rhi, rlo and rhi*rhi are exact and |rlo| <= |r|. */
+ double_t zhi = asdouble ((iz + (1ULL << 31)) & (-1ULL << 32));
+ double_t zlo = z - zhi;
+ double_t rhi = zhi * invc - 1.0;
+ double_t rlo = zlo * invc;
+ r = rhi + rlo;
+#endif
- retval = __exp1 (a1, a2);
- }
+ /* k*Ln2 + log(c) + r. */
+ t1 = kd * Ln2hi + logc;
+ t2 = t1 + r;
+ lo1 = kd * Ln2lo + logctail;
+ lo2 = t1 - t2 + r;
- if (isinf (retval))
- retval = huge * huge;
- else if (retval == 0)
- retval = tiny * tiny;
- else
- math_check_force_underflow_nonneg (retval);
- return retval;
- }
+ /* Evaluation is optimized assuming superscalar pipelined execution. */
+ double_t ar, ar2, ar3, lo3, lo4;
+ ar = A[0] * r; /* A[0] = -0.5. */
+ ar2 = r * ar;
+ ar3 = r * ar2;
+ /* k*Ln2 + log(c) + r + A[0]*r*r. */
+#ifdef __FP_FAST_FMA
+ hi = t2 + ar2;
+ lo3 = __builtin_fma (ar, r, -ar2);
+ lo4 = t2 - hi + ar2;
+#else
+ double_t arhi = A[0] * rhi;
+ double_t arhi2 = rhi * arhi;
+ hi = t2 + arhi2;
+ lo3 = rlo * (ar + arhi);
+ lo4 = t2 - hi + arhi2;
+#endif
+ /* p = log1p(r) - r - A[0]*r*r. */
+ p = (ar3
+ * (A[1] + r * A[2] + ar2 * (A[3] + r * A[4] + ar2 * (A[5] + r * A[6]))));
+ lo = lo1 + lo2 + lo3 + lo4 + p;
+ y = hi + lo;
+ *tail = hi - y + lo;
+ return y;
+}
+
+#undef N
+#undef T
+#define N (1 << EXP_TABLE_BITS)
+#define InvLn2N __exp_data.invln2N
+#define NegLn2hiN __exp_data.negln2hiN
+#define NegLn2loN __exp_data.negln2loN
+#define Shift __exp_data.shift
+#define T __exp_data.tab
+#define C2 __exp_data.poly[5 - EXP_POLY_ORDER]
+#define C3 __exp_data.poly[6 - EXP_POLY_ORDER]
+#define C4 __exp_data.poly[7 - EXP_POLY_ORDER]
+#define C5 __exp_data.poly[8 - EXP_POLY_ORDER]
+#define C6 __exp_data.poly[9 - EXP_POLY_ORDER]
- if (x == 0)
+/* Handle cases that may overflow or underflow when computing the result that
+ is scale*(1+TMP) without intermediate rounding. The bit representation of
+ scale is in SBITS, but its computed exponent may have overflowed into the
+ sign bit, so it needs to be adjusted before being used as a double.
+ (int32_t)KI is the k used in the argument reduction and exponent adjustment
+ of scale; positive k here means the result may overflow and
+ negative k means the result may underflow. */
+static inline double
+specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
+{
+ double_t scale, y;
+
+ if ((ki & 0x80000000) == 0)
+ {
+ /* k > 0, the exponent of scale might have overflowed by <= 460. */
+ sbits -= 1009ull << 52;
+ scale = asdouble (sbits);
+ y = 0x1p1009 * (scale + scale * tmp);
+ return check_oflow (y);
+ }
+ /* k < 0, need special care in the subnormal range. */
+ sbits += 1022ull << 52;
+ /* Note: sbits is signed scale. */
+ scale = asdouble (sbits);
+ y = scale + scale * tmp;
+ if (fabs (y) < 1.0)
{
- if (((v.i[HIGH_HALF] & 0x7fffffff) == 0x7ff00000 && v.i[LOW_HALF] != 0)
- || (v.i[HIGH_HALF] & 0x7fffffff) > 0x7ff00000) /* NaN */
- return y + y;
- if (fabs (y) > 1.0e20)
- return (y > 0) ? 0 : 1.0 / 0.0;
- k = checkint (y);
- if (k == -1)
- return y < 0 ? 1.0 / x : x;
- else
- return y < 0 ? 1.0 / 0.0 : 0.0; /* return 0 */
+ /* Round y to the right precision before scaling it into the subnormal
+ range to avoid double rounding that can cause 0.5+E/2 ulp error where
+ E is the worst-case ulp error outside the subnormal range. So this
+ is only useful if the goal is better than 1 ulp worst-case error. */
+ double_t hi, lo, one = 1.0;
+ if (y < 0.0)
+ one = -1.0;
+ lo = scale - y + scale * tmp;
+ hi = one + y;
+ lo = one - hi + y + lo;
+ y = math_narrow_eval (hi + lo) - one;
+ /* Fix the sign of 0. */
+ if (y == 0.0)
+ y = asdouble (sbits & 0x8000000000000000);
+ /* The underflow exception needs to be signaled explicitly. */
+ math_force_eval (math_opt_barrier (0x1p-1022) * 0x1p-1022);
}
+ y = 0x1p-1022 * y;
+ return check_uflow (y);
+}
- qx = u.i[HIGH_HALF] & 0x7fffffff; /* no sign */
- qy = v.i[HIGH_HALF] & 0x7fffffff; /* no sign */
+#define SIGN_BIAS (0x800 << EXP_TABLE_BITS)
- if (qx >= 0x7ff00000 && (qx > 0x7ff00000 || u.i[LOW_HALF] != 0)) /* NaN */
- return x + y;
- if (qy >= 0x7ff00000 && (qy > 0x7ff00000 || v.i[LOW_HALF] != 0)) /* NaN */
- return x == 1.0 && !issignaling (y) ? 1.0 : y + y;
+/* Computes sign*exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.
+ The sign_bias argument is SIGN_BIAS or 0 and sets the sign to -1 or 1. */
+static inline double
+exp_inline (double x, double xtail, uint32_t sign_bias)
+{
+ uint32_t abstop;
+ uint64_t ki, idx, top, sbits;
+ /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */
+ double_t kd, z, r, r2, scale, tail, tmp;
- /* if x<0 */
- if (u.i[HIGH_HALF] < 0)
+ abstop = top12 (x) & 0x7ff;
+ if (__glibc_unlikely (abstop - top12 (0x1p-54)
+ >= top12 (512.0) - top12 (0x1p-54)))
{
- k = checkint (y);
- if (k == 0)
+ if (abstop - top12 (0x1p-54) >= 0x80000000)
{
- if (qy == 0x7ff00000)
- {
- if (x == -1.0)
- return 1.0;
- else if (x > -1.0)
- return v.i[HIGH_HALF] < 0 ? INF.x : 0.0;
- else
- return v.i[HIGH_HALF] < 0 ? 0.0 : INF.x;
- }
- else if (qx == 0x7ff00000)
- return y < 0 ? 0.0 : INF.x;
- return (x - x) / (x - x); /* y not integer and x<0 */
+ /* Avoid spurious underflow for tiny x. */
+ /* Note: 0 is common input. */
+ double_t one = WANT_ROUNDING ? 1.0 + x : 1.0;
+ return sign_bias ? -one : one;
}
- else if (qx == 0x7ff00000)
+ if (abstop >= top12 (1024.0))
{
- if (k < 0)
- return y < 0 ? nZERO.x : nINF.x;
+ /* Note: inf and nan are already handled. */
+ if (asuint64 (x) >> 63)
+ return __math_uflow (sign_bias);
else
- return y < 0 ? 0.0 : INF.x;
- }
- /* if y even or odd */
- if (k == 1)
- return __ieee754_pow (-x, y);
- else
- {
- double retval;
- {
- SET_RESTORE_ROUND (FE_TONEAREST);
- retval = -__ieee754_pow (-x, y);
- }
- if (isinf (retv