| field | value | date |
|---|---|---|
| author | Joe Ramsay <Joe.Ramsay@arm.com> | 2023-10-05 17:10:51 +0100 |
| committer | Szabolcs Nagy <szabolcs.nagy@arm.com> | 2023-10-23 15:00:45 +0100 |
| commit | 067a34156c19fb3c53824e37d70820c0ce5b87b2 (patch) | |
| tree | c0e5649d7892db1a9fc195adc8b381c01ab77544 | |
| parent | a8e3ab3074d448ff3e58ac8f850d955dfed830ad (diff) | |
| download | glibc-067a34156c19fb3c53824e37d70820c0ce5b87b2.tar.xz, glibc-067a34156c19fb3c53824e37d70820c0ce5b87b2.zip | |
aarch64: Add vector implementations of log10 routines
A lookup table is also added, shared between the AdvSIMD and SVE log10 routines.
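The four new symbols follow the libmvec vector ABI and are normally reached when the compiler auto-vectorizes scalar `log10`/`log10f` calls. As a minimal sketch (not part of the patch), the AdvSIMD entry points can also be called directly, assuming a GCC or Clang aarch64 toolchain and linking with `-lmvec -lm`:

```c
#include <arm_neon.h>
#include <stdio.h>

/* Declarations matching sysdeps/aarch64/fpu/bits/math-vector.h; the
   entry points use the aarch64 vector PCS.  */
__attribute__ ((aarch64_vector_pcs)) float64x2_t _ZGVnN2v_log10 (float64x2_t);
__attribute__ ((aarch64_vector_pcs)) float32x4_t _ZGVnN4v_log10f (float32x4_t);

int
main (void)
{
  float64x2_t x = { 100.0, 1000.0 };
  float64x2_t y = _ZGVnN2v_log10 (x);	/* expect ~{ 2.0, 3.0 }.  */
  printf ("%f %f\n", y[0], y[1]);

  float32x4_t xf = { 1.0f, 10.0f, 100.0f, 1000.0f };
  float32x4_t yf = _ZGVnN4v_log10f (xf); /* expect ~{ 0, 1, 2, 3 }.  */
  printf ("%f %f %f %f\n", yf[0], yf[1], yf[2], yf[3]);
  return 0;
}
```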
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | sysdeps/aarch64/fpu/Makefile | 4 |
| -rw-r--r-- | sysdeps/aarch64/fpu/Versions | 4 |
| -rw-r--r-- | sysdeps/aarch64/fpu/bits/math-vector.h | 4 |
| -rw-r--r-- | sysdeps/aarch64/fpu/log10_advsimd.c | 119 |
| -rw-r--r-- | sysdeps/aarch64/fpu/log10_sve.c | 76 |
| -rw-r--r-- | sysdeps/aarch64/fpu/log10f_advsimd.c | 82 |
| -rw-r--r-- | sysdeps/aarch64/fpu/log10f_sve.c | 94 |
| -rw-r--r-- | sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c | 1 |
| -rw-r--r-- | sysdeps/aarch64/fpu/test-double-sve-wrappers.c | 1 |
| -rw-r--r-- | sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c | 1 |
| -rw-r--r-- | sysdeps/aarch64/fpu/test-float-sve-wrappers.c | 1 |
| -rw-r--r-- | sysdeps/aarch64/fpu/v_log10_data.c | 175 |
| -rw-r--r-- | sysdeps/aarch64/fpu/vecmath_config.h | 11 |
| -rw-r--r-- | sysdeps/aarch64/libm-test-ulps | 8 |
| -rw-r--r-- | sysdeps/unix/sysv/linux/aarch64/libmvec.abilist | 4 |
15 files changed, 584 insertions(+), 1 deletion(-)
```diff
diff --git a/sysdeps/aarch64/fpu/Makefile b/sysdeps/aarch64/fpu/Makefile
index c3f204ff0d..0a047a150d 100644
--- a/sysdeps/aarch64/fpu/Makefile
+++ b/sysdeps/aarch64/fpu/Makefile
@@ -2,6 +2,7 @@ libmvec-supported-funcs = cos \
                           exp \
                           exp2 \
                           log \
+                          log10 \
                           log2 \
                           sin \
                           tan
@@ -18,7 +19,8 @@ libmvec-support = $(addsuffix f_advsimd,$(float-advsimd-funcs)) \
                   $(addsuffix _sve,$(double-sve-funcs)) \
                   v_log_data \
                   v_exp_data \
-                  v_log2_data
+                  v_log2_data \
+                  v_log10_data
 endif
 
 sve-cflags = -march=armv8-a+sve
diff --git a/sysdeps/aarch64/fpu/Versions b/sysdeps/aarch64/fpu/Versions
index ffe62a6f65..358efed5ee 100644
--- a/sysdeps/aarch64/fpu/Versions
+++ b/sysdeps/aarch64/fpu/Versions
@@ -22,6 +22,10 @@ libmvec {
     _ZGVnN2v_exp2;
     _ZGVsMxv_exp2f;
     _ZGVsMxv_exp2;
+    _ZGVnN4v_log10f;
+    _ZGVnN2v_log10;
+    _ZGVsMxv_log10f;
+    _ZGVsMxv_log10;
     _ZGVnN4v_log2f;
     _ZGVnN2v_log2;
     _ZGVsMxv_log2f;
diff --git a/sysdeps/aarch64/fpu/bits/math-vector.h b/sysdeps/aarch64/fpu/bits/math-vector.h
index 92f214b194..59f2efa6d7 100644
--- a/sysdeps/aarch64/fpu/bits/math-vector.h
+++ b/sysdeps/aarch64/fpu/bits/math-vector.h
@@ -53,6 +53,7 @@ __vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_expf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_exp2f (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_logf (__f32x4_t);
+__vpcs __f32x4_t _ZGVnN4v_log10f (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_log2f (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_tanf (__f32x4_t);
@@ -61,6 +62,7 @@ __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_exp (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_exp2 (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_log (__f64x2_t);
+__vpcs __f64x2_t _ZGVnN2v_log10 (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_log2 (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_tan (__f64x2_t);
@@ -74,6 +76,7 @@ __sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_expf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_exp2f (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_logf (__sv_f32_t, __sv_bool_t);
+__sv_f32_t _ZGVsMxv_log10f (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_log2f (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_tanf (__sv_f32_t, __sv_bool_t);
@@ -82,6 +85,7 @@ __sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_exp (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_exp2 (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_log (__sv_f64_t, __sv_bool_t);
+__sv_f64_t _ZGVsMxv_log10 (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_log2 (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_tan (__sv_f64_t, __sv_bool_t);
diff --git a/sysdeps/aarch64/fpu/log10_advsimd.c b/sysdeps/aarch64/fpu/log10_advsimd.c
new file mode 100644
index 0000000000..05b509f134
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log10_advsimd.c
@@ -0,0 +1,119 @@
+/* Double-precision vector (AdvSIMD) log10 function
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+#include "poly_advsimd_f64.h"
+
+#define N (1 << V_LOG10_TABLE_BITS)
+
+static const struct data
+{
+  uint64x2_t min_norm;
+  uint32x4_t special_bound;
+  float64x2_t poly[5];
+  float64x2_t invln10, log10_2, ln2;
+  uint64x2_t sign_exp_mask;
+} data = {
+  /* Computed from log coefficients divided by log(10) then rounded to double
+     precision.  */
+  .poly = { V2 (-0x1.bcb7b1526e506p-3), V2 (0x1.287a7636be1d1p-3),
+            V2 (-0x1.bcb7b158af938p-4), V2 (0x1.63c78734e6d07p-4),
+            V2 (-0x1.287461742fee4p-4) },
+  .ln2 = V2 (0x1.62e42fefa39efp-1),
+  .invln10 = V2 (0x1.bcb7b1526e50ep-2),
+  .log10_2 = V2 (0x1.34413509f79ffp-2),
+  .min_norm = V2 (0x0010000000000000),  /* asuint64(0x1p-1022).  */
+  .special_bound = V4 (0x7fe00000),     /* asuint64(inf) - min_norm.  */
+  .sign_exp_mask = V2 (0xfff0000000000000),
+};
+
+#define Off v_u64 (0x3fe6900900000000)
+#define IndexMask (N - 1)
+
+#define T(s, i) __v_log10_data.s[i]
+
+struct entry
+{
+  float64x2_t invc;
+  float64x2_t log10c;
+};
+
+static inline struct entry
+lookup (uint64x2_t i)
+{
+  struct entry e;
+  uint64_t i0 = (i[0] >> (52 - V_LOG10_TABLE_BITS)) & IndexMask;
+  uint64_t i1 = (i[1] >> (52 - V_LOG10_TABLE_BITS)) & IndexMask;
+  float64x2_t e0 = vld1q_f64 (&__v_log10_data.table[i0].invc);
+  float64x2_t e1 = vld1q_f64 (&__v_log10_data.table[i1].invc);
+  e.invc = vuzp1q_f64 (e0, e1);
+  e.log10c = vuzp2q_f64 (e0, e1);
+  return e;
+}
+
+static float64x2_t VPCS_ATTR NOINLINE
+special_case (float64x2_t x, float64x2_t y, float64x2_t hi, float64x2_t r2,
+              uint32x2_t special)
+{
+  return v_call_f64 (log10, x, vfmaq_f64 (hi, r2, y), vmovl_u32 (special));
+}
+
+/* Fast implementation of double-precision vector log10
+   is a slight modification of double-precision vector log.
+   Max ULP error: < 2.5 ulp (nearest rounding.)
+   Maximum measured at 2.46 ulp for x in [0.96, 0.97]
+     _ZGVnN2v_log10(0x1.13192407fcb46p+0) got 0x1.fff6be3cae4bbp-6
+                                          want 0x1.fff6be3cae4b9p-6.  */
+float64x2_t VPCS_ATTR V_NAME_D1 (log10) (float64x2_t x)
+{
+  const struct data *d = ptr_barrier (&data);
+  uint64x2_t ix = vreinterpretq_u64_f64 (x);
+  uint32x2_t special = vcge_u32 (vsubhn_u64 (ix, d->min_norm),
+                                 vget_low_u32 (d->special_bound));
+
+  /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
+     The range is split into N subintervals.
+     The ith subinterval contains z and c is near its center.  */
+  uint64x2_t tmp = vsubq_u64 (ix, Off);
+  int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52);
+  uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, d->sign_exp_mask));
+  float64x2_t z = vreinterpretq_f64_u64 (iz);
+
+  struct entry e = lookup (tmp);
+
+  /* log10(x) = log1p(z/c-1)/log(10) + log10(c) + k*log10(2).  */
+  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
+  float64x2_t kd = vcvtq_f64_s64 (k);
+
+  /* hi = r / log(10) + log10(c) + k*log10(2).
+     Constants in v_log10_data.c are computed (in extended precision) as
+     e.log10c := e.logc * ivln10.  */
+  float64x2_t w = vfmaq_f64 (e.log10c, r, d->invln10);
+
+  /* y = log10(1+r) + n * log10(2).  */
+  float64x2_t hi = vfmaq_f64 (w, kd, d->log10_2);
+
+  /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi.  */
+  float64x2_t r2 = vmulq_f64 (r, r);
+  float64x2_t y = v_pw_horner_4_f64 (r, r2, d->poly);
+
+  if (__glibc_unlikely (v_any_u32h (special)))
+    return special_case (x, y, hi, r2, special);
+  return vfmaq_f64 (hi, r2, y);
+}
diff --git a/sysdeps/aarch64/fpu/log10_sve.c b/sysdeps/aarch64/fpu/log10_sve.c
new file mode 100644
index 0000000000..91060ab4a2
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log10_sve.c
@@ -0,0 +1,76 @@
+/* Double-precision vector (SVE) log10 function
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+#include "poly_sve_f64.h"
+
+#define Min 0x0010000000000000
+#define Max 0x7ff0000000000000
+#define Thres 0x7fe0000000000000 /* Max - Min.  */
+#define Off 0x3fe6900900000000
+#define N (1 << V_LOG10_TABLE_BITS)
+
+static svfloat64_t NOINLINE
+special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
+{
+  return sv_call_f64 (log10, x, y, special);
+}
+
+/* SVE log10 algorithm.
+   Maximum measured error is 2.46 ulps.
+   SV_NAME_D1 (log10)(0x1.131956cd4b627p+0) got 0x1.fffbdf6eaa669p-6
+                                            want 0x1.fffbdf6eaa667p-6.  */
+svfloat64_t SV_NAME_D1 (log10) (svfloat64_t x, const svbool_t pg)
+{
+  svuint64_t ix = svreinterpret_u64 (x);
+  svbool_t special = svcmpge (pg, svsub_x (pg, ix, Min), Thres);
+
+  /* x = 2^k z; where z is in range [Off,2*Off) and exact.
+     The range is split into N subintervals.
+     The ith subinterval contains z and c is near its center.  */
+  svuint64_t tmp = svsub_x (pg, ix, Off);
+  svuint64_t i = svlsr_x (pg, tmp, 51 - V_LOG10_TABLE_BITS);
+  i = svand_x (pg, i, (N - 1) << 1);
+  svfloat64_t k = svcvt_f64_x (pg, svasr_x (pg, svreinterpret_s64 (tmp), 52));
+  svfloat64_t z = svreinterpret_f64 (
+      svsub_x (pg, ix, svand_x (pg, tmp, 0xfffULL << 52)));
+
+  /* log(x) = k*log(2) + log(c) + log(z/c).  */
+  svfloat64_t invc = svld1_gather_index (pg, &__v_log10_data.table[0].invc, i);
+  svfloat64_t logc
+      = svld1_gather_index (pg, &__v_log10_data.table[0].log10c, i);
+
+  /* We approximate log(z/c) with a polynomial P(x) ~= log(x + 1):
+     r = z/c - 1 (we look up precomputed 1/c)
+     log(z/c) ~= P(r).  */
+  svfloat64_t r = svmad_x (pg, invc, z, -1.0);
+
+  /* hi = log(c) + k*log(2).  */
+  svfloat64_t w = svmla_x (pg, logc, r, __v_log10_data.invln10);
+  svfloat64_t hi = svmla_x (pg, w, k, __v_log10_data.log10_2);
+
+  /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi.  */
+  svfloat64_t r2 = svmul_x (pg, r, r);
+  svfloat64_t y = sv_pw_horner_4_f64_x (pg, r, r2, __v_log10_data.poly);
+
+  if (__glibc_unlikely (svptest_any (pg, special)))
+    return special_case (x, svmla_x (svnot_z (pg, special), hi, r2, y),
+                         special);
+  return svmla_x (pg, hi, r2, y);
+}
diff --git a/sysdeps/aarch64/fpu/log10f_advsimd.c b/sysdeps/aarch64/fpu/log10f_advsimd.c
new file mode 100644
index 0000000000..ba02060bbe
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log10f_advsimd.c
@@ -0,0 +1,82 @@
+/* Single-precision vector (AdvSIMD) log10 function
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+#include "poly_advsimd_f32.h"
+
+static const struct data
+{
+  uint32x4_t min_norm;
+  uint16x8_t special_bound;
+  float32x4_t poly[8];
+  float32x4_t inv_ln10, ln2;
+  uint32x4_t off, mantissa_mask;
+} data = {
+  /* Use order 9 for log10(1+x), i.e. order 8 for log10(1+x)/x, with x in
+     [-1/3, 1/3] (offset=2/3). Max. relative error: 0x1.068ee468p-25.  */
+  .poly = { V4 (-0x1.bcb79cp-3f), V4 (0x1.2879c8p-3f), V4 (-0x1.bcd472p-4f),
+            V4 (0x1.6408f8p-4f), V4 (-0x1.246f8p-4f), V4 (0x1.f0e514p-5f),
+            V4 (-0x1.0fc92cp-4f), V4 (0x1.f5f76ap-5f) },
+  .ln2 = V4 (0x1.62e43p-1f),
+  .inv_ln10 = V4 (0x1.bcb7b2p-2f),
+  .min_norm = V4 (0x00800000),
+  .special_bound = V8 (0x7f00), /* asuint32(inf) - min_norm.  */
+  .off = V4 (0x3f2aaaab),       /* 0.666667.  */
+  .mantissa_mask = V4 (0x007fffff),
+};
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t y, float32x4_t p, float32x4_t r2,
+              uint16x4_t cmp)
+{
+  /* Fall back to scalar code.  */
+  return v_call_f32 (log10f, x, vfmaq_f32 (y, p, r2), vmovl_u16 (cmp));
+}
+
+/* Fast implementation of AdvSIMD log10f,
+   uses a similar approach as AdvSIMD logf with the same offset (i.e., 2/3) and
+   an order 9 polynomial.
+   Maximum error: 3.305ulps (nearest rounding.)
+     _ZGVnN4v_log10f(0x1.555c16p+0) got 0x1.ffe2fap-4
+                                    want 0x1.ffe2f4p-4.  */
float32x4_t VPCS_ATTR V_NAME_F1 (log10) (float32x4_t x)
+{
+  const struct data *d = ptr_barrier (&data);
+  uint32x4_t u = vreinterpretq_u32_f32 (x);
+  uint16x4_t special = vcge_u16 (vsubhn_u32 (u, d->min_norm),
+                                 vget_low_u16 (d->special_bound));
+
+  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
+  u = vsubq_u32 (u, d->off);
+  float32x4_t n = vcvtq_f32_s32 (
+      vshrq_n_s32 (vreinterpretq_s32_u32 (u), 23)); /* signextend.  */
+  u = vaddq_u32 (vandq_u32 (u, d->mantissa_mask), d->off);
+  float32x4_t r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));
+
+  /* y = log10(1+r) + n * log10(2).  */
+  float32x4_t r2 = vmulq_f32 (r, r);
+  float32x4_t poly = v_pw_horner_7_f32 (r, r2, d->poly);
+  /* y = Log10(2) * n + poly * InvLn(10).  */
+  float32x4_t y = vfmaq_f32 (r, d->ln2, n);
+  y = vmulq_f32 (y, d->inv_ln10);
+
+  if (__glibc_unlikely (v_any_u16h (special)))
+    return special_case (x, y, poly, r2, special);
+  return vfmaq_f32 (y, poly, r2);
+}
diff --git a/sysdeps/aarch64/fpu/log10f_sve.c b/sysdeps/aarch64/fpu/log10f_sve.c
new file mode 100644
index 0000000000..8729b17ef9
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log10f_sve.c
@@ -0,0 +1,94 @@
+/* Single-precision vector (SVE) log10 function
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+
+static const struct data
+{
+  float poly_0246[4];
+  float poly_1357[4];
+  float ln2, inv_ln10;
+} data = {
+  .poly_1357 = {
+    /* Coefficients copied from the AdvSIMD routine, then rearranged so that
+       coeffs 1, 3, 5 and 7 can be loaded as a single quad-word, hence used
+       with _lane variant of MLA intrinsic.  */
+    0x1.2879c8p-3f, 0x1.6408f8p-4f, 0x1.f0e514p-5f, 0x1.f5f76ap-5f
+  },
+  .poly_0246 = { -0x1.bcb79cp-3f, -0x1.bcd472p-4f, -0x1.246f8p-4f,
+                 -0x1.0fc92cp-4f },
+  .ln2 = 0x1.62e43p-1f,
+  .inv_ln10 = 0x1.bcb7b2p-2f,
+};
+
+#define Min 0x00800000
+#define Max 0x7f800000
+#define Thres 0x7f000000 /* Max - Min.  */
+#define Offset 0x3f2aaaab /* 0.666667.  */
+#define MantissaMask 0x007fffff
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t special)
+{
+  return sv_call_f32 (log10f, x, y, special);
+}
+
+/* Optimised implementation of SVE log10f using the same algorithm and
+   polynomial as AdvSIMD log10f.
+   Maximum error is 3.31ulps:
+     SV_NAME_F1 (log10)(0x1.555c16p+0) got 0x1.ffe2fap-4
+                                       want 0x1.ffe2f4p-4.  */
+svfloat32_t SV_NAME_F1 (log10) (svfloat32_t x, const svbool_t pg)
+{
+  const struct data *d = ptr_barrier (&data);
+  svuint32_t ix = svreinterpret_u32 (x);
+  svbool_t special = svcmpge (pg, svsub_x (pg, ix, Min), Thres);
+
+  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
+  ix = svsub_x (pg, ix, Offset);
+  svfloat32_t n = svcvt_f32_x (
+      pg, svasr_x (pg, svreinterpret_s32 (ix), 23)); /* signextend.  */
+  ix = svand_x (pg, ix, MantissaMask);
+  ix = svadd_x (pg, ix, Offset);
+  svfloat32_t r = svsub_x (pg, svreinterpret_f32 (ix), 1.0f);
+
+  /* y = log10(1+r) + n*log10(2)
+     log10(1+r) ~ r * InvLn(10) + P(r)
+     where P(r) is a polynomial. Use order 9 for log10(1+x), i.e. order 8 for
+     log10(1+x)/x, with x in [-1/3, 1/3] (offset=2/3).  */
+  svfloat32_t r2 = svmul_x (pg, r, r);
+  svfloat32_t r4 = svmul_x (pg, r2, r2);
+  svfloat32_t p_1357 = svld1rq (svptrue_b32 (), &d->poly_1357[0]);
+  svfloat32_t q_01 = svmla_lane (sv_f32 (d->poly_0246[0]), r, p_1357, 0);
+  svfloat32_t q_23 = svmla_lane (sv_f32 (d->poly_0246[1]), r, p_1357, 1);
+  svfloat32_t q_45 = svmla_lane (sv_f32 (d->poly_0246[2]), r, p_1357, 2);
+  svfloat32_t q_67 = svmla_lane (sv_f32 (d->poly_0246[3]), r, p_1357, 3);
+  svfloat32_t q_47 = svmla_x (pg, q_45, r2, q_67);
+  svfloat32_t q_03 = svmla_x (pg, q_01, r2, q_23);
+  svfloat32_t y = svmla_x (pg, q_03, r4, q_47);
+
+  /* Using hi = Log10(2)*n + r*InvLn(10) is faster but less accurate.  */
+  svfloat32_t hi = svmla_x (pg, r, n, d->ln2);
+  hi = svmul_x (pg, hi, d->inv_ln10);
+
+  if (__glibc_unlikely (svptest_any (pg, special)))
+    return special_case (x, svmla_x (svnot_z (pg, special), hi, r2, y),
+                         special);
+  return svmla_x (pg, hi, r2, y);
+}
diff --git a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
index d30dcd6f95..8d05498ec9 100644
--- a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
@@ -27,6 +27,7 @@ VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
 VPCS_VECTOR_WRAPPER (exp_advsimd, _ZGVnN2v_exp)
 VPCS_VECTOR_WRAPPER (exp2_advsimd, _ZGVnN2v_exp2)
 VPCS_VECTOR_WRAPPER (log_advsimd, _ZGVnN2v_log)
+VPCS_VECTOR_WRAPPER (log10_advsimd, _ZGVnN2v_log10)
 VPCS_VECTOR_WRAPPER (log2_advsimd, _ZGVnN2v_log2)
 VPCS_VECTOR_WRAPPER (sin_advsimd, _ZGVnN2v_sin)
 VPCS_VECTOR_WRAPPER (tan_advsimd, _ZGVnN2v_tan)
diff --git a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
index 22a8479100..b65bc6f1e6 100644
--- a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
@@ -36,6 +36,7 @@ SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
 SVE_VECTOR_WRAPPER (exp_sve, _ZGVsMxv_exp)
 SVE_VECTOR_WRAPPER (exp2_sve, _ZGVsMxv_exp2)
 SVE_VECTOR_WRAPPER (log_sve, _ZGVsMxv_log)
+SVE_VECTOR_WRAPPER (log10_sve, _ZGVsMxv_log10)
 SVE_VECTOR_WRAPPER (log2_sve, _ZGVsMxv_log2)
 SVE_VECTOR_WRAPPER (sin_sve, _ZGVsMxv_sin)
 SVE_VECTOR_WRAPPER (tan_sve, _ZGVsMxv_tan)
diff --git a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
index e8f7f47c67..6ced0d4488 100644
--- a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
@@ -27,6 +27,7 @@ VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
 VPCS_VECTOR_WRAPPER (expf_advsimd, _ZGVnN4v_expf)
 VPCS_VECTOR_WRAPPER (exp2f_advsimd, _ZGVnN4v_exp2f)
 VPCS_VECTOR_WRAPPER (logf_advsimd, _ZGVnN4v_logf)
+VPCS_VECTOR_WRAPPER (log10f_advsimd, _ZGVnN4v_log10f)
 VPCS_VECTOR_WRAPPER (log2f_advsimd, _ZGVnN4v_log2f)
 VPCS_VECTOR_WRAPPER (sinf_advsimd, _ZGVnN4v_sinf)
 VPCS_VECTOR_WRAPPER (tanf_advsimd, _ZGVnN4v_tanf)
diff --git a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
index f5e9584265..2ed8d0659a 100644
--- a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
@@ -36,6 +36,7 @@ SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
 SVE_VECTOR_WRAPPER (expf_sve, _ZGVsMxv_expf)
```
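The double-precision routines above all share one reduction: split x as 2^k·z with z in [Off, 2·Off), look up 1/c and log10(c) for the subinterval containing z, and evaluate a small polynomial in r = z/c - 1. The scalar model below illustrates that structure under stated assumptions: the table here is a stand-in built at runtime (the patch ships precomputed values in v_log10_data.c, not shown on this page), TABLE_BITS is an illustrative choice since vecmath_config.h is also not shown, and only the first-order term of the polynomial correction is kept.

```c
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TABLE_BITS 7 /* illustrative stand-in for V_LOG10_TABLE_BITS.  */
#define N (1 << TABLE_BITS)
#define OFF 0x3fe6900900000000ULL /* same Off as the vector routines.  */

static double invc[N], log10c[N];

/* Entry i holds 1/c and log10(c) for c near the center of the ith
   subinterval of [OFF, 2*OFF).  */
static void
build_table (void)
{
  for (int i = 0; i < N; i++)
    {
      uint64_t u = OFF + ((2 * (uint64_t) i + 1) << (51 - TABLE_BITS));
      double c;
      memcpy (&c, &u, sizeof c);
      invc[i] = 1.0 / c;
      log10c[i] = log10 (c);
    }
}

/* First-order model, valid for positive normal x only; the real routines
   add a degree-5 polynomial in r and divert special inputs to scalar
   log10.  */
static double
log10_model (double x)
{
  uint64_t ix;
  memcpy (&ix, &x, sizeof ix);
  uint64_t tmp = ix - OFF;
  int64_t k = (int64_t) tmp >> 52;             /* exponent k.  */
  uint64_t iz = ix - (tmp & (0xfffULL << 52)); /* z = x / 2^k.  */
  double z;
  memcpy (&z, &iz, sizeof z);
  size_t i = (tmp >> (52 - TABLE_BITS)) & (N - 1);
  double r = z * invc[i] - 1.0;                /* |r| ~< 0x1p-8.  */
  /* log10(x) ~= r/ln(10) + log10(c) + k*log10(2).  */
  return r * 0x1.bcb7b1526e50ep-2 + log10c[i] + k * 0x1.34413509f79ffp-2;
}

int
main (void)
{
  build_table ();
  printf ("model %.12f exact %.12f\n", log10_model (42.0), log10 (42.0));
  return 0;
}
```

The single-precision routines skip the table entirely: with the 2/3 offset the reduced argument already lies in [-1/3, 1/3], small enough for one order-8 polynomial to reach the quoted 3.3-ulp bound.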
