path: root/sysdeps/aarch64/fpu/cosf_advsimd.c
author     Joe Ramsay <Joe.Ramsay@arm.com>        2023-06-28 12:19:36 +0100
committer  Szabolcs Nagy <szabolcs.nagy@arm.com>  2023-06-30 09:04:10 +0100
commit     aed39a3aa3ea68b14dce3395fb14b1416541e6c6 (patch)
tree       38f866205e31b1bef745122636dbaa61922cb1cc /sysdeps/aarch64/fpu/cosf_advsimd.c
parent     84e93afc734a3c30e35ed2d21466a44259ac577e (diff)
aarch64: Add vector implementations of cos routines
Replace the loop-over-scalar placeholder routines with optimised implementations from Arm Optimized Routines (AOR).

Also add some headers containing utilities for aarch64 libmvec routines, and update libm-test-ulps.

Data tables for new routines are used via a pointer with a barrier on it, in order to prevent overly aggressive constant inlining in GCC. This allows a single adrp, combined with offset loads, to be used for every constant in the table.

Special-case handlers are marked NOINLINE in order to confine the save/restore overhead of switching from vector to normal calling standard. This way we only incur the extra memory access in the exceptional cases. NOINLINE definitions have been moved to math_private.h in order to reduce duplication.

AOR exposes a config option, WANT_SIMD_EXCEPT, to enable selective masking (and later fixing up) of invalid lanes, in order to trigger fp exceptions correctly (AdvSIMD only). This is tested and maintained in AOR, however it is configured off at source level here for performance reasons. We keep the WANT_SIMD_EXCEPT blocks in routine sources to greatly simplify the upstreaming process from AOR to glibc.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
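[Editor's note: the table-with-barrier pattern mentioned above can be illustrated with a small sketch. The ptr_barrier name matches the macro used in the diff below, but this definition is only an illustration of the empty-asm barrier technique; the real definition lives in the new aarch64 vector math headers and may differ.]

/* Sketch of the pointer-barrier idiom: an empty asm hides the pointer's
   value from the compiler, so GCC reaches every constant in the table
   through a single adrp plus offset loads instead of materialising each
   constant inline.  Illustrative only; see the new v_math.h header for
   the definition actually used.  */
#define ptr_barrier(ptr)                  \
  ({                                      \
    __typeof (ptr) __ptr = (ptr);         \
    __asm ("" : "+r" (__ptr));            \
    __ptr;                                \
  })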
Diffstat (limited to 'sysdeps/aarch64/fpu/cosf_advsimd.c')
-rw-r--r--  sysdeps/aarch64/fpu/cosf_advsimd.c  |  77
1 file changed, 71 insertions(+), 6 deletions(-)
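[Editor's note: in the diff below, special_case hands the flagged lanes back to the scalar cosf through v_call_f32. The following lane-wise fallback is a minimal sketch of that pattern; the helper name and loop are illustrative assumptions, and the real helper is provided by the new v_math.h header.]

#include <arm_neon.h>

/* Illustrative lane-wise scalar fallback in the spirit of v_call_f32:
   lanes flagged in cmp are recomputed with the scalar routine f, all
   other lanes keep the vector result y.  Relies on GCC/Clang vector
   subscripting of NEON types; the actual glibc helper may differ.  */
static inline float32x4_t
v_call_f32_sketch (float (*f) (float), float32x4_t x, float32x4_t y,
                   uint32x4_t cmp)
{
  for (int i = 0; i < 4; i++)
    if (cmp[i])
      y[i] = f (x[i]);
  return y;
}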
diff --git a/sysdeps/aarch64/fpu/cosf_advsimd.c b/sysdeps/aarch64/fpu/cosf_advsimd.c
index 35bb81aead..f05dd2bcda 100644
--- a/sysdeps/aarch64/fpu/cosf_advsimd.c
+++ b/sysdeps/aarch64/fpu/cosf_advsimd.c
@@ -17,13 +17,78 @@
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
-#include <math.h>
+#include "v_math.h"
-#include "advsimd_utils.h"
+static const struct data
+{
+ float32x4_t poly[4];
+ float32x4_t range_val, inv_pi, half_pi, shift, pi_1, pi_2, pi_3;
+} data = {
+ /* 1.886 ulp error. */
+ .poly = { V4 (-0x1.555548p-3f), V4 (0x1.110df4p-7f), V4 (-0x1.9f42eap-13f),
+ V4 (0x1.5b2e76p-19f) },
+
+ .pi_1 = V4 (0x1.921fb6p+1f),
+ .pi_2 = V4 (-0x1.777a5cp-24f),
+ .pi_3 = V4 (-0x1.ee59dap-49f),
+
+ .inv_pi = V4 (0x1.45f306p-2f),
+ .shift = V4 (0x1.8p+23f),
+ .half_pi = V4 (0x1.921fb6p0f),
+ .range_val = V4 (0x1p20f)
+};
+
+#define C(i) d->poly[i]
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t y, uint32x4_t odd, uint32x4_t cmp)
+{
+ /* Fall back to scalar code. */
+ y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
+ return v_call_f32 (cosf, x, y, cmp);
+}
-VPCS_ATTR
-float32x4_t
-V_NAME_F1 (cos) (float32x4_t x)
+float32x4_t VPCS_ATTR V_NAME_F1 (cos) (float32x4_t x)
{
- return v_call_f32 (cosf, x);
+ const struct data *d = ptr_barrier (&data);
+ float32x4_t n, r, r2, r3, y;
+ uint32x4_t odd, cmp;
+
+#if WANT_SIMD_EXCEPT
+ r = vabsq_f32 (x);
+ cmp = vcgeq_u32 (vreinterpretq_u32_f32 (r),
+ vreinterpretq_u32_f32 (d->range_val));
+ if (__glibc_unlikely (v_any_u32 (cmp)))
+ /* If fenv exceptions are to be triggered correctly, set any special lanes
+ to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
+ special-case handler later. */
+ r = vbslq_f32 (cmp, v_f32 (1.0f), r);
+#else
+ cmp = vcageq_f32 (d->range_val, x);
+ cmp = vceqzq_u32 (cmp); /* cmp = ~cmp. */
+ r = x;
+#endif
+
+ /* n = rint((|x|+pi/2)/pi) - 0.5. */
+ n = vfmaq_f32 (d->shift, d->inv_pi, vaddq_f32 (r, d->half_pi));
+ odd = vshlq_n_u32 (vreinterpretq_u32_f32 (n), 31);
+ n = vsubq_f32 (n, d->shift);
+ n = vsubq_f32 (n, v_f32 (0.5f));
+
+ /* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). */
+ r = vfmsq_f32 (r, d->pi_1, n);
+ r = vfmsq_f32 (r, d->pi_2, n);
+ r = vfmsq_f32 (r, d->pi_3, n);
+
+ /* y = sin(r). */
+ r2 = vmulq_f32 (r, r);
+ r3 = vmulq_f32 (r2, r);
+ y = vfmaq_f32 (C (2), C (3), r2);
+ y = vfmaq_f32 (C (1), y, r2);
+ y = vfmaq_f32 (C (0), y, r2);
+ y = vfmaq_f32 (r, y, r3);
+
+ if (__glibc_unlikely (v_any_u32 (cmp)))
+ return special_case (x, y, odd, cmp);
+ return vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
}
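[Editor's note: for completeness, a hedged usage sketch of the new entry point. V_NAME_F1 (cos) is expected to expand to the vector-ABI symbol _ZGVnN4v_cosf; user code normally reaches it through compiler auto-vectorisation and libmvec rather than a hand-written prototype like the one assumed below.]

#include <arm_neon.h>
#include <stdio.h>

/* Hand-declared prototype for the AdvSIMD cosf entry point added by this
   patch.  The mangled name and the aarch64_vector_pcs attribute follow the
   V_NAME_F1/VPCS_ATTR conventions in the diff; this is a test sketch, not
   the supported way to call libmvec.  */
__attribute__ ((aarch64_vector_pcs)) float32x4_t _ZGVnN4v_cosf (float32x4_t);

int
main (void)
{
  float32x4_t x = { 0.0f, 0.5f, 1.0f, 2.0f };
  float32x4_t y = _ZGVnN4v_cosf (x);
  for (int i = 0; i < 4; i++)
    printf ("cosf(%g) ~ %g\n", (double) x[i], (double) y[i]);
  return 0;
}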