author     Joe Ramsay <Joe.Ramsay@arm.com>        2023-10-05 17:10:50 +0100
committer  Szabolcs Nagy <szabolcs.nagy@arm.com>  2023-10-23 15:00:45 +0100
commit     a8e3ab3074d448ff3e58ac8f850d955dfed830ad
tree       d5ce7009fed7e726fcf84b4d8b9b32b564aa90b1
parent     b39e9db5e305365db0c347fd308b7c0d86e3507b
aarch64: Add vector implementations of log2 routines
A table is also added, which is shared between AdvSIMD and SVE log2.
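
Illustrative sketch (not part of the patch): a plain scalar loop that a vectorizing compiler targeting the libmvec _ZGV* vector ABI may map onto the new _ZGVnN2v_log2 / _ZGVsMxv_log2 entry points. The exact flags are compiler-dependent (typically optimization plus -fno-math-errno or -Ofast), and the program must link against libmvec (-lmvec).

#include <math.h>

void
log2_array (double *restrict y, const double *restrict x, int n)
{
  /* Candidate for auto-vectorization onto the vector log2 routines.  */
  for (int i = 0; i < n; i++)
    y[i] = log2 (x[i]);
}
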
-rw-r--r--  sysdeps/aarch64/fpu/Makefile                          |   4
-rw-r--r--  sysdeps/aarch64/fpu/Versions                          |   4
-rw-r--r--  sysdeps/aarch64/fpu/bits/math-vector.h                |   4
-rw-r--r--  sysdeps/aarch64/fpu/log2_advsimd.c                    | 109
-rw-r--r--  sysdeps/aarch64/fpu/log2_sve.c                        |  73
-rw-r--r--  sysdeps/aarch64/fpu/log2f_advsimd.c                   |  77
-rw-r--r--  sysdeps/aarch64/fpu/log2f_sve.c                       |  86
-rw-r--r--  sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c    |   1
-rw-r--r--  sysdeps/aarch64/fpu/test-double-sve-wrappers.c        |   1
-rw-r--r--  sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c     |   1
-rw-r--r--  sysdeps/aarch64/fpu/test-float-sve-wrappers.c         |   1
-rw-r--r--  sysdeps/aarch64/fpu/v_log2_data.c                     | 165
-rw-r--r--  sysdeps/aarch64/fpu/vecmath_config.h                  |  12
-rw-r--r--  sysdeps/aarch64/libm-test-ulps                        |   8
-rw-r--r--  sysdeps/unix/sysv/linux/aarch64/libmvec.abilist       |   4
15 files changed, 549 insertions(+), 1 deletion(-)
diff --git a/sysdeps/aarch64/fpu/Makefile b/sysdeps/aarch64/fpu/Makefile
index 9c7c768301..c3f204ff0d 100644
--- a/sysdeps/aarch64/fpu/Makefile
+++ b/sysdeps/aarch64/fpu/Makefile
@@ -2,6 +2,7 @@ libmvec-supported-funcs = cos \
exp \
exp2 \
log \
+ log2 \
sin \
tan
@@ -16,7 +17,8 @@ libmvec-support = $(addsuffix f_advsimd,$(float-advsimd-funcs)) \
$(addsuffix f_sve,$(float-sve-funcs)) \
$(addsuffix _sve,$(double-sve-funcs)) \
v_log_data \
- v_exp_data
+ v_exp_data \
+ v_log2_data
endif
sve-cflags = -march=armv8-a+sve
diff --git a/sysdeps/aarch64/fpu/Versions b/sysdeps/aarch64/fpu/Versions
index 05de4325d5..ffe62a6f65 100644
--- a/sysdeps/aarch64/fpu/Versions
+++ b/sysdeps/aarch64/fpu/Versions
@@ -22,6 +22,10 @@ libmvec {
_ZGVnN2v_exp2;
_ZGVsMxv_exp2f;
_ZGVsMxv_exp2;
+ _ZGVnN4v_log2f;
+ _ZGVnN2v_log2;
+ _ZGVsMxv_log2f;
+ _ZGVsMxv_log2;
_ZGVnN4v_tanf;
_ZGVnN2v_tan;
_ZGVsMxv_tanf;
diff --git a/sysdeps/aarch64/fpu/bits/math-vector.h b/sysdeps/aarch64/fpu/bits/math-vector.h
index 50921b22e5..92f214b194 100644
--- a/sysdeps/aarch64/fpu/bits/math-vector.h
+++ b/sysdeps/aarch64/fpu/bits/math-vector.h
@@ -53,6 +53,7 @@ __vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_expf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_exp2f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_logf (__f32x4_t);
+__vpcs __f32x4_t _ZGVnN4v_log2f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_tanf (__f32x4_t);
@@ -60,6 +61,7 @@ __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp2 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log (__f64x2_t);
+__vpcs __f64x2_t _ZGVnN2v_log2 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_tan (__f64x2_t);
@@ -72,6 +74,7 @@ __sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_expf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_exp2f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_logf (__sv_f32_t, __sv_bool_t);
+__sv_f32_t _ZGVsMxv_log2f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_tanf (__sv_f32_t, __sv_bool_t);
@@ -79,6 +82,7 @@ __sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp2 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log (__sv_f64_t, __sv_bool_t);
+__sv_f64_t _ZGVsMxv_log2 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_tan (__sv_f64_t, __sv_bool_t);
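
Illustrative sketch (not part of the patch): the AdvSIMD entry point declared above can also be called directly. This assumes an AArch64 compiler that accepts the aarch64_vector_pcs attribute, that __f64x2_t corresponds to the <arm_neon.h> float64x2_t type, and linking with -lmvec; the helper name log2_pair is made up.

#include <arm_neon.h>

/* Prototype matching the declaration in bits/math-vector.h above.  */
__attribute__ ((aarch64_vector_pcs)) float64x2_t _ZGVnN2v_log2 (float64x2_t);

float64x2_t
log2_pair (double a, double b)
{
  double in[2] = { a, b };
  return _ZGVnN2v_log2 (vld1q_f64 (in));   /* element-wise log2.  */
}
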
diff --git a/sysdeps/aarch64/fpu/log2_advsimd.c b/sysdeps/aarch64/fpu/log2_advsimd.c
new file mode 100644
index 0000000000..4f29924bfa
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log2_advsimd.c
@@ -0,0 +1,109 @@
+/* Double-precision vector (AdvSIMD) log2 function
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "v_math.h"
+#include "poly_advsimd_f64.h"
+
+#define N (1 << V_LOG2_TABLE_BITS)
+
+static const struct data
+{
+ uint64x2_t min_norm;
+ uint32x4_t special_bound;
+ float64x2_t poly[5];
+ float64x2_t invln2;
+ uint64x2_t sign_exp_mask;
+} data = {
+ /* Each coefficient was generated to approximate log(r) for |r| < 0x1.fp-9
+ and N = 128, then scaled by log2(e) in extended precision and rounded back
+ to double precision. */
+ .poly = { V2 (-0x1.71547652b83p-1), V2 (0x1.ec709dc340953p-2),
+ V2 (-0x1.71547651c8f35p-2), V2 (0x1.2777ebe12dda5p-2),
+ V2 (-0x1.ec738d616fe26p-3) },
+ .invln2 = V2 (0x1.71547652b82fep0),
+ .min_norm = V2 (0x0010000000000000), /* asuint64(0x1p-1022). */
+ .special_bound = V4 (0x7fe00000), /* asuint64(inf) - min_norm. */
+ .sign_exp_mask = V2 (0xfff0000000000000),
+};
+
+#define Off v_u64 (0x3fe6900900000000)
+#define IndexMask (N - 1)
+
+struct entry
+{
+ float64x2_t invc;
+ float64x2_t log2c;
+};
+
+static inline struct entry
+lookup (uint64x2_t i)
+{
+ struct entry e;
+ uint64_t i0 = (i[0] >> (52 - V_LOG2_TABLE_BITS)) & IndexMask;
+ uint64_t i1 = (i[1] >> (52 - V_LOG2_TABLE_BITS)) & IndexMask;
+ float64x2_t e0 = vld1q_f64 (&__v_log2_data.table[i0].invc);
+ float64x2_t e1 = vld1q_f64 (&__v_log2_data.table[i1].invc);
+ e.invc = vuzp1q_f64 (e0, e1);
+ e.log2c = vuzp2q_f64 (e0, e1);
+ return e;
+}
+
+static float64x2_t VPCS_ATTR NOINLINE
+special_case (float64x2_t x, float64x2_t y, float64x2_t w, float64x2_t r2,
+ uint32x2_t special)
+{
+ return v_call_f64 (log2, x, vfmaq_f64 (w, r2, y), vmovl_u32 (special));
+}
+
+/* Double-precision vector log2 routine. Implements the same algorithm as
+ vector log10, with coefficients and table entries scaled in extended
+ precision. The maximum observed error is 2.58 ULP:
+ _ZGVnN2v_log2(0x1.0b556b093869bp+0) got 0x1.fffb34198d9dap-5
+ want 0x1.fffb34198d9ddp-5. */
+float64x2_t VPCS_ATTR V_NAME_D1 (log2) (float64x2_t x)
+{
+ const struct data *d = ptr_barrier (&data);
+ uint64x2_t ix = vreinterpretq_u64_f64 (x);
+ uint32x2_t special = vcge_u32 (vsubhn_u64 (ix, d->min_norm),
+ vget_low_u32 (d->special_bound));
+
+ /* x = 2^k z; where z is in range [Off,2*Off) and exact.
+ The range is split into N subintervals.
+ The ith subinterval contains z and c is near its center. */
+ uint64x2_t tmp = vsubq_u64 (ix, Off);
+ int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52);
+ uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, d->sign_exp_mask));
+ float64x2_t z = vreinterpretq_f64_u64 (iz);
+
+ struct entry e = lookup (tmp);
+
+ /* log2(x) = log1p(z/c-1)/log(2) + log2(c) + k. */
+
+ float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
+ float64x2_t kd = vcvtq_f64_s64 (k);
+ float64x2_t w = vfmaq_f64 (e.log2c, r, d->invln2);
+
+ float64x2_t r2 = vmulq_f64 (r, r);
+ float64x2_t y = v_pw_horner_4_f64 (r, r2, d->poly);
+ w = vaddq_f64 (kd, w);
+
+ if (__glibc_unlikely (v_any_u32h (special)))
+ return special_case (x, y, w, r2, special);
+ return vfmaq_f64 (w, r2, y);
+}
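
Illustrative sketch (not part of the patch): a scalar walk-through of the range reduction used above. Subtracting Off = 0x3fe6900900000000 splits x into 2^k * z with z in roughly [0.705, 1.41), after which log2(x) = k + log2(c) + log2(z/c) with c taken from the table. The helper and variable names below are made up; constants are copied from the diff.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define OFF 0x3fe6900900000000ULL

static void
reduce (double x, int64_t *k, double *z)
{
  uint64_t ix;
  memcpy (&ix, &x, sizeof ix);
  uint64_t tmp = ix - OFF;
  *k = (int64_t) tmp >> 52;                    /* exponent k via arithmetic shift.  */
  uint64_t iz = ix - (tmp & (0xfffULL << 52)); /* clear k from the exponent field.  */
  memcpy (z, &iz, sizeof iz);
}

int
main (void)
{
  int64_t k;
  double z;
  reduce (10.0, &k, &z);
  printf ("10 = 2^%lld * %a\n", (long long) k, z);   /* prints k = 3, z = 1.25.  */
  return 0;
}
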
diff --git a/sysdeps/aarch64/fpu/log2_sve.c b/sysdeps/aarch64/fpu/log2_sve.c
new file mode 100644
index 0000000000..0ef6669fd5
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log2_sve.c
@@ -0,0 +1,73 @@
+/* Double-precision vector (SVE) log2 function
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "sv_math.h"
+#include "poly_sve_f64.h"
+
+#define N (1 << V_LOG2_TABLE_BITS)
+#define Off 0x3fe6900900000000
+#define Max (0x7ff0000000000000)
+#define Min (0x0010000000000000)
+#define Thresh (0x7fe0000000000000) /* Max - Min. */
+
+static svfloat64_t NOINLINE
+special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp)
+{
+ return sv_call_f64 (log2, x, y, cmp);
+}
+
+/* Double-precision SVE log2 routine.
+ Implements the same algorithm as AdvSIMD log10, with coefficients and table
+ entries scaled in extended precision.
+ The maximum observed error is 2.58 ULP:
+ SV_NAME_D1 (log2)(0x1.0b556b093869bp+0) got 0x1.fffb34198d9dap-5
+ want 0x1.fffb34198d9ddp-5. */
+svfloat64_t SV_NAME_D1 (log2) (svfloat64_t x, const svbool_t pg)
+{
+ svuint64_t ix = svreinterpret_u64 (x);
+ svbool_t special = svcmpge (pg, svsub_x (pg, ix, Min), Thresh);
+
+ /* x = 2^k z; where z is in range [Off,2*Off) and exact.
+ The range is split into N subintervals.
+ The ith subinterval contains z and c is near its center. */
+ svuint64_t tmp = svsub_x (pg, ix, Off);
+ svuint64_t i = svlsr_x (pg, tmp, 51 - V_LOG2_TABLE_BITS);
+ i = svand_x (pg, i, (N - 1) << 1);
+ svfloat64_t k = svcvt_f64_x (pg, svasr_x (pg, svreinterpret_s64 (tmp), 52));
+ svfloat64_t z = svreinterpret_f64 (
+ svsub_x (pg, ix, svand_x (pg, tmp, 0xfffULL << 52)));
+
+ svfloat64_t invc = svld1_gather_index (pg, &__v_log2_data.table[0].invc, i);
+ svfloat64_t log2c
+ = svld1_gather_index (pg, &__v_log2_data.table[0].log2c, i);
+
+ /* log2(x) = log1p(z/c-1)/log(2) + log2(c) + k. */
+
+ svfloat64_t r = svmad_x (pg, invc, z, -1.0);
+ svfloat64_t w = svmla_x (pg, log2c, r, __v_log2_data.invln2);
+
+ svfloat64_t r2 = svmul_x (pg, r, r);
+ svfloat64_t y = sv_pw_horner_4_f64_x (pg, r, r2, __v_log2_data.poly);
+ w = svadd_x (pg, k, w);
+
+ if (__glibc_unlikely (svptest_any (pg, special)))
+ return special_case (x, svmla_x (svnot_z (pg, special), w, r2, y),
+ special);
+ return svmla_x (pg, w, r2, y);
+}
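
Illustrative sketch (not part of the patch): why the SVE lookup shifts by 51 - V_LOG2_TABLE_BITS and masks with (N - 1) << 1 while the AdvSIMD code shifts by 52 - V_LOG2_TABLE_BITS. Each table row holds two doubles (invc, log2c) and svld1_gather_index scales by sizeof(double), so the SVE index is simply twice the row number. TABLE_BITS = 7 is an assumption consistent with the "N = 128" coefficient comment above.

#include <stdint.h>
#include <stdio.h>

#define TABLE_BITS 7          /* assumed V_LOG2_TABLE_BITS.  */
#define N (1 << TABLE_BITS)

int
main (void)
{
  uint64_t tmp = 0x003d6ff700000000ULL;                        /* example reduced bits.  */
  uint64_t row = (tmp >> (52 - TABLE_BITS)) & (N - 1);         /* AdvSIMD: row number.  */
  uint64_t idx = (tmp >> (51 - TABLE_BITS)) & ((N - 1) << 1);  /* SVE: index in doubles.  */
  printf ("row %llu -> double index %llu (always 2 * row)\n",
          (unsigned long long) row, (unsigned long long) idx);
  return 0;
}
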
diff --git a/sysdeps/aarch64/fpu/log2f_advsimd.c b/sysdeps/aarch64/fpu/log2f_advsimd.c
new file mode 100644
index 0000000000..e913bcda18
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log2f_advsimd.c
@@ -0,0 +1,77 @@
+/* Single-precision vector (AdvSIMD) log2 function
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "v_math.h"
+#include "poly_advsimd_f32.h"
+
+static const struct data
+{
+ uint32x4_t min_norm;
+ uint16x8_t special_bound;
+ uint32x4_t off, mantissa_mask;
+ float32x4_t poly[9];
+} data = {
+ /* Coefficients generated using Remez algorithm approximate
+ log2(1+r)/r for r in [ -1/3, 1/3 ].
+ rel error: 0x1.c4c4b0cp-26. */
+ .poly = { V4 (0x1.715476p0f), /* (float)(1 / ln(2)). */
+ V4 (-0x1.715458p-1f), V4 (0x1.ec701cp-2f), V4 (-0x1.7171a4p-2f),
+ V4 (0x1.27a0b8p-2f), V4 (-0x1.e5143ep-3f), V4 (0x1.9d8ecap-3f),
+ V4 (-0x1.c675bp-3f), V4 (0x1.9e495p-3f) },
+ .min_norm = V4 (0x00800000),
+ .special_bound = V8 (0x7f00), /* asuint32(inf) - min_norm. */
+ .off = V4 (0x3f2aaaab), /* 0.666667. */
+ .mantissa_mask = V4 (0x007fffff),
+};
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t n, float32x4_t p, float32x4_t r,
+ uint16x4_t cmp)
+{
+ /* Fall back to scalar code. */
+ return v_call_f32 (log2f, x, vfmaq_f32 (n, p, r), vmovl_u16 (cmp));
+}
+
+/* Fast implementation for single precision AdvSIMD log2,
+ relies on same argument reduction as AdvSIMD logf.
+ Maximum error: 2.48 ULPs
+ _ZGVnN4v_log2f(0x1.558174p+0) got 0x1.a9be84p-2
+ want 0x1.a9be8p-2. */
+float32x4_t VPCS_ATTR V_NAME_F1 (log2) (float32x4_t x)
+{
+ const struct data *d = ptr_barrier (&data);
+ uint32x4_t u = vreinterpretq_u32_f32 (x);
+ uint16x4_t special = vcge_u16 (vsubhn_u32 (u, d->min_norm),
+ vget_low_u16 (d->special_bound));
+
+ /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3. */
+ u = vsubq_u32 (u, d->off);
+ float32x4_t n = vcvtq_f32_s32 (
+ vshrq_n_s32 (vreinterpretq_s32_u32 (u), 23)); /* signextend. */
+ u = vaddq_u32 (vandq_u32 (u, d->mantissa_mask), d->off);
+ float32x4_t r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));
+
+ /* y = log2(1+r) + n. */
+ float32x4_t r2 = vmulq_f32 (r, r);
+ float32x4_t p = v_pw_horner_8_f32 (r, r2, d->poly);
+
+ if (__glibc_unlikely (v_any_u16h (special)))
+ return special_case (x, n, p, r, special);
+ return vfmaq_f32 (n, p, r);
+}
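
Illustrative sketch (not part of the patch): a scalar version of the single-precision reduction above. Subtracting Off = 0x3f2aaaab (the bit pattern of about 2/3) before splitting off the exponent lands x in 2^n * (1+r) with 2/3 < 1+r < 4/3, so |r| <= 1/3 for the polynomial. Names are made up; constants are copied from the diff.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define OFF 0x3f2aaaabu
#define MANTISSA_MASK 0x007fffffu

static void
reduce (float x, int *n, float *r)
{
  uint32_t u;
  memcpy (&u, &x, sizeof u);
  u -= OFF;
  *n = (int32_t) u >> 23;               /* sign-extending shift gives the exponent n.  */
  u = (u & MANTISSA_MASK) + OFF;
  float m;
  memcpy (&m, &u, sizeof m);
  *r = m - 1.0f;                        /* 1+r in (2/3, 4/3).  */
}

int
main (void)
{
  int n;
  float r;
  reduce (6.0f, &n, &r);
  printf ("6 = 2^%d * %a\n", n, 1.0f + r);   /* prints n = 3, 1+r = 0.75.  */
  return 0;
}
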
diff --git a/sysdeps/aarch64/fpu/log2f_sve.c b/sysdeps/aarch64/fpu/log2f_sve.c
new file mode 100644
index 0000000000..d00813ee24
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log2f_sve.c
@@ -0,0 +1,86 @@
+/* Single-precision vector (SVE) log2 function
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "sv_math.h"
+
+static const struct data
+{
+ float poly_02468[5];
+ float poly_1357[4];
+} data = {
+ .poly_1357 = {
+    /* Coefficients copied from the AdvSIMD routine, then rearranged so that
+       coeffs 1, 3, 5 and 7 can be loaded as a single quad-word, hence used
+       with the _lane variant of the MLA intrinsic.  */
+    -0x1.715458p-1f, -0x1.7171a4p-2f, -0x1.e5143ep-3f, -0x1.c675bp-3f
+ },
+ .poly_02468 = { 0x1.715476p0f, 0x1.ec701cp-2f, 0x1.27a0b8p-2f,
+ 0x1.9d8ecap-3f, 0x1.9e495p-3f },
+};
+
+#define Min (0x00800000)
+#define Max (0x7f800000)
+#define Thres (0x7f000000) /* Max - Min. */
+#define MantissaMask (0x007fffff)
+#define Off (0x3f2aaaab) /* 0.666667. */
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t cmp)
+{
+ return sv_call_f32 (log2f, x, y, cmp);
+}
+
+/* Optimised implementation of SVE log2f, using the same algorithm
+ and polynomial as AdvSIMD log2f.
+ Maximum error is 2.48 ULPs:
+ SV_NAME_F1 (log2)(0x1.558174p+0) got 0x1.a9be84p-2
+ want 0x1.a9be8p-2. */
+svfloat32_t SV_NAME_F1 (log2) (svfloat32_t x, const svbool_t pg)
+{
+ const struct data *d = ptr_barrier (&data);
+
+ svuint32_t u = svreinterpret_u32 (x);
+ svbool_t special = svcmpge (pg, svsub_x (pg, u, Min), Thres);
+
+ /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3. */
+ u = svsub_x (pg, u, Off);
+ svfloat32_t n = svcvt_f32_x (
+ pg, svasr_x (pg, svreinterpret_s32 (u), 23)); /* Sign-extend. */
+ u = svand_x (pg, u, MantissaMask);
+ u = svadd_x (pg, u, Off);
+ svfloat32_t r = svsub_x (pg, svreinterpret_f32 (u), 1.0f);
+
+ /* y = log2(1+r) + n. */
+ svfloat32_t r2 = svmul_x (pg, r, r);
+
+ /* Evaluate polynomial using pairwise Horner scheme. */
+ svfloat32_t p_1357 = svld1rq (svptrue_b32 (), &d->poly_1357[0]);
+ svfloat32_t q_01 = svmla_lane (sv_f32 (d->poly_02468[0]), r, p_1357, 0);
+ svfloat32_t q_23 = svmla_lane (sv_f32 (d->poly_02468[1]), r, p_1357, 1);
+ svfloat32_t q_45 = svmla_lane (sv_f32 (d->poly_02468[2]), r, p_1357, 2);
+ svfloat32_t q_67 = svmla_lane (sv_f32 (d->poly_02468[3]), r, p_1357, 3);
+ svfloat32_t y = svmla_x (pg, q_67, r2, sv_f32 (d->poly_02468[4]));
+ y = svmla_x (pg, q_45, r2, y);
+ y = svmla_x (pg, q_23, r2, y);
+ y = svmla_x (pg, q_01, r2, y);
+
+ if (__glibc_unlikely (svptest_any (pg, special)))
+ return special_case (x, svmla_x (svnot_z (pg, special), n, r, y), special);
+ return svmla_x (pg, n, r, y);
+}
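
Illustrative sketch (not part of the patch): the pairwise Horner scheme that the svmla_lane / svmla_x sequence above vectorizes. Adjacent coefficients are paired as q_i = c_{2i} + c_{2i+1} * r and the pairs are then combined with Horner's rule in r^2. Coefficients are copied from the data struct in the diff; the helper name is made up.

#include <stdio.h>

static float
pairwise_horner_8 (float r)
{
  static const float c[9] = { 0x1.715476p0f,   -0x1.715458p-1f, 0x1.ec701cp-2f,
                              -0x1.7171a4p-2f,  0x1.27a0b8p-2f, -0x1.e5143ep-3f,
                              0x1.9d8ecap-3f,  -0x1.c675bp-3f,   0x1.9e495p-3f };
  float r2 = r * r;
  float q01 = c[0] + c[1] * r;
  float q23 = c[2] + c[3] * r;
  float q45 = c[4] + c[5] * r;
  float q67 = c[6] + c[7] * r;
  float y = q67 + r2 * c[8];
  y = q45 + r2 * y;
  y = q23 + r2 * y;
  y = q01 + r2 * y;
  return y;                   /* approximates log2(1+r)/r for |r| <= 1/3.  */
}

int
main (void)
{
  float r = 0.25f;
  printf ("log2(1.25) ~= %a\n", r * pairwise_horner_8 (r));
  return 0;
}
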
diff --git a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
index b2b36fd847..d30dcd6f95 100644
--- a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
@@ -27,5 +27,6 @@ VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
VPCS_VECTOR_WRAPPER (exp_advsimd, _ZGVnN2v_exp)
VPCS_VECTOR_WRAPPER (exp2_advsimd, _ZGVnN2v_exp2)
VPCS_VECTOR_WRAPPER (log_advsimd, _ZGVnN2v_log)
+VPCS_VECTOR_WRAPPER (log2_advsimd, _ZGVnN2v_log2)
VPCS_VECTOR_WRAPPER (sin_advsimd, _ZGVnN2v_sin)
VPCS_VECTOR_WRAPPER (tan_advsimd, _ZGVnN2v_tan)
diff --git a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
index 88b76ed678..22a8479100 100644
--- a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
@@ -36,5 +36,6 @@ SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
SVE_VECTOR_WRAPPER (exp_sve, _ZGVsMxv_exp)
SVE_VECTOR_WRAPPER (exp2_sve, _ZGVsMxv_exp2)
SVE_VECTOR_WRAPPER (log_sve, _ZGVsMxv_log)
+SVE_VECTOR_WRAPPER (log2_sve, _ZGVsMxv_log2)
SVE_VECTOR_WRAPPER (sin_sve, _ZGVsMxv_sin)
SVE_VECTOR_WRAPPER (tan_sve, _ZGVsMxv_tan)
diff --git a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
index 02ab609b5a..e8f7f47c67 100644
--- a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
@@ -27,5 +27,6 @@ VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
VPCS_VECTOR_WRAPPER (expf_advsimd, _ZGVnN4v_expf)
VPCS_VECTOR_WRAPPER (exp2f_advsimd, _ZGVnN4v_exp2f)
VPCS_VECTOR_WRAPPER (logf_advsimd, _ZGVnN4v_logf)
+VPCS_VECTOR_WRAPPER (log2f_advsimd, _ZGVnN4v_log2f)
VPCS_VECTOR_WRAPPER (sinf_advsimd, _ZGVnN4v_sinf)
VPCS_VECTOR_WRAPPER (tanf_advsimd, _ZGVnN4v_tanf)
diff --git a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
index fa41ce09d8..f5e9584265 100644
--- a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
@@ -36,5 +36,6 @@ SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
SVE_VECTOR_WRAPPER (expf_sve, _ZGVsMxv_expf)
SVE_VECTOR_WRAPPER (exp2f_sve, _ZGVsMxv_exp2f)
SVE_VECTOR_WRAPPER (logf_sve, _ZGVsMxv_logf)
+SVE_VECTOR_WRAPPER (log2f_sve, _ZGVsMxv_log2f)
SVE_VECTOR_WRAPPER (sinf_sve, _ZGVsMxv_sinf)
SVE_VECTOR_WRAPPER (tanf_sve, _ZGVsMxv_tanf)
diff --git a/sysdeps/aarch64/fpu/v_log2_data.c b/sysdeps/aarch64/fpu/v_log2_data.c
new file mode 100644
index 0000000000..4fb126bf31
--- /dev/null
+++ b/sysdeps/aarch64/fpu/v_log2_data.c
@@ -0,0 +1,165 @@
+/* Coefficients and table entries for vector log2
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "vecmath_config.h"
+
+#define N (1 << V_LOG2_TABLE_BITS)
+
+const struct v_log2_data __v_log2_data = {
+