about | summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Noah Goldstein <goldstein.w.n@gmail.com> 2022-04-14 11:47:34 -0500
committer: Noah Goldstein <goldstein.w.n@gmail.com> 2022-04-14 23:21:41 -0500
commit: c725f408db3a374ae7aec4e83de15715113b6398 (patch)
tree: 380fd6f96a8c9a82f9c33f32f944b8cbe0f56dbb
parent: 404656009b459658138ed1bd18f3c6cf3863e6a6 (diff)
download: glibc-c725f408db3a374ae7aec4e83de15715113b6398.tar.xz
download: glibc-c725f408db3a374ae7aec4e83de15715113b6398.zip
x86: Remove {w}memcmp-ssse3
With SSE2, SSE4.1, AVX2, and EVEX versions available, very few targets prefer SSSE3. As a result, it is no longer worth keeping the SSSE3 versions given their code-size cost. Reviewed-by: H.J. Lu <hjl.tools@gmail.com>
-rw-r--r--sysdeps/x86_64/multiarch/Makefile2
-rw-r--r--sysdeps/x86_64/multiarch/ifunc-impl-list.c4
-rw-r--r--sysdeps/x86_64/multiarch/ifunc-memcmp.h4
-rw-r--r--sysdeps/x86_64/multiarch/memcmp-ssse3.S1992
-rw-r--r--sysdeps/x86_64/multiarch/wmemcmp-ssse3.S4
5 files changed, 0 insertions, 2006 deletions
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 6507d1b7fa..51222dfab1 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -12,7 +12,6 @@ sysdep_routines += \
memcmp-evex-movbe \
memcmp-sse2 \
memcmp-sse4 \
- memcmp-ssse3 \
memcmpeq-avx2 \
memcmpeq-avx2-rtm \
memcmpeq-evex \
@@ -179,7 +178,6 @@ sysdep_routines += \
wmemcmp-c \
wmemcmp-evex-movbe \
wmemcmp-sse4 \
- wmemcmp-ssse3 \
# sysdep_routines
endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 40cc6cc49e..f389928a4e 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -98,8 +98,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
__memcmp_evex_movbe)
IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSE4_1),
__memcmp_sse4_1)
- IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSSE3),
- __memcmp_ssse3)
IFUNC_IMPL_ADD (array, i, memcmp, 1, __memcmp_sse2))
#ifdef SHARED
@@ -844,8 +842,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
__wmemcmp_evex_movbe)
IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSE4_1),
__wmemcmp_sse4_1)
- IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSSE3),
- __wmemcmp_ssse3)
IFUNC_IMPL_ADD (array, i, wmemcmp, 1, __wmemcmp_sse2))
/* Support sysdeps/x86_64/multiarch/wmemset.c. */
diff --git a/sysdeps/x86_64/multiarch/ifunc-memcmp.h b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
index cd12613699..44759a3ad5 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memcmp.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
@@ -20,7 +20,6 @@
# include <init-arch.h>
extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (sse4_1) attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_movbe) attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_movbe_rtm) attribute_hidden;
@@ -50,8 +49,5 @@ IFUNC_SELECTOR (void)
if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1))
return OPTIMIZE (sse4_1);
- if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3))
- return OPTIMIZE (ssse3);
-
return OPTIMIZE (sse2);
}
diff --git a/sysdeps/x86_64/multiarch/memcmp-ssse3.S b/sysdeps/x86_64/multiarch/memcmp-ssse3.S
deleted file mode 100644
index df1b1fc494..0000000000
--- a/sysdeps/x86_64/multiarch/memcmp-ssse3.S
+++ /dev/null
@@ -1,1992 +0,0 @@
-/* memcmp with SSSE3, wmemcmp with SSSE3
- Copyright (C) 2011-2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-#if IS_IN (libc)
-
-# include <sysdep.h>
-
-# ifndef MEMCMP
-# define MEMCMP __memcmp_ssse3
-# endif
-
-/* Warning!
- wmemcmp has to use SIGNED comparison for elements.
- memcmp has to use UNSIGNED comparison for elements.
-*/
-
- atom_text_section
-ENTRY (MEMCMP)
-# ifdef USE_AS_WMEMCMP
- shl $2, %RDX_LP
- test %RDX_LP, %RDX_LP
- jz L(equal)
-# elif defined __ILP32__
- /* Clear the upper 32 bits. */
- mov %edx, %edx
-# endif
- mov %rdx, %rcx
- mov %rdi, %rdx
- cmp $48, %rcx;
- jae L(48bytesormore) /* LEN => 48 */
-
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-/* ECX >= 32. */
-L(48bytesormore):
- movdqu (%rdi), %xmm3
- movdqu (%rsi), %xmm0
- pcmpeqb %xmm0, %xmm3
- pmovmskb %xmm3, %edx
- lea 16(%rdi), %rdi
- lea 16(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(less16bytes)
- mov %edi, %edx
- and $0xf, %edx
- xor %rdx, %rdi
- sub %rdx, %rsi
- add %rdx, %rcx
- mov %esi, %edx
- and $0xf, %edx
- jz L(shr_0)
- xor %rdx, %rsi
-
-# ifndef USE_AS_WMEMCMP
- cmp $8, %edx
- jae L(next_unaligned_table)
- cmp $0, %edx
- je L(shr_0)
- cmp $1, %edx
- je L(shr_1)
- cmp $2, %edx
- je L(shr_2)
- cmp $3, %edx
- je L(shr_3)
- cmp $4, %edx
- je L(shr_4)
- cmp $5, %edx
- je L(shr_5)
- cmp $6, %edx
- je L(shr_6)
- jmp L(shr_7)
-
- .p2align 2
-L(next_unaligned_table):
- cmp $8, %edx
- je L(shr_8)
- cmp $9, %edx
- je L(shr_9)
- cmp $10, %edx
- je L(shr_10)
- cmp $11, %edx
- je L(shr_11)
- cmp $12, %edx
- je L(shr_12)
- cmp $13, %edx
- je L(shr_13)
- cmp $14, %edx
- je L(shr_14)
- jmp L(shr_15)
-# else
- cmp $0, %edx
- je L(shr_0)
- cmp $4, %edx
- je L(shr_4)
- cmp $8, %edx
- je L(shr_8)
- jmp L(shr_12)
-# endif
-
- .p2align 4
-L(shr_0):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- jae L(shr_0_gobble)
- xor %eax, %eax
- movdqa (%rsi), %xmm1
- pcmpeqb (%rdi), %xmm1
- movdqa 16(%rsi), %xmm2
- pcmpeqb 16(%rdi), %xmm2
- pand %xmm1, %xmm2
- pmovmskb %xmm2, %edx
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_0_gobble):
- movdqa (%rsi), %xmm0
- xor %eax, %eax
- pcmpeqb (%rdi), %xmm0
- sub $32, %rcx
- movdqa 16(%rsi), %xmm2
- pcmpeqb 16(%rdi), %xmm2
-L(shr_0_gobble_loop):
- pand %xmm0, %xmm2
- sub $32, %rcx
- pmovmskb %xmm2, %edx
- movdqa %xmm0, %xmm1
- movdqa 32(%rsi), %xmm0
- movdqa 48(%rsi), %xmm2
- sbb $0xffff, %edx
- pcmpeqb 32(%rdi), %xmm0
- pcmpeqb 48(%rdi), %xmm2
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- jz L(shr_0_gobble_loop)
-
- pand %xmm0, %xmm2
- cmp $0, %rcx
- jge L(next)
- inc %edx
- add $32, %rcx
-L(next):
- test %edx, %edx
- jnz L(exit)
-
- pmovmskb %xmm2, %edx
- movdqa %xmm0, %xmm1
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
-# ifndef USE_AS_WMEMCMP
-
- .p2align 4
-L(shr_1):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- mov %edx, %eax
- jae L(shr_1_gobble)
-
- movdqa 16(%rsi), %xmm1
- movdqa %xmm1, %xmm2
- palignr $1, (%rsi), %xmm1
- pcmpeqb (%rdi), %xmm1
-
- movdqa 32(%rsi), %xmm3
- palignr $1, %xmm2, %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
- pand %xmm1, %xmm3
- pmovmskb %xmm3, %edx
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add $1, %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_1_gobble):
- sub $32, %rcx
- movdqa 16(%rsi), %xmm0
- palignr $1, (%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
-
- movdqa 32(%rsi), %xmm3
- palignr $1, 16(%rsi), %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
-L(shr_1_gobble_loop):
- pand %xmm0, %xmm3
- sub $32, %rcx
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
-
- movdqa 64(%rsi), %xmm3
- palignr $1, 48(%rsi), %xmm3
- sbb $0xffff, %edx
- movdqa 48(%rsi), %xmm0
- palignr $1, 32(%rsi), %xmm0
- pcmpeqb 32(%rdi), %xmm0
- lea 32(%rsi), %rsi
- pcmpeqb 48(%rdi), %xmm3
-
- lea 32(%rdi), %rdi
- jz L(shr_1_gobble_loop)
- pand %xmm0, %xmm3
-
- cmp $0, %rcx
- jge L(shr_1_gobble_next)
- inc %edx
- add $32, %rcx
-L(shr_1_gobble_next):
- test %edx, %edx
- jnz L(exit)
-
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
-
- lea 1(%rsi), %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
-
- .p2align 4
-L(shr_2):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- mov %edx, %eax
- jae L(shr_2_gobble)
-
- movdqa 16(%rsi), %xmm1
- movdqa %xmm1, %xmm2
- palignr $2, (%rsi), %xmm1
- pcmpeqb (%rdi), %xmm1
-
- movdqa 32(%rsi), %xmm3
- palignr $2, %xmm2, %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
- pand %xmm1, %xmm3
- pmovmskb %xmm3, %edx
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add $2, %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_2_gobble):
- sub $32, %rcx
- movdqa 16(%rsi), %xmm0
- palignr $2, (%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
-
- movdqa 32(%rsi), %xmm3
- palignr $2, 16(%rsi), %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
-L(shr_2_gobble_loop):
- pand %xmm0, %xmm3
- sub $32, %rcx
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
-
- movdqa 64(%rsi), %xmm3
- palignr $2, 48(%rsi), %xmm3
- sbb $0xffff, %edx
- movdqa 48(%rsi), %xmm0
- palignr $2, 32(%rsi), %xmm0
- pcmpeqb 32(%rdi), %xmm0
- lea 32(%rsi), %rsi
- pcmpeqb 48(%rdi), %xmm3
-
- lea 32(%rdi), %rdi
- jz L(shr_2_gobble_loop)
- pand %xmm0, %xmm3
-
- cmp $0, %rcx
- jge L(shr_2_gobble_next)
- inc %edx
- add $32, %rcx
-L(shr_2_gobble_next):
- test %edx, %edx
- jnz L(exit)
-
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
-
- lea 2(%rsi), %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_3):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- mov %edx, %eax
- jae L(shr_3_gobble)
-
- movdqa 16(%rsi), %xmm1
- movdqa %xmm1, %xmm2
- palignr $3, (%rsi), %xmm1
- pcmpeqb (%rdi), %xmm1
-
- movdqa 32(%rsi), %xmm3
- palignr $3, %xmm2, %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
- pand %xmm1, %xmm3
- pmovmskb %xmm3, %edx
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add $3, %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_3_gobble):
- sub $32, %rcx
- movdqa 16(%rsi), %xmm0
- palignr $3, (%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
-
- movdqa 32(%rsi), %xmm3
- palignr $3, 16(%rsi), %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
-L(shr_3_gobble_loop):
- pand %xmm0, %xmm3
- sub $32, %rcx
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
-
- movdqa 64(%rsi), %xmm3
- palignr $3, 48(%rsi), %xmm3
- sbb $0xffff, %edx
- movdqa 48(%rsi), %xmm0
- palignr $3, 32(%rsi), %xmm0
- pcmpeqb 32(%rdi), %xmm0
- lea 32(%rsi), %rsi
- pcmpeqb 48(%rdi), %xmm3
-
- lea 32(%rdi), %rdi
- jz L(shr_3_gobble_loop)
- pand %xmm0, %xmm3
-
- cmp $0, %rcx
- jge L(shr_3_gobble_next)
- inc %edx
- add $32, %rcx
-L(shr_3_gobble_next):
- test %edx, %edx
- jnz L(exit)
-
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
-
- lea 3(%rsi), %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
-# endif
-
- .p2align 4
-L(shr_4):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- mov %edx, %eax
- jae L(shr_4_gobble)
-
- movdqa 16(%rsi), %xmm1
- movdqa %xmm1, %xmm2
- palignr $4, (%rsi), %xmm1
- pcmpeqb (%rdi), %xmm1
-
- movdqa 32(%rsi), %xmm3
- palignr $4, %xmm2, %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
- pand %xmm1, %xmm3
- pmovmskb %xmm3, %edx
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add $4, %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_4_gobble):
- sub $32, %rcx
- movdqa 16(%rsi), %xmm0
- palignr $4, (%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
-
- movdqa 32(%rsi), %xmm3
- palignr $4, 16(%rsi), %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
-L(shr_4_gobble_loop):
- pand %xmm0, %xmm3
- sub $32, %rcx
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
-
- movdqa 64(%rsi), %xmm3
- palignr $4, 48(%rsi), %xmm3
- sbb $0xffff, %edx
- movdqa 48(%rsi), %xmm0
- palignr $4, 32(%rsi), %xmm0
- pcmpeqb 32(%rdi), %xmm0
- lea 32(%rsi), %rsi
- pcmpeqb 48(%rdi), %xmm3
-
- lea 32(%rdi), %rdi
- jz L(shr_4_gobble_loop)
- pand %xmm0, %xmm3
-
- cmp $0, %rcx
- jge L(shr_4_gobble_next)
- inc %edx
- add $32, %rcx
-L(shr_4_gobble_next):
- test %edx, %edx
- jnz L(exit)
-
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
-
- lea 4(%rsi), %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
-# ifndef USE_AS_WMEMCMP
-
- .p2align 4
-L(shr_5):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- mov %edx, %eax
- jae L(shr_5_gobble)
-
- movdqa 16(%rsi), %xmm1
- movdqa %xmm1, %xmm2
- palignr $5, (%rsi), %xmm1
- pcmpeqb (%rdi), %xmm1
-
- movdqa 32(%rsi), %xmm3
- palignr $5, %xmm2, %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
- pand %xmm1, %xmm3
- pmovmskb %xmm3, %edx
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add $5, %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_5_gobble):
- sub $32, %rcx
- movdqa 16(%rsi), %xmm0
- palignr $5, (%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
-
- movdqa 32(%rsi), %xmm3
- palignr $5, 16(%rsi), %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
-L(shr_5_gobble_loop):
- pand %xmm0, %xmm3
- sub $32, %rcx
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
-
- movdqa 64(%rsi), %xmm3
- palignr $5, 48(%rsi), %xmm3
- sbb $0xffff, %edx
- movdqa 48(%rsi), %xmm0
- palignr $5, 32(%rsi), %xmm0
- pcmpeqb 32(%rdi), %xmm0
- lea 32(%rsi), %rsi
- pcmpeqb 48(%rdi), %xmm3
-
- lea 32(%rdi), %rdi
- jz L(shr_5_gobble_loop)
- pand %xmm0, %xmm3
-
- cmp $0, %rcx
- jge L(shr_5_gobble_next)
- inc %edx
- add $32, %rcx
-L(shr_5_gobble_next):
- test %edx, %edx
- jnz L(exit)
-
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
-
- lea 5(%rsi), %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_6):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- mov %edx, %eax
- jae L(shr_6_gobble)
-
- movdqa 16(%rsi), %xmm1
- movdqa %xmm1, %xmm2
- palignr $6, (%rsi), %xmm1
- pcmpeqb (%rdi), %xmm1
-
- movdqa 32(%rsi), %xmm3
- palignr $6, %xmm2, %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
- pand %xmm1, %xmm3
- pmovmskb %xmm3, %edx
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add $6, %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_6_gobble):
- sub $32, %rcx
- movdqa 16(%rsi), %xmm0
- palignr $6, (%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
-
- movdqa 32(%rsi), %xmm3
- palignr $6, 16(%rsi), %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
-L(shr_6_gobble_loop):
- pand %xmm0, %xmm3
- sub $32, %rcx
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
-
- movdqa 64(%rsi), %xmm3
- palignr $6, 48(%rsi), %xmm3
- sbb $0xffff, %edx
- movdqa 48(%rsi), %xmm0
- palignr $6, 32(%rsi), %xmm0
- pcmpeqb 32(%rdi), %xmm0
- lea 32(%rsi), %rsi
- pcmpeqb 48(%rdi), %xmm3
-
- lea 32(%rdi), %rdi
- jz L(shr_6_gobble_loop)
- pand %xmm0, %xmm3
-
- cmp $0, %rcx
- jge L(shr_6_gobble_next)
- inc %edx
- add $32, %rcx
-L(shr_6_gobble_next):
- test %edx, %edx
- jnz L(exit)
-
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
-
- lea 6(%rsi), %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_7):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- mov %edx, %eax
- jae L(shr_7_gobble)
-
- movdqa 16(%rsi), %xmm1
- movdqa %xmm1, %xmm2
- palignr $7, (%rsi), %xmm1
- pcmpeqb (%rdi), %xmm1
-
- movdqa 32(%rsi), %xmm3
- palignr $7, %xmm2, %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
- pand %xmm1, %xmm3
- pmovmskb %xmm3, %edx
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add $7, %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_7_gobble):
- sub $32, %rcx
- movdqa 16(%rsi), %xmm0
- palignr $7, (%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
-
- movdqa 32(%rsi), %xmm3
- palignr $7, 16(%rsi), %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
-L(shr_7_gobble_loop):
- pand %xmm0, %xmm3
- sub $32, %rcx
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
-
- movdqa 64(%rsi), %xmm3
- palignr $7, 48(%rsi), %xmm3
- sbb $0xffff, %edx
- movdqa 48(%rsi), %xmm0
- palignr $7, 32(%rsi), %xmm0
- pcmpeqb 32(%rdi), %xmm0
- lea 32(%rsi), %rsi
- pcmpeqb 48(%rdi), %xmm3
-
- lea 32(%rdi), %rdi
- jz L(shr_7_gobble_loop)
- pand %xmm0, %xmm3
-
- cmp $0, %rcx
- jge L(shr_7_gobble_next)
- inc %edx
- add $32, %rcx
-L(shr_7_gobble_next):
- test %edx, %edx
- jnz L(exit)
-
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
-
- lea 7(%rsi), %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
-# endif
-
- .p2align 4
-L(shr_8):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- mov %edx, %eax
- jae L(shr_8_gobble)
-
- movdqa 16(%rsi), %xmm1
- movdqa %xmm1, %xmm2
- palignr $8, (%rsi), %xmm1
- pcmpeqb (%rdi), %xmm1
-
- movdqa 32(%rsi), %xmm3
- palignr $8, %xmm2, %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
- pand %xmm1, %xmm3
- pmovmskb %xmm3, %edx
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
- add $8, %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
- .p2align 4
-L(shr_8_gobble):
- sub $32, %rcx
- movdqa 16(%rsi), %xmm0
- palignr $8, (%rsi), %xmm0
- pcmpeqb (%rdi), %xmm0
-
- movdqa 32(%rsi), %xmm3
- palignr $8, 16(%rsi), %xmm3
- pcmpeqb 16(%rdi), %xmm3
-
-L(shr_8_gobble_loop):
- pand %xmm0, %xmm3
- sub $32, %rcx
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
-
- movdqa 64(%rsi), %xmm3
- palignr $8, 48(%rsi), %xmm3
- sbb $0xffff, %edx
- movdqa 48(%rsi), %xmm0
- palignr $8, 32(%rsi), %xmm0
- pcmpeqb 32(%rdi), %xmm0
- lea 32(%rsi), %rsi
- pcmpeqb 48(%rdi), %xmm3
-
- lea 32(%rdi), %rdi
- jz L(shr_8_gobble_loop)
- pand %xmm0, %xmm3
-
- cmp $0, %rcx
- jge L(shr_8_gobble_next)
- inc %edx
- add $32, %rcx
-L(shr_8_gobble_next):
- test %edx, %edx
- jnz L(exit)
-
- pmovmskb %xmm3, %edx
- movdqa %xmm0, %xmm1
- lea 32(%rdi), %rdi
- lea 32(%rsi), %rsi
- sub $0xffff, %edx
- jnz L(exit)
-
- lea 8(%rsi), %rsi
- add %rcx, %rsi
- add %rcx, %rdi
- jmp L(less48bytes)
-
-# ifndef USE_AS_WMEMCMP
-
- .p2align 4
-L(shr_9):
- cmp $80, %rcx
- lea -48(%rcx), %rcx
- mov %edx, %eax
- jae L(shr_9_gobble)