author     Alan Modra <amodra@gmail.com>  2013-08-17 18:46:47 +0930
committer  Alan Modra <amodra@gmail.com>  2013-10-04 10:40:56 +0930
commit     fe6e95d7171eba5f3e07848f081676fae4e86322 (patch)
tree       f4a262abf7061e3ab3b30ac9fa15b7a2e238b264
parent     664318c3eb07032e2bfcf47cb2aa3c89280c19e7 (diff)
PowerPC LE memcmp
http://sourceware.org/ml/libc-alpha/2013-08/msg00102.html

This is a rather large patch due to formatting and renaming.  The
formatting changes were to make it possible to compare power7 and
power4 versions of memcmp.  Using different register defines came
about while I was wrestling with the code, trying to find spare
registers at one stage.  I found it much simpler if we refer to a reg
by the same name throughout a function, so it's better if short-term,
multiple-use regs like rTMP are referred to using their register
number.  I made the cr field usage changes when attempting to reload
rWORDn regs in the exit path to byte-swap before comparing when
little-endian.  That proved a bad idea due to the pipelining involved
in the main loop; the offsets to reload the regs were different the
first time around the loop.  Anyway, I left the cr field usage changes
in place for consistency.  Aside from these more-or-less cosmetic
changes, I fixed a number of places where an early exit path restores
regs unnecessarily, removed some dead code, and optimised one or two
exits.

	* sysdeps/powerpc/powerpc64/power7/memcmp.S: Add little-endian support.
	Formatting.  Consistently use rXXX register defines or rN defines.
	Use early exit labels that avoid restoring unused non-volatile regs.
	Make cr field use more consistent with rWORDn compares.  Rename
	regs used as shift registers for unaligned loop, using rN defines
	for short lifetime/multiple use regs.
	* sysdeps/powerpc/powerpc64/power4/memcmp.S: Likewise.
	* sysdeps/powerpc/powerpc32/power7/memcmp.S: Likewise.  Exit with
	addi 1,1,64 to pop stack frame.  Simplify return value code.
	* sysdeps/powerpc/powerpc32/power4/memcmp.S: Likewise.
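[Editorial illustration, not part of the commit.]  memcmp's result is
defined by lexicographic byte order, which is the same as comparing
aligned words as big-endian unsigned integers.  That is why the new
__LITTLE_ENDIAN__ paths in the diff below load each word with lwbrx
(byte-reversed indexed load) before the unsigned cmplw, while the
big-endian path keeps the plain lwz.  The C sketch below makes the
same point in portable code; the helper names (load_be32,
memcmp_word_sketch) are illustrative only and do not appear in glibc.

#include <stddef.h>
#include <stdint.h>

/* Read 4 bytes as a big-endian word.  On little-endian PowerPC a
   single lwbrx achieves this; on big-endian a plain lwz already
   yields this value.  */
static uint32_t
load_be32 (const unsigned char *p)
{
  return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
	 | ((uint32_t) p[2] << 8) | (uint32_t) p[3];
}

/* Word-at-a-time compare: correct only because each word is compared
   in big-endian byte order, matching memcmp's byte-wise ordering.  */
static int
memcmp_word_sketch (const unsigned char *s1, const unsigned char *s2,
		    size_t n)
{
  while (n >= 4)
    {
      uint32_t w1 = load_be32 (s1);
      uint32_t w2 = load_be32 (s2);
      if (w1 != w2)
	return w1 > w2 ? 1 : -1;
      s1 += 4;
      s2 += 4;
      n -= 4;
    }
  while (n-- > 0)
    {
      int d = *s1++ - *s2++;
      if (d != 0)
	return d;
    }
  return 0;
}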
-rw-r--r--  ChangeLog                                  |   13
-rw-r--r--  sysdeps/powerpc/powerpc32/power4/memcmp.S  | 1064
-rw-r--r--  sysdeps/powerpc/powerpc32/power7/memcmp.S  | 1626
-rw-r--r--  sysdeps/powerpc/powerpc64/power4/memcmp.S  | 1041
-rw-r--r--  sysdeps/powerpc/powerpc64/power7/memcmp.S  | 1613
5 files changed, 3464 insertions, 1893 deletions
diff --git a/ChangeLog b/ChangeLog
index 74c6203c97..51311857a6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,18 @@
2013-10-04 Alan Modra <amodra@gmail.com>
+ * sysdeps/powerpc/powerpc64/power7/memcmp.S: Add little-endian support.
+ Formatting. Consistently use rXXX register defines or rN defines.
+ Use early exit labels that avoid restoring unused non-volatile regs.
+ Make cr field use more consistent with rWORDn compares. Rename
+ regs used as shift registers for unaligned loop, using rN defines
+ for short lifetime/multiple use regs.
+ * sysdeps/powerpc/powerpc64/power4/memcmp.S: Likewise.
+ * sysdeps/powerpc/powerpc32/power7/memcmp.S: Likewise. Exit with
+ addi 1,1,64 to pop stack frame. Simplify return value code.
+ * sysdeps/powerpc/powerpc32/power4/memcmp.S: Likewise.
+
+2013-10-04 Alan Modra <amodra@gmail.com>
+
* sysdeps/powerpc/powerpc64/power7/strchr.S (strchr): Add little-endian
support. Correct typos, formatting. Optimize tail. Use insrdi
rather than rlwimi.
diff --git a/sysdeps/powerpc/powerpc32/power4/memcmp.S b/sysdeps/powerpc/powerpc32/power4/memcmp.S
index 9a455a3c68..35e162667d 100644
--- a/sysdeps/powerpc/powerpc32/power4/memcmp.S
+++ b/sysdeps/powerpc/powerpc32/power4/memcmp.S
@@ -1,4 +1,4 @@
-/* Optimized strcmp implementation for PowerPC64.
+/* Optimized strcmp implementation for PowerPC32.
Copyright (C) 2003-2013 Free Software Foundation, Inc.
This file is part of the GNU C Library.
@@ -18,13 +18,14 @@
#include <sysdep.h>
-/* int [r3] memcmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5]) */
+/* int [r3] memcmp (const char *s1 [r3],
+ const char *s2 [r4],
+ size_t size [r5]) */
.machine power4
EALIGN (memcmp, 4, 0)
CALL_MCOUNT
-#define rTMP r0
#define rRTN r3
#define rSTR1 r3 /* first string arg */
#define rSTR2 r4 /* second string arg */
@@ -35,33 +36,32 @@ EALIGN (memcmp, 4, 0)
#define rWORD4 r9 /* next word in s2 */
#define rWORD5 r10 /* next word in s1 */
#define rWORD6 r11 /* next word in s2 */
-#define rBITDIF r12 /* bits that differ in s1 & s2 words */
#define rWORD7 r30 /* next word in s1 */
#define rWORD8 r31 /* next word in s2 */
- xor rTMP, rSTR2, rSTR1
+ xor r0, rSTR2, rSTR1
cmplwi cr6, rN, 0
cmplwi cr1, rN, 12
- clrlwi. rTMP, rTMP, 30
- clrlwi rBITDIF, rSTR1, 30
- cmplwi cr5, rBITDIF, 0
+ clrlwi. r0, r0, 30
+ clrlwi r12, rSTR1, 30
+ cmplwi cr5, r12, 0
beq- cr6, L(zeroLength)
- dcbt 0,rSTR1
- dcbt 0,rSTR2
+ dcbt 0, rSTR1
+ dcbt 0, rSTR2
/* If less than 8 bytes or not aligned, use the unaligned
byte loop. */
blt cr1, L(bytealigned)
- stwu 1,-64(1)
+ stwu 1, -64(r1)
cfi_adjust_cfa_offset(64)
- stw r31,48(1)
- cfi_offset(31,(48-64))
- stw r30,44(1)
- cfi_offset(30,(44-64))
+ stw rWORD8, 48(r1)
+ cfi_offset(rWORD8, (48-64))
+ stw rWORD7, 44(r1)
+ cfi_offset(rWORD7, (44-64))
bne L(unaligned)
/* At this point we know both strings have the same alignment and the
- compare length is at least 8 bytes. rBITDIF contains the low order
+ compare length is at least 8 bytes. r12 contains the low order
2 bits of rSTR1 and cr5 contains the result of the logical compare
- of rBITDIF to 0. If rBITDIF == 0 then we are already word
+ of r12 to 0. If r12 == 0 then we are already word
aligned and can perform the word aligned loop.
Otherwise we know the two strings have the same alignment (but not
@@ -70,74 +70,95 @@ EALIGN (memcmp, 4, 0)
eliminate bits preceding the first byte. Since we want to join the
normal (word aligned) compare loop, starting at the second word,
we need to adjust the length (rN) and special case the loop
- versioning for the first word. This insures that the loop count is
+ versioning for the first word. This ensures that the loop count is
correct and the first word (shifted) is in the expected register pair. */
- .align 4
+ .align 4
L(samealignment):
clrrwi rSTR1, rSTR1, 2
clrrwi rSTR2, rSTR2, 2
beq cr5, L(Waligned)
- add rN, rN, rBITDIF
- slwi r11, rBITDIF, 3
- srwi rTMP, rN, 4 /* Divide by 16 */
- andi. rBITDIF, rN, 12 /* Get the word remainder */
+ add rN, rN, r12
+ slwi rWORD6, r12, 3
+ srwi r0, rN, 4 /* Divide by 16 */
+ andi. r12, rN, 12 /* Get the word remainder */
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD1, 0, rSTR1
+ lwbrx rWORD2, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD1, 0(rSTR1)
lwz rWORD2, 0(rSTR2)
- cmplwi cr1, rBITDIF, 8
+#endif
+ cmplwi cr1, r12, 8
cmplwi cr7, rN, 16
clrlwi rN, rN, 30
beq L(dPs4)
- mtctr rTMP /* Power4 wants mtctr 1st in dispatch group */
+ mtctr r0 /* Power4 wants mtctr 1st in dispatch group */
bgt cr1, L(dPs3)
beq cr1, L(dPs2)
/* Remainder is 4 */
- .align 3
+ .align 3
L(dsP1):
- slw rWORD5, rWORD1, r11
- slw rWORD6, rWORD2, r11
+ slw rWORD5, rWORD1, rWORD6
+ slw rWORD6, rWORD2, rWORD6
cmplw cr5, rWORD5, rWORD6
blt cr7, L(dP1x)
/* Do something useful in this cycle since we have to branch anyway. */
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD1, 0, rSTR1
+ lwbrx rWORD2, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD1, 4(rSTR1)
lwz rWORD2, 4(rSTR2)
- cmplw cr0, rWORD1, rWORD2
+#endif
+ cmplw cr7, rWORD1, rWORD2
b L(dP1e)
/* Remainder is 8 */
- .align 4
+ .align 4
L(dPs2):
- slw rWORD5, rWORD1, r11
- slw rWORD6, rWORD2, r11
+ slw rWORD5, rWORD1, rWORD6
+ slw rWORD6, rWORD2, rWORD6
cmplw cr6, rWORD5, rWORD6
blt cr7, L(dP2x)
/* Do something useful in this cycle since we have to branch anyway. */
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD7, 0, rSTR1
+ lwbrx rWORD8, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD7, 4(rSTR1)
lwz rWORD8, 4(rSTR2)
+#endif
cmplw cr5, rWORD7, rWORD8
b L(dP2e)
/* Remainder is 12 */
- .align 4
+ .align 4
L(dPs3):
- slw rWORD3, rWORD1, r11
- slw rWORD4, rWORD2, r11
+ slw rWORD3, rWORD1, rWORD6
+ slw rWORD4, rWORD2, rWORD6
cmplw cr1, rWORD3, rWORD4
b L(dP3e)
/* Count is a multiple of 16, remainder is 0 */
- .align 4
+ .align 4
L(dPs4):
- mtctr rTMP /* Power4 wants mtctr 1st in dispatch group */
- slw rWORD1, rWORD1, r11
- slw rWORD2, rWORD2, r11
- cmplw cr0, rWORD1, rWORD2
+ mtctr r0 /* Power4 wants mtctr 1st in dispatch group */
+ slw rWORD1, rWORD1, rWORD6
+ slw rWORD2, rWORD2, rWORD6
+ cmplw cr7, rWORD1, rWORD2
b L(dP4e)
/* At this point we know both strings are word aligned and the
compare length is at least 8 bytes. */
- .align 4
+ .align 4
L(Waligned):
- andi. rBITDIF, rN, 12 /* Get the word remainder */
- srwi rTMP, rN, 4 /* Divide by 16 */
- cmplwi cr1, rBITDIF, 8
+ andi. r12, rN, 12 /* Get the word remainder */
+ srwi r0, rN, 4 /* Divide by 16 */
+ cmplwi cr1, r12, 8
cmplwi cr7, rN, 16
clrlwi rN, rN, 30
beq L(dP4)
@@ -145,177 +166,352 @@ L(Waligned):
beq cr1, L(dP2)
/* Remainder is 4 */
- .align 4
+ .align 4
L(dP1):
- mtctr rTMP /* Power4 wants mtctr 1st in dispatch group */
+ mtctr r0 /* Power4 wants mtctr 1st in dispatch group */
/* Normally we'd use rWORD7/rWORD8 here, but since we might exit early
(8-15 byte compare), we want to use only volatile registers. This
means we can avoid restoring non-volatile registers since we did not
change any on the early exit path. The key here is the non-early
exit path only cares about the condition code (cr5), not about which
register pair was used. */
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD5, 0, rSTR1
+ lwbrx rWORD6, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD5, 0(rSTR1)
lwz rWORD6, 0(rSTR2)
+#endif
cmplw cr5, rWORD5, rWORD6
blt cr7, L(dP1x)
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD1, 0, rSTR1
+ lwbrx rWORD2, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD1, 4(rSTR1)
lwz rWORD2, 4(rSTR2)
- cmplw cr0, rWORD1, rWORD2
+#endif
+ cmplw cr7, rWORD1, rWORD2
L(dP1e):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD3, 0, rSTR1
+ lwbrx rWORD4, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD3, 8(rSTR1)
lwz rWORD4, 8(rSTR2)
+#endif
cmplw cr1, rWORD3, rWORD4
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD5, 0, rSTR1
+ lwbrx rWORD6, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD5, 12(rSTR1)
lwz rWORD6, 12(rSTR2)
+#endif
cmplw cr6, rWORD5, rWORD6
- bne cr5, L(dLcr5)
- bne cr0, L(dLcr0)
+ bne cr5, L(dLcr5x)
+ bne cr7, L(dLcr7x)
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD7, 0, rSTR1
+ lwbrx rWORD8, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwzu rWORD7, 16(rSTR1)
lwzu rWORD8, 16(rSTR2)
+#endif
bne cr1, L(dLcr1)
cmplw cr5, rWORD7, rWORD8
bdnz L(dLoop)
bne cr6, L(dLcr6)
- lwz r30,44(1)
- lwz r31,48(1)
- .align 3
+ lwz rWORD7, 44(r1)
+ lwz rWORD8, 48(r1)
+ .align 3
L(dP1x):
slwi. r12, rN, 3
- bne cr5, L(dLcr5)
+ bne cr5, L(dLcr5x)
subfic rN, r12, 32 /* Shift count is 32 - (rN * 8). */
- lwz 1,0(1)
+ addi 1, 1, 64
+ cfi_adjust_cfa_offset(-64)
bne L(d00)
li rRTN, 0
blr
/* Remainder is 8 */
- .align 4
+ .align 4
+ cfi_adjust_cfa_offset(64)
L(dP2):
- mtctr rTMP /* Power4 wants mtctr 1st in dispatch group */
+ mtctr r0 /* Power4 wants mtctr 1st in dispatch group */
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD5, 0, rSTR1
+ lwbrx rWORD6, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD5, 0(rSTR1)
lwz rWORD6, 0(rSTR2)
+#endif
cmplw cr6, rWORD5, rWORD6
blt cr7, L(dP2x)
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD7, 0, rSTR1
+ lwbrx rWORD8, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD7, 4(rSTR1)
lwz rWORD8, 4(rSTR2)
+#endif
cmplw cr5, rWORD7, rWORD8
L(dP2e):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD1, 0, rSTR1
+ lwbrx rWORD2, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD1, 8(rSTR1)
lwz rWORD2, 8(rSTR2)
- cmplw cr0, rWORD1, rWORD2
+#endif
+ cmplw cr7, rWORD1, rWORD2
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD3, 0, rSTR1
+ lwbrx rWORD4, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD3, 12(rSTR1)
lwz rWORD4, 12(rSTR2)
+#endif
cmplw cr1, rWORD3, rWORD4
+#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 4
addi rSTR2, rSTR2, 4
+#endif
bne cr6, L(dLcr6)
bne cr5, L(dLcr5)
b L(dLoop2)
/* Again we are on a early exit path (16-23 byte compare), we want to
only use volatile registers and avoid restoring non-volatile
registers. */
- .align 4
+ .align 4
L(dP2x):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD3, 0, rSTR1
+ lwbrx rWORD4, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD3, 4(rSTR1)
lwz rWORD4, 4(rSTR2)
- cmplw cr5, rWORD3, rWORD4
+#endif
+ cmplw cr1, rWORD3, rWORD4
slwi. r12, rN, 3
- bne cr6, L(dLcr6)
+ bne cr6, L(dLcr6x)
+#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 4
addi rSTR2, rSTR2, 4
- bne cr5, L(dLcr5)
+#endif
+ bne cr1, L(dLcr1x)
subfic rN, r12, 32 /* Shift count is 32 - (rN * 8). */
- lwz 1,0(1)
+ addi 1, 1, 64
+ cfi_adjust_cfa_offset(-64)
bne L(d00)
li rRTN, 0
blr
/* Remainder is 12 */
- .align 4
+ .align 4
+ cfi_adjust_cfa_offset(64)
L(dP3):
- mtctr rTMP /* Power4 wants mtctr 1st in dispatch group */
+ mtctr r0 /* Power4 wants mtctr 1st in dispatch group */
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD3, 0, rSTR1
+ lwbrx rWORD4, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD3, 0(rSTR1)
lwz rWORD4, 0(rSTR2)
+#endif
cmplw cr1, rWORD3, rWORD4
L(dP3e):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD5, 0, rSTR1
+ lwbrx rWORD6, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD5, 4(rSTR1)
lwz rWORD6, 4(rSTR2)
+#endif
cmplw cr6, rWORD5, rWORD6
blt cr7, L(dP3x)
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD7, 0, rSTR1
+ lwbrx rWORD8, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD7, 8(rSTR1)
lwz rWORD8, 8(rSTR2)
+#endif
cmplw cr5, rWORD7, rWORD8
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD1, 0, rSTR1
+ lwbrx rWORD2, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD1, 12(rSTR1)
lwz rWORD2, 12(rSTR2)
- cmplw cr0, rWORD1, rWORD2
+#endif
+ cmplw cr7, rWORD1, rWORD2
+#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 8
addi rSTR2, rSTR2, 8
+#endif
bne cr1, L(dLcr1)
bne cr6, L(dLcr6)
b L(dLoop1)
/* Again we are on a early exit path (24-31 byte compare), we want to
only use volatile registers and avoid restoring non-volatile
registers. */
- .align 4
+ .align 4
L(dP3x):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD1, 0, rSTR1
+ lwbrx rWORD2, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD1, 8(rSTR1)
lwz rWORD2, 8(rSTR2)
- cmplw cr5, rWORD1, rWORD2
+#endif
+ cmplw cr7, rWORD1, rWORD2
slwi. r12, rN, 3
- bne cr1, L(dLcr1)
+ bne cr1, L(dLcr1x)
+#ifndef __LITTLE_ENDIAN__
addi rSTR1, rSTR1, 8
addi rSTR2, rSTR2, 8
- bne cr6, L(dLcr6)
+#endif
+ bne cr6, L(dLcr6x)
subfic rN, r12, 32 /* Shift count is 32 - (rN * 8). */
- bne cr5, L(dLcr5)
- lwz 1,0(1)
+ bne cr7, L(dLcr7x)
+ addi 1, 1, 64
+ cfi_adjust_cfa_offset(-64)
bne L(d00)
li rRTN, 0
blr
/* Count is a multiple of 16, remainder is 0 */
- .align 4
+ .align 4
+ cfi_adjust_cfa_offset(64)
L(dP4):
- mtctr rTMP /* Power4 wants mtctr 1st in dispatch group */
+ mtctr r0 /* Power4 wants mtctr 1st in dispatch group */
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD1, 0, rSTR1
+ lwbrx rWORD2, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD1, 0(rSTR1)
lwz rWORD2, 0(rSTR2)
- cmplw cr0, rWORD1, rWORD2
+#endif
+ cmplw cr7, rWORD1, rWORD2
L(dP4e):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD3, 0, rSTR1
+ lwbrx rWORD4, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD3, 4(rSTR1)
lwz rWORD4, 4(rSTR2)
+#endif
cmplw cr1, rWORD3, rWORD4
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD5, 0, rSTR1
+ lwbrx rWORD6, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD5, 8(rSTR1)
lwz rWORD6, 8(rSTR2)
+#endif
cmplw cr6, rWORD5, rWORD6
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD7, 0, rSTR1
+ lwbrx rWORD8, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwzu rWORD7, 12(rSTR1)
lwzu rWORD8, 12(rSTR2)
+#endif
cmplw cr5, rWORD7, rWORD8
- bne cr0, L(dLcr0)
+ bne cr7, L(dLcr7)
bne cr1, L(dLcr1)
bdz- L(d24) /* Adjust CTR as we start with +4 */
/* This is the primary loop */
- .align 4
+ .align 4
L(dLoop):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD1, 0, rSTR1
+ lwbrx rWORD2, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD1, 4(rSTR1)
lwz rWORD2, 4(rSTR2)
+#endif
cmplw cr1, rWORD3, rWORD4
bne cr6, L(dLcr6)
L(dLoop1):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD3, 0, rSTR1
+ lwbrx rWORD4, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD3, 8(rSTR1)
lwz rWORD4, 8(rSTR2)
+#endif
cmplw cr6, rWORD5, rWORD6
bne cr5, L(dLcr5)
L(dLoop2):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD5, 0, rSTR1
+ lwbrx rWORD6, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD5, 12(rSTR1)
lwz rWORD6, 12(rSTR2)
+#endif
cmplw cr5, rWORD7, rWORD8
- bne cr0, L(dLcr0)
+ bne cr7, L(dLcr7)
L(dLoop3):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD7, 0, rSTR1
+ lwbrx rWORD8, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwzu rWORD7, 16(rSTR1)
lwzu rWORD8, 16(rSTR2)
+#endif
bne- cr1, L(dLcr1)
- cmplw cr0, rWORD1, rWORD2
+ cmplw cr7, rWORD1, rWORD2
bdnz+ L(dLoop)
L(dL4):
@@ -325,7 +521,7 @@ L(dL4):
bne cr5, L(dLcr5)
cmplw cr5, rWORD7, rWORD8
L(d44):
- bne cr0, L(dLcr0)
+ bne cr7, L(dLcr7)
L(d34):
bne cr1, L(dLcr1)
L(d24):
@@ -334,69 +530,82 @@ L(d14):
slwi. r12, rN, 3
bne cr5, L(dLcr5)
L(d04):
- lwz r30,44(1)
- lwz r31,48(1)
- lwz 1,0(1)
+ lwz rWORD7, 44(r1)
+ lwz rWORD8, 48(r1)
+ addi 1, 1, 64
+ cfi_adjust_cfa_offset(-64)
subfic rN, r12, 32 /* Shift count is 32 - (rN * 8). */
beq L(zeroLength)
/* At this point we have a remainder of 1 to 3 bytes to compare. Since
we are aligned it is safe to load the whole word, and use
- shift right to eliminate bits beyond the compare length. */
+ shift right to eliminate bits beyond the compare length. */
L(d00):
+#ifdef __LITTLE_ENDIAN__
+ lwbrx rWORD1, 0, rSTR1
+ lwbrx rWORD2, 0, rSTR2
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+#else
lwz rWORD1, 4(rSTR1)
lwz rWORD2, 4(rSTR2)
+#endif
srw rWORD1, rWORD1, rN
srw rWORD2, rWORD2, rN
- cmplw rWORD1,rWORD2
- li rRTN,0
- beqlr
- li rRTN,1
- bgtlr
- li rRTN,-1
- blr
-
- .align 4
-L(dLcr0):
- lwz r30,44(1)
- lwz r31,48(1)
+ sub rRTN, rWORD1, rWORD2
+ blr
+
+ .align 4
+ cfi_adjust_cfa_offset(64)
+L(dLcr7):
+ lwz rWORD7, 44(r1)
+ lwz rWORD8, 48(r1)
+L(dLcr7x):
li rRTN, 1
- lwz 1,0(1)
- bgtlr cr0
+ addi 1, 1, 64
+ cfi_adjust_cfa_offset(-64)
+ bgtlr cr7
li rRTN, -1
blr
- .align 4
+ .align 4
+ cfi_adjust_cfa_offset(64)
L(dLcr1):
- lwz r30,44(1)
- lwz r31,48(1)
+ lwz rWORD7, 44(r1)
+ lwz rWORD8, 48(r1)
+L(dLcr1x):
li rRTN, 1
- lwz 1,0(1)
+ addi 1, 1, 64
+ cfi_adjust_cfa_offset(-64)
bgtlr cr1
li rRTN, -1
blr
- .align 4
+ .align 4
+ cfi_adjust_cfa_offset(64)
L(dLcr6):
- lwz r30,44(1)
- lwz r31,48(1)
+ lwz rWORD7, 44(r1)
+ lwz rWORD8, 48(r1)
+L(dLcr6x):
li rRTN, 1
- lwz 1,0(1)
+ addi 1, 1, 64
+ cfi_adjust_cfa_offset(-64)
bgtlr cr6
li rRTN, -1
blr
- .align 4
+ .align 4
+ cfi_adjust_cfa_offset(64)
L(dLcr5):
- lwz r30,44(1)
- lwz r31,48(1)
+ lwz rWORD7, 44(r1)
+ lwz rWORD8, 48(r1)
L(dLcr5x):
li rRTN, 1
- lwz 1,0(1)
+ addi 1, 1, 64
+ cfi_adjust_cfa_offset(-64)
bgtlr cr5
li rRTN, -1
blr
- .align 4
+ .align 4
L(bytealigned):
- cfi_adjust_cfa_offset(-64)
- mtctr rN /* Power4 wants mtctr 1st in dispatch group */
+ mtctr rN /* Power4 wants mtctr 1st in dispatch group */
/* We need to prime this loop. This loop is swing modulo scheduled
to avoid pipe delays. The dependent instruction latencies (load to
@@ -411,7 +620,7 @@ L(bytealigned):
lbz rWORD1, 0(rSTR1)
lbz rWORD2, 0(rSTR2)
bdz- L(b11)
- cmplw cr0, rWORD1, rWORD2
+ cmplw cr7, rWORD1, rWORD2
lbz rWORD3, 1(rSTR1)
lbz rWORD4, 1(rSTR2)
bdz- L(b12)
@@ -419,11 +628,11 @@ L(bytealigned):
lbzu rWORD5, 2(rSTR1)
lbzu rWORD6, 2(rSTR2)
bdz- L(b13)
- .align 4
+ .align 4
L(bLoop):
lbzu rWORD1, 1(rSTR1)
lbzu rWORD2, 1(rSTR2)
- bne- cr0, L(bLcr0)
+ bne- cr7, L(bLcr7)
cmplw cr6, rWORD5, rWORD6
bdz- L(b3i)
@@ -432,7 +641,7 @@ L(bLoop):
lbzu rWORD4, 1(rSTR2)
bne- cr1, L(bLcr1)
- cmplw cr0, rWORD1, rWORD2
+ cmplw cr7, rWORD1, rWORD2
bdz- L(b2i)
lbzu rWORD5, 1(rSTR1)
@@ -449,23 +658,23 @@ L(bLoop):
tested. In this case we must complete the pending operations
before returning. */
L(b1i):
- bne- cr0, L(bLcr0)
+ bne- cr7, L(bLcr7)
bne