aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--stdlib/gmp-impl.h20
-rw-r--r--stdlib/gmp.h11
-rw-r--r--stdlib/longlong.h88
-rw-r--r--sysdeps/generic/mul_n.c20
-rw-r--r--sysdeps/i386/i586/lshift.S11
-rw-r--r--sysdeps/i386/i586/rshift.S11
-rw-r--r--sysdeps/m68k/add_n.S76
-rw-r--r--sysdeps/m68k/sub_n.S76
-rw-r--r--sysdeps/m88k/add_n.s103
-rw-r--r--sysdeps/m88k/mul_1.s128
-rw-r--r--sysdeps/m88k/sub_n.s104
-rw-r--r--sysdeps/rs6000/add_n.s2
-rw-r--r--sysdeps/rs6000/sub_n.s2
-rw-r--r--sysdeps/sparc/add_n.S304
-rw-r--r--sysdeps/sparc/lshift.S94
-rw-r--r--sysdeps/sparc/rshift.S91
-rw-r--r--sysdeps/sparc/sub_n.S391
17 files changed, 1246 insertions, 286 deletions
diff --git a/stdlib/gmp-impl.h b/stdlib/gmp-impl.h
index 0d2a8fcede..2f0956d960 100644
--- a/stdlib/gmp-impl.h
+++ b/stdlib/gmp-impl.h
@@ -179,24 +179,22 @@ void _mp_default_free ();
strings in base 2..36. */
struct bases
{
- /* Number of digits in the conversion base that always fits in
- an mp_limb. For example, for base 10 this is 10, since
- 2**32 = 4294967296 has ten digits. */
+ /* Number of digits in the conversion base that always fits in an mp_limb.
+ For example, for base 10 on a machine where a mp_limb has 32 bits this
+ is 9, since 10**9 is the largest number that fits into a mp_limb. */
int chars_per_limb;
/* log(2)/log(conversion_base) */
float chars_per_bit_exactly;
- /* big_base is conversion_base**chars_per_limb, i.e. the biggest
- number that fits a word, built by factors of conversion_base.
- Exception: For 2, 4, 8, etc, big_base is log2(base), i.e. the
- number of bits used to represent each digit in the base. */
+ /* base**chars_per_limb, i.e. the biggest number that fits a word, built by
+ factors of base. Exception: For 2, 4, 8, etc, big_base is log2(base),
+ i.e. the number of bits used to represent each digit in the base. */
mp_limb big_base;
- /* big_base_inverted is a BITS_PER_MP_LIMB bit approximation to
- 1/big_base, represented as a fixed-point number. Instead of
- dividing by big_base an application can choose to multiply
- by big_base_inverted. */
+ /* A BITS_PER_MP_LIMB bit approximation to 1/big_base, represented as a
+ fixed-point number. Instead of dividing by big_base an application can
+ choose to multiply by big_base_inverted. */
mp_limb big_base_inverted;
};
diff --git a/stdlib/gmp.h b/stdlib/gmp.h
index 0b2cb29014..243779996d 100644
--- a/stdlib/gmp.h
+++ b/stdlib/gmp.h
@@ -23,6 +23,7 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#ifndef __GNU_MP__
#define __need_size_t
#include <stddef.h>
+#undef __need_size_t
#if defined (__STDC__)
#define __gmp_const const
@@ -40,7 +41,7 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
typedef unsigned int mp_limb;
typedef int mp_limb_signed;
#else
-#if _LONG_LONG_LIMB
+#ifdef _LONG_LONG_LIMB
typedef unsigned long long int mp_limb;
typedef long long int mp_limb_signed;
#else
@@ -110,11 +111,11 @@ typedef __mpq_struct mpq_t[1];
typedef struct
{
- mp_size_t alloc; /* Number of *limbs* allocated and pointed
- to by the D field. */
mp_size_t prec; /* Max precision, in number of `mp_limb's.
Set by mpf_init and modified by
- mpf_set_prec. */
+ mpf_set_prec. The area pointed to
+ by the `d' field contains `prec' + 1
+ limbs. */
mp_size_t size; /* abs(SIZE) is the number of limbs
the last field points to. If SIZE
is negative this is a negative
@@ -127,7 +128,7 @@ typedef struct
typedef __mpf_struct mpf_t[1];
/* Types for function declarations in gmp files. */
-/* ??? Should not pollute user name space ??? */
+/* ??? Should not pollute user name space with these ??? */
typedef __gmp_const __mpz_struct *mpz_srcptr;
typedef __mpz_struct *mpz_ptr;
typedef __gmp_const __mpf_struct *mpf_srcptr;
diff --git a/stdlib/longlong.h b/stdlib/longlong.h
index bbb92e3af8..e52bf32dba 100644
--- a/stdlib/longlong.h
+++ b/stdlib/longlong.h
@@ -139,6 +139,7 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
__asm__ ("clz %0,%1" \
: "=r" ((USItype)(count)) \
: "r" ((USItype)(x)))
+#define COUNT_LEADING_ZEROS_0 32
#endif /* __a29k__ */
#if defined (__alpha__) && W_TYPE_SIZE == 64
@@ -298,9 +299,9 @@ extern UDItype __udiv_qrnnd ();
struct {USItype __h, __l;} __i; \
} __xx; \
__asm__ ("xmpyu %1,%2,%0" \
- : "=fx" (__xx.__ll) \
- : "fx" ((USItype)(u)), \
- "fx" ((USItype)(v))); \
+ : "=*f" (__xx.__ll) \
+ : "*f" ((USItype)(u)), \
+ "*f" ((USItype)(v))); \
(wh) = __xx.__i.__h; \
(wl) = __xx.__i.__l; \
} while (0)
@@ -339,7 +340,7 @@ extern USItype __udiv_qrnnd ();
sub %0,%1,%0 ; Subtract it.
" : "=r" (count), "=r" (__tmp) : "1" (x)); \
} while (0)
-#endif
+#endif /* hppa */
#if (defined (__i370__) || defined (__mvs__)) && W_TYPE_SIZE == 32
#define umul_ppmm(xh, xl, m0, m1) \
@@ -431,7 +432,29 @@ extern USItype __udiv_qrnnd ();
#endif
#endif /* 80x86 */
+#if defined (__i860__) && W_TYPE_SIZE == 32
+#define rshift_rhlc(r,h,l,c) \
+ __asm__ ("shr %3,r0,r0\;shrd %1,%2,%0" \
+	   : "=r" (r) : "r" (h), "r" (l), "rn" (c))
+#endif /* i860 */
+
#if defined (__i960__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("cmpo 1,0\;addc %5,%4,%1\;addc %3,%2,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%dI" ((USItype)(ah)), \
+ "dI" ((USItype)(bh)), \
+ "%dI" ((USItype)(al)), \
+ "dI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("cmpo 0,0\;subc %5,%4,%1\;subc %3,%2,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "dI" ((USItype)(ah)), \
+ "dI" ((USItype)(bh)), \
+ "dI" ((USItype)(al)), \
+ "dI" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
({union {UDItype __ll; \
struct {USItype __l, __h;} __i; \
@@ -448,7 +471,39 @@ extern USItype __udiv_qrnnd ();
: "%dI" ((USItype)(u)), \
"dI" ((USItype)(v))); \
__w; })
-#endif /* __i960__ */
+#define udiv_qrnnd(q, r, nh, nl, d) \
+ do { \
+ union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+	  } __nn, __rq;						\
+ __nn.__i.__h = (nh); __nn.__i.__l = (nl); \
+ __asm__ ("ediv %d,%n,%0" \
+ : "=d" (__rq.__ll) \
+ : "dI" (__nn.__ll), \
+ "dI" ((USItype)(d))); \
+ (r) = __rq.__i.__l; (q) = __rq.__i.__h; \
+ } while (0)
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __cbtmp; \
+ __asm__ ("scanbit %1,%0" \
+ : "=r" (__cbtmp) \
+ : "r" ((USItype)(x))); \
+ (count) = __cbtmp ^ 31; \
+ } while (0)
+#define COUNT_LEADING_ZEROS_0 (-32) /* sic */
+#if defined (__i960mx) /* what is the proper symbol to test??? */
+#define rshift_rhlc(r,h,l,c) \
+ do { \
+ union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __nn; \
+ __nn.__i.__h = (h); __nn.__i.__l = (l); \
+ __asm__ ("shre %2,%1,%0" \
+ : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \
+  } while (0)
+#endif /* i960mx */
+#endif /* i960 */
#if (defined (__mc68000__) || defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
@@ -469,7 +524,7 @@ extern USItype __udiv_qrnnd ();
"d" ((USItype)(bh)), \
"1" ((USItype)(al)), \
"g" ((USItype)(bl)))
-#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
+#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("mulu%.l %3,%1:%0" \
: "=d" ((USItype)(w0)), \
@@ -496,8 +551,9 @@ extern USItype __udiv_qrnnd ();
__asm__ ("bfffo %1{%b2:%b2},%0" \
: "=d" ((USItype)(count)) \
: "od" ((USItype)(x)), "n" (0))
+#define COUNT_LEADING_ZEROS_0 32
#else /* not mc68020 */
-#define umul_ppmmxx(xh, xl, a, b) \
+#define umul_ppmm(xh, xl, a, b) \
do { USItype __umul_tmp1, __umul_tmp2; \
__asm__ ("| Inlined umul_ppmm
move%.l %5,%3
@@ -557,6 +613,7 @@ extern USItype __udiv_qrnnd ();
: "r" ((USItype)(x))); \
(count) = __cbtmp ^ 31; \
} while (0)
+#define COUNT_LEADING_ZEROS_0 63 /* sic */
#if defined (__m88110__)
#define umul_ppmm(wh, wl, u, v) \
do { \
@@ -738,6 +795,7 @@ extern USItype __udiv_qrnnd ();
__asm__ ("{cntlz|cntlzw} %0,%1" \
: "=r" ((USItype)(count)) \
: "r" ((USItype)(x)))
+#define COUNT_LEADING_ZEROS_0 32
#if defined (_ARCH_PPC)
#define umul_ppmm(ph, pl, m0, m1) \
do { \
@@ -887,7 +945,7 @@ extern USItype __udiv_qrnnd ();
(count) += 16; \
} \
} while (0)
-#endif
+#endif /* RT/ROMP */
#if defined (__sh2__) && W_TYPE_SIZE == 32
#define umul_ppmm(w1, w0, u, v) \
@@ -1154,20 +1212,6 @@ extern USItype __udiv_qrnnd ();
(xh) += ((((signed int) __m0 >> 15) & __m1) \
+ (((signed int) __m1 >> 15) & __m0)); \
} while (0)
-#define umul_ppmm_off(xh, xl, m0, m1) \
- do { \
- union {long int __ll; \
- struct {unsigned int __h, __l;} __i; \
- } __xx; \
- __asm__ ("mult %S0,%H3" \
- : "=r" (__xx.__i.__h), \
- "=r" (__xx.__i.__l) \
- : "%1" (m0), \
- "rQR" (m1)); \
- (xh) = __xx.__i.__h + ((((signed int) m0 >> 15) & m1) \
- + (((signed int) m1 >> 15) & m0)); \
- (xl) = __xx.__i.__l; \
- } while (0)
#endif /* __z8000__ */
#endif /* __GNUC__ */
diff --git a/sysdeps/generic/mul_n.c b/sysdeps/generic/mul_n.c
index 7900988143..e37c5d8290 100644
--- a/sysdeps/generic/mul_n.c
+++ b/sysdeps/generic/mul_n.c
@@ -216,15 +216,7 @@ ____mpn_mul_n (prodp, up, vp, size, tspace)
cy += __mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
if (cy)
- {
- if (cy > 0)
- __mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
- else
- {
- __mpn_sub_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
- abort ();
- }
- }
+ __mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
MPN_COPY (prodp, tspace, hsize);
cy = __mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
@@ -362,15 +354,7 @@ ____mpn_sqr_n (prodp, up, size, tspace)
cy += __mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
if (cy)
- {
- if (cy > 0)
- __mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
- else
- {
- __mpn_sub_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
- abort ();
- }
- }
+ __mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
MPN_COPY (prodp, tspace, hsize);
cy = __mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
diff --git a/sysdeps/i386/i586/lshift.S b/sysdeps/i386/i586/lshift.S
index b9f8131297..c41f74e17d 100644
--- a/sysdeps/i386/i586/lshift.S
+++ b/sysdeps/i386/i586/lshift.S
@@ -43,12 +43,15 @@ C_SYMBOL_NAME(__mpn_lshift:)
movl 28(%esp),%ebp /* size */
movl 32(%esp),%ecx /* cnt */
+/* We can use faster code for shift-by-1 under certain conditions. */
cmp $1,%ecx
jne Lnormal
- movl %edi,%eax
- subl %esi,%eax
- cmpl %ebp,%eax
- jnc Lspecial
+ leal 4(%esi),%eax
+ cmpl %edi,%eax
+ jnc Lspecial /* jump if s_ptr + 1 >= res_ptr */
+ leal (%esi,%ebp,4),%eax
+ cmpl %eax,%edi
+ jnc Lspecial /* jump if res_ptr >= s_ptr + size */
Lnormal:
leal -4(%edi,%ebp,4),%edi
diff --git a/sysdeps/i386/i586/rshift.S b/sysdeps/i386/i586/rshift.S
index 51cde8f07f..a820a79bc7 100644
--- a/sysdeps/i386/i586/rshift.S
+++ b/sysdeps/i386/i586/rshift.S
@@ -43,12 +43,15 @@ C_SYMBOL_NAME(__mpn_rshift:)
movl 28(%esp),%ebp /* size */
movl 32(%esp),%ecx /* cnt */
+/* We can use faster code for shift-by-1 under certain conditions. */
cmp $1,%ecx
jne Lnormal
- movl %edi,%eax
- subl %esi,%eax
- cmpl %ebp,%eax
- jnc Lspecial
+ leal 4(%edi),%eax
+ cmpl %esi,%eax
+ jnc Lspecial /* jump if res_ptr + 1 >= s_ptr */
+ leal (%edi,%ebp,4),%eax
+ cmpl %eax,%esi
+ jnc Lspecial /* jump if s_ptr >= res_ptr + size */
Lnormal:
movl (%esi),%edx
diff --git a/sysdeps/m68k/add_n.S b/sysdeps/m68k/add_n.S
new file mode 100644
index 0000000000..ea7a4458ea
--- /dev/null
+++ b/sysdeps/m68k/add_n.S
@@ -0,0 +1,76 @@
+/* mc68020 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+ sum in a third limb vector.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+ INPUT PARAMETERS
+ res_ptr (sp + 4)
+ s1_ptr (sp + 8)
+	s2_ptr	(sp + 12)
+	size	(sp + 16)
+*/
+
+#include "asm-syntax.h"
+
+ TEXT
+ ALIGN
+ GLOBL ___mpn_add_n
+
+LAB(___mpn_add_n)
+/* Save used registers on the stack. */
+ INSN2(move,l ,MEM_PREDEC(sp),d2)
+ INSN2(move,l ,MEM_PREDEC(sp),a2)
+
+/* Copy the arguments to registers. Better use movem? */
+ INSN2(move,l ,a2,MEM_DISP(sp,12))
+ INSN2(move,l ,a0,MEM_DISP(sp,16))
+ INSN2(move,l ,a1,MEM_DISP(sp,20))
+ INSN2(move,l ,d2,MEM_DISP(sp,24))
+
+ INSN2(eor,w ,d2,#1)
+ INSN2(lsr,l ,d2,#1)
+ bcc L1
+ INSN2(subq,l ,d2,#1) /* clears cy as side effect */
+
+LAB(Loop)
+ INSN2(move,l ,d0,MEM_POSTINC(a0))
+ INSN2(move,l ,d1,MEM_POSTINC(a1))
+ INSN2(addx,l ,d0,d1)
+ INSN2(move,l ,MEM_POSTINC(a2),d0)
+LAB(L1) INSN2(move,l ,d0,MEM_POSTINC(a0))
+ INSN2(move,l ,d1,MEM_POSTINC(a1))
+ INSN2(addx,l ,d0,d1)
+ INSN2(move,l ,MEM_POSTINC(a2),d0)
+
+ dbf d2,Loop /* loop until 16 lsb of %4 == -1 */
+ INSN2(subx,l ,d0,d0) /* d0 <= -cy; save cy as 0 or -1 in d0 */
+ INSN2(sub,l ,d2,#0x10000)
+ bcs L2
+ INSN2(add,l ,d0,d0) /* restore cy */
+ bra Loop
+
+LAB(L2)
+ INSN1(neg,l ,d0)
+
+/* Restore used registers from stack frame. */
+ INSN2(move,l ,a2,MEM_POSTINC(sp))
+ INSN2(move,l ,d2,MEM_POSTINC(sp))
+
+ rts
diff --git a/sysdeps/m68k/sub_n.S b/sysdeps/m68k/sub_n.S
new file mode 100644
index 0000000000..19f0ec1568
--- /dev/null
+++ b/sysdeps/m68k/sub_n.S
@@ -0,0 +1,76 @@
+/* mc68020 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+ store difference in a third limb vector.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+ INPUT PARAMETERS
+ res_ptr (sp + 4)
+ s1_ptr (sp + 8)
+	s2_ptr	(sp + 12)
+	size	(sp + 16)
+*/
+
+#include "asm-syntax.h"
+
+ TEXT
+ ALIGN
+ GLOBL ___mpn_sub_n
+
+LAB(___mpn_sub_n)
+/* Save used registers on the stack. */
+ INSN2(move,l ,MEM_PREDEC(sp),d2)
+ INSN2(move,l ,MEM_PREDEC(sp),a2)
+
+/* Copy the arguments to registers. Better use movem? */
+ INSN2(move,l ,a2,MEM_DISP(sp,12))
+ INSN2(move,l ,a0,MEM_DISP(sp,16))
+ INSN2(move,l ,a1,MEM_DISP(sp,20))
+ INSN2(move,l ,d2,MEM_DISP(sp,24))
+
+ INSN2(eor,w ,d2,#1)
+ INSN2(lsr,l ,d2,#1)
+ bcc L1
+ INSN2(subq,l ,d2,#1) /* clears cy as side effect */
+
+LAB(Loop)
+ INSN2(move,l ,d0,MEM_POSTINC(a0))
+ INSN2(move,l ,d1,MEM_POSTINC(a1))
+ INSN2(subx,l ,d0,d1)
+ INSN2(move,l ,MEM_POSTINC(a2),d0)
+LAB(L1) INSN2(move,l ,d0,MEM_POSTINC(a0))
+ INSN2(move,l ,d1,MEM_POSTINC(a1))
+ INSN2(subx,l ,d0,d1)
+ INSN2(move,l ,MEM_POSTINC(a2),d0)
+
+ dbf d2,Loop /* loop until 16 lsb of %4 == -1 */
+ INSN2(subx,l ,d0,d0) /* d0 <= -cy; save cy as 0 or -1 in d0 */
+ INSN2(sub,l ,d2,#0x10000)
+ bcs L2
+ INSN2(add,l ,d0,d0) /* restore cy */
+ bra Loop
+
+LAB(L2)
+ INSN1(neg,l ,d0)
+
+/* Restore used registers from stack frame. */
+ INSN2(move,l ,a2,MEM_POSTINC(sp))
+ INSN2(move,l ,d2,MEM_POSTINC(sp))
+
+ rts
diff --git a/sysdeps/m88k/add_n.s b/sysdeps/m88k/add_n.s
new file mode 100644
index 0000000000..7e4ccccb90
--- /dev/null
+++ b/sysdeps/m88k/add_n.s
@@ -0,0 +1,103 @@
+; mc88100 __mpn_add -- Add two limb vectors of the same length > 0 and store
+; sum in a third limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+; the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr r2
+; s1_ptr r3
+; s2_ptr r4
+; size r5
+
+; This code has been optimized to run one instruction per clock, avoiding
+; load stalls and writeback contention. As a result, the instruction
+; order is not always natural.
+
+; The speed is about 4.6 clocks/limb + 18 clocks/limb-vector on an 88100,
+; but on the 88110, it seems to run much slower, 6.6 clocks/limb.
+
+ text
+ align 16
+ global ___mpn_add_n
+___mpn_add_n:
+ ld r6,r3,0 ; read first limb from s1_ptr
+ extu r10,r5,3
+ ld r7,r4,0 ; read first limb from s2_ptr
+
+ subu.co r5,r0,r5 ; (clear carry as side effect)
+ mak r5,r5,3<4>
+ bcnd eq0,r5,Lzero
+
+ or r12,r0,lo16(Lbase)
+ or.u r12,r12,hi16(Lbase)
+ addu r12,r12,r5 ; r12 is address for entering in loop
+
+ extu r5,r5,2 ; divide by 4
+ subu r2,r2,r5 ; adjust res_ptr
+ subu r3,r3,r5 ; adjust s1_ptr
+ subu r4,r4,r5 ; adjust s2_ptr
+
+ or r8,r6,r0
+
+ jmp.n r12
+ or r9,r7,r0
+
+Loop: addu r3,r3,32
+ st r8,r2,28
+ addu r4,r4,32
+ ld r6,r3,0
+ addu r2,r2,32
+ ld r7,r4,0
+Lzero: subu r10,r10,1 ; add 0 + 8r limbs (adj loop cnt)
+Lbase: ld r8,r3,4
+ addu.cio r6,r6,r7
+ ld r9,r4,4
+ st r6,r2,0
+ ld r6,r3,8 ; add 7 + 8r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,8
+ st r8,r2,4
+ ld r8,r3,12 ; add 6 + 8r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,12
+ st r6,r2,8
+ ld r6,r3,16 ; add 5 + 8r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,16
+ st r8,r2,12
+ ld r8,r3,20 ; add 4 + 8r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,20
+ st r6,r2,16
+ ld r6,r3,24 ; add 3 + 8r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,24
+ st r8,r2,20
+ ld r8,r3,28 ; add 2 + 8r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,28
+ st r6,r2,24
+ bcnd.n ne0,r10,Loop ; add 1 + 8r limbs
+ addu.cio r8,r8,r9
+
+ st r8,r2,28 ; store most significant limb
+
+ jmp.n r1
+ addu.ci r2,r0,r0 ; return carry-out from most sign. limb
diff --git a/sysdeps/m88k/mul_1.s b/sysdeps/m88k/mul_1.s
new file mode 100644
index 0000000000..35c238d570
--- /dev/null
+++ b/sysdeps/m88k/mul_1.s
@@ -0,0 +1,128 @@
+; mc88100 __mpn_mul_1 -- Multiply a limb vector with a single limb and
+; store the prod