Diffstat (limited to 'crypto/bn')
-rw-r--r--   crypto/bn/Makefile                        |  107
-rwxr-xr-x   crypto/bn/asm/armv4-gf2m.pl               |  278
-rwxr-xr-x   crypto/bn/asm/armv4-mont.pl               |  204
-rw-r--r--   crypto/bn/asm/bn-586.pl                   |  203
-rw-r--r--   crypto/bn/asm/co-586.pl                   |    3
-rwxr-xr-x   crypto/bn/asm/ia64-mont.pl                |  851
-rwxr-xr-x   crypto/bn/asm/mips-mont.pl                |  426
-rwxr-xr-x   crypto/bn/asm/mips.pl                     | 2585
-rwxr-xr-x   crypto/bn/asm/mips3-mont.pl               |  327
-rwxr-xr-x   crypto/bn/asm/modexp512-x86_64.pl         | 1496
-rwxr-xr-x   crypto/bn/asm/parisc-mont.pl              |  993
-rwxr-xr-x   crypto/bn/asm/ppc-mont.pl                 |  334
-rw-r--r--   crypto/bn/asm/ppc.pl                      |  278
-rwxr-xr-x   crypto/bn/asm/ppc64-mont.pl               | 1088
-rwxr-xr-x   crypto/bn/asm/s390x-gf2m.pl               |  221
-rwxr-xr-x   crypto/bn/asm/s390x-mont.pl               |  277
-rwxr-xr-x   crypto/bn/asm/s390x.S                     |  678
-rw-r--r--   crypto/bn/asm/sparcv8plus.S               |   15
-rwxr-xr-x   crypto/bn/asm/sparcv9-mont.pl             |  606
-rwxr-xr-x   crypto/bn/asm/sparcv9a-mont.pl            |  882
-rwxr-xr-x   crypto/bn/asm/via-mont.pl                 |  242
-rwxr-xr-x   crypto/bn/asm/x86-gf2m.pl                 |  313
-rwxr-xr-x   crypto/bn/asm/x86-mont.pl (renamed from crypto/bn/asm/mo-586.pl) | 16
-rw-r--r--   crypto/bn/asm/x86_64-gcc.c                |   10
-rwxr-xr-x   crypto/bn/asm/x86_64-gf2m.pl              |  389
-rwxr-xr-x   crypto/bn/asm/x86_64-mont.pl              | 1594
-rwxr-xr-x   crypto/bn/asm/x86_64-mont5.pl             | 1070
-rw-r--r--   crypto/bn/bn.h                            |  190
-rw-r--r--   crypto/bn/bn_asm.c                        |  322
-rw-r--r--   crypto/bn/bn_blind.c                      |   17
-rw-r--r--   crypto/bn/bn_ctx.c                        |    6
-rw-r--r--   crypto/bn/bn_div.c                        |  278
-rw-r--r--   crypto/bn/bn_exp.c                        |  240
-rw-r--r--   crypto/bn/bn_gf2m.c                       |  250
-rw-r--r--   crypto/bn/bn_lcl.h                        |   26
-rw-r--r--   crypto/bn/bn_lib.c                        |   10
-rw-r--r--   crypto/bn/bn_mont.c                       |  335
-rw-r--r--   crypto/bn/bn_nist.c                       |  402
-rw-r--r--   crypto/bn/bn_opt.c                        |   87
-rw-r--r--   crypto/bn/bn_print.c                      |   40
-rw-r--r--   crypto/bn/bn_shift.c                      |   27
-rw-r--r--   crypto/bn/bntest.c                        |   44
-rw-r--r--   crypto/bn/exptest.c                       |    4
43 files changed, 16339 insertions, 1425 deletions
diff --git a/crypto/bn/Makefile b/crypto/bn/Makefile
index f5e8f65a4698..672773454cfd 100644
--- a/crypto/bn/Makefile
+++ b/crypto/bn/Makefile
@@ -12,8 +12,6 @@ MAKEFILE= Makefile
AR= ar r
BN_ASM= bn_asm.o
-# or use
-#BN_ASM= bn86-elf.o
CFLAGS= $(INCLUDES) $(CFLAG)
ASFLAGS= $(INCLUDES) $(ASFLAG)
@@ -28,13 +26,13 @@ LIBSRC= bn_add.c bn_div.c bn_exp.c bn_lib.c bn_ctx.c bn_mul.c bn_mod.c \
bn_print.c bn_rand.c bn_shift.c bn_word.c bn_blind.c \
bn_kron.c bn_sqrt.c bn_gcd.c bn_prime.c bn_err.c bn_sqr.c bn_asm.c \
bn_recp.c bn_mont.c bn_mpi.c bn_exp2.c bn_gf2m.c bn_nist.c \
- bn_depr.c bn_x931p.c bn_const.c bn_opt.c
+ bn_depr.c bn_const.c bn_x931p.c
LIBOBJ= bn_add.o bn_div.o bn_exp.o bn_lib.o bn_ctx.o bn_mul.o bn_mod.o \
bn_print.o bn_rand.o bn_shift.o bn_word.o bn_blind.o \
bn_kron.o bn_sqrt.o bn_gcd.o bn_prime.o bn_err.o bn_sqr.o $(BN_ASM) \
bn_recp.o bn_mont.o bn_mpi.o bn_exp2.o bn_gf2m.o bn_nist.o \
- bn_depr.o bn_x931p.o bn_const.o bn_opt.o
+ bn_depr.o bn_const.o bn_x931p.o
SRC= $(LIBSRC)
@@ -58,36 +56,27 @@ bnbug: bnbug.c ../../libcrypto.a top
cc -g -I../../include bnbug.c -o bnbug ../../libcrypto.a
lib: $(LIBOBJ)
- $(ARX) $(LIB) $(LIBOBJ)
+ $(AR) $(LIB) $(LIBOBJ)
$(RANLIB) $(LIB) || echo Never mind.
@touch lib
-# ELF
-bn86-elf.s: asm/bn-586.pl ../perlasm/x86asm.pl
- (cd asm; $(PERL) bn-586.pl elf $(CFLAGS) > ../$@)
-co86-elf.s: asm/co-586.pl ../perlasm/x86asm.pl
- (cd asm; $(PERL) co-586.pl elf $(CFLAGS) > ../$@)
-mo86-elf.s: asm/mo-586.pl ../perlasm/x86asm.pl
- (cd asm; $(PERL) mo-586.pl elf $(CFLAGS) > ../$@)
-# COFF
-bn86-cof.s: asm/bn-586.pl ../perlasm/x86asm.pl
- (cd asm; $(PERL) bn-586.pl coff $(CFLAGS) > ../$@)
-co86-cof.s: asm/co-586.pl ../perlasm/x86asm.pl
- (cd asm; $(PERL) co-586.pl coff $(CFLAGS) > ../$@)
-mo86-cof.s: asm/mo-586.pl ../perlasm/x86asm.pl
- (cd asm; $(PERL) mo-586.pl coff $(CFLAGS) > ../$@)
-# a.out
-bn86-out.s: asm/bn-586.pl ../perlasm/x86asm.pl
- (cd asm; $(PERL) bn-586.pl a.out $(CFLAGS) > ../$@)
-co86-out.s: asm/co-586.pl ../perlasm/x86asm.pl
- (cd asm; $(PERL) co-586.pl a.out $(CFLAGS) > ../$@)
-mo86-out.s: asm/mo-586.pl ../perlasm/x86asm.pl
- (cd asm; $(PERL) mo-586.pl a.out $(CFLAGS) > ../$@)
+bn-586.s: asm/bn-586.pl ../perlasm/x86asm.pl
+ $(PERL) asm/bn-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
+co-586.s: asm/co-586.pl ../perlasm/x86asm.pl
+ $(PERL) asm/co-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
+x86-mont.s: asm/x86-mont.pl ../perlasm/x86asm.pl
+ $(PERL) asm/x86-mont.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
+x86-gf2m.s: asm/x86-gf2m.pl ../perlasm/x86asm.pl
+ $(PERL) asm/x86-gf2m.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
sparcv8.o: asm/sparcv8.S
$(CC) $(CFLAGS) -c asm/sparcv8.S
-sparcv8plus.o: asm/sparcv8plus.S
- $(CC) $(CFLAGS) -c asm/sparcv8plus.S
+bn-sparcv9.o: asm/sparcv8plus.S
+ $(CC) $(CFLAGS) -c -o $@ asm/sparcv8plus.S
+sparcv9a-mont.s: asm/sparcv9a-mont.pl
+ $(PERL) asm/sparcv9a-mont.pl $(CFLAGS) > $@
+sparcv9-mont.s: asm/sparcv9-mont.pl
+ $(PERL) asm/sparcv9-mont.pl $(CFLAGS) > $@
bn-mips3.o: asm/mips3.s
@if [ "$(CC)" = "gcc" ]; then \
@@ -95,13 +84,31 @@ bn-mips3.o: asm/mips3.s
as -$$ABI -O -o $@ asm/mips3.s; \
else $(CC) -c $(CFLAGS) -o $@ asm/mips3.s; fi
+bn-mips.s: asm/mips.pl
+ $(PERL) asm/mips.pl $(PERLASM_SCHEME) $@
+mips-mont.s: asm/mips-mont.pl
+ $(PERL) asm/mips-mont.pl $(PERLASM_SCHEME) $@
+
+bn-s390x.o: asm/s390x.S
+ $(CC) $(CFLAGS) -c -o $@ asm/s390x.S
+s390x-gf2m.s: asm/s390x-gf2m.pl
+ $(PERL) asm/s390x-gf2m.pl $(PERLASM_SCHEME) $@
+
x86_64-gcc.o: asm/x86_64-gcc.c
$(CC) $(CFLAGS) -c -o $@ asm/x86_64-gcc.c
x86_64-mont.s: asm/x86_64-mont.pl
- $(PERL) asm/x86_64-mont.pl $@
+ $(PERL) asm/x86_64-mont.pl $(PERLASM_SCHEME) > $@
+x86_64-mont5.s: asm/x86_64-mont5.pl
+ $(PERL) asm/x86_64-mont5.pl $(PERLASM_SCHEME) > $@
+x86_64-gf2m.s: asm/x86_64-gf2m.pl
+ $(PERL) asm/x86_64-gf2m.pl $(PERLASM_SCHEME) > $@
+modexp512-x86_64.s: asm/modexp512-x86_64.pl
+ $(PERL) asm/modexp512-x86_64.pl $(PERLASM_SCHEME) > $@
bn-ia64.s: asm/ia64.S
$(CC) $(CFLAGS) -E asm/ia64.S > $@
+ia64-mont.s: asm/ia64-mont.pl
+ $(PERL) asm/ia64-mont.pl $@ $(CFLAGS)
# GNU assembler fails to compile PA-RISC2 modules, insist on calling
# vendor assembler...
@@ -109,14 +116,22 @@ pa-risc2W.o: asm/pa-risc2W.s
/usr/ccs/bin/as -o pa-risc2W.o asm/pa-risc2W.s
pa-risc2.o: asm/pa-risc2.s
/usr/ccs/bin/as -o pa-risc2.o asm/pa-risc2.s
+parisc-mont.s: asm/parisc-mont.pl
+ $(PERL) asm/parisc-mont.pl $(PERLASM_SCHEME) $@
# ppc - AIX, Linux, MacOS X...
-linux_ppc32.s: asm/ppc.pl; $(PERL) $< $@
-linux_ppc64.s: asm/ppc.pl; $(PERL) $< $@
-aix_ppc32.s: asm/ppc.pl; $(PERL) asm/ppc.pl $@
-aix_ppc64.s: asm/ppc.pl; $(PERL) asm/ppc.pl $@
-osx_ppc32.s: asm/ppc.pl; $(PERL) $< $@
-osx_ppc64.s: asm/ppc.pl; $(PERL) $< $@
+bn-ppc.s: asm/ppc.pl; $(PERL) asm/ppc.pl $(PERLASM_SCHEME) $@
+ppc-mont.s: asm/ppc-mont.pl;$(PERL) asm/ppc-mont.pl $(PERLASM_SCHEME) $@
+ppc64-mont.s: asm/ppc64-mont.pl;$(PERL) asm/ppc64-mont.pl $(PERLASM_SCHEME) $@
+
+alpha-mont.s: asm/alpha-mont.pl
+ $(PERL) $< | $(CC) -E - | tee $@ > /dev/null
+
+# GNU make "catch all"
+%-mont.s: asm/%-mont.pl; $(PERL) $< $(PERLASM_SCHEME) $@
+%-gf2m.S: asm/%-gf2m.pl; $(PERL) $< $(PERLASM_SCHEME) $@
+
+armv4-gf2m.o: armv4-gf2m.S
files:
$(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO
@@ -184,8 +199,11 @@ bn_blind.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h
bn_blind.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
bn_blind.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
bn_blind.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_blind.c bn_lcl.h
-bn_const.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h
-bn_const.o: ../../include/openssl/ossl_typ.h bn.h bn_const.c
+bn_const.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h
+bn_const.o: ../../include/openssl/opensslconf.h
+bn_const.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
+bn_const.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
+bn_const.o: ../../include/openssl/symhacks.h bn.h bn_const.c
bn_ctx.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h
bn_ctx.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h
bn_ctx.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h
@@ -292,13 +310,6 @@ bn_nist.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h
bn_nist.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
bn_nist.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
bn_nist.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_nist.c
-bn_opt.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h
-bn_opt.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h
-bn_opt.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h
-bn_opt.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h
-bn_opt.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
-bn_opt.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
-bn_opt.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_opt.c
bn_prime.o: ../../e_os.h ../../include/openssl/bio.h ../../include/openssl/bn.h
bn_prime.o: ../../include/openssl/buffer.h ../../include/openssl/crypto.h
bn_prime.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h
@@ -357,6 +368,8 @@ bn_word.o: ../../include/openssl/lhash.h ../../include/openssl/opensslconf.h
bn_word.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
bn_word.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
bn_word.o: ../../include/openssl/symhacks.h ../cryptlib.h bn_lcl.h bn_word.c
-bn_x931p.o: ../../include/openssl/bn.h ../../include/openssl/e_os2.h
-bn_x931p.o: ../../include/openssl/opensslconf.h
-bn_x931p.o: ../../include/openssl/ossl_typ.h bn_x931p.c
+bn_x931p.o: ../../include/openssl/bn.h ../../include/openssl/crypto.h
+bn_x931p.o: ../../include/openssl/e_os2.h ../../include/openssl/opensslconf.h
+bn_x931p.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
+bn_x931p.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
+bn_x931p.o: ../../include/openssl/symhacks.h bn_x931p.c
diff --git a/crypto/bn/asm/armv4-gf2m.pl b/crypto/bn/asm/armv4-gf2m.pl
new file mode 100755
index 000000000000..c52e0b75b5b6
--- /dev/null
+++ b/crypto/bn/asm/armv4-gf2m.pl
@@ -0,0 +1,278 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication
+# used in bn_gf2m.c. It's kind of low-hanging mechanical port from
+# C for the time being... Except that it has two code paths: pure
+# integer code suitable for any ARMv4 and later CPU and NEON code
+# suitable for ARMv7. Pure integer 1x1 multiplication subroutine runs
+# in ~45 cycles on dual-issue core such as Cortex A8, which is ~50%
+# faster than compiler-generated code. For ECDH and ECDSA verify (but
+# not for ECDSA sign) it means 25%-45% improvement depending on key
+# length, more for longer keys. Even though NEON 1x1 multiplication
+# runs in even less cycles, ~30, improvement is measurable only on
+# longer keys. One has to optimize code elsewhere to get NEON glow...
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+sub Q() { shift=~m|d([1-3]?[02468])|?"q".($1/2):""; }
+
+$code=<<___;
+#include "arm_arch.h"
+
+.text
+.code 32
+
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.type mul_1x1_neon,%function
+.align 5
+mul_1x1_neon:
+ vshl.u64 `&Dlo("q1")`,d16,#8 @ q1-q3 are slided $a
+ vmull.p8 `&Q("d0")`,d16,d17 @ a·bb
+ vshl.u64 `&Dlo("q2")`,d16,#16
+ vmull.p8 q1,`&Dlo("q1")`,d17 @ a<<8·bb
+ vshl.u64 `&Dlo("q3")`,d16,#24
+ vmull.p8 q2,`&Dlo("q2")`,d17 @ a<<16·bb
+ vshr.u64 `&Dlo("q1")`,#8
+ vmull.p8 q3,`&Dlo("q3")`,d17 @ a<<24·bb
+ vshl.u64 `&Dhi("q1")`,#24
+ veor d0,`&Dlo("q1")`
+ vshr.u64 `&Dlo("q2")`,#16
+ veor d0,`&Dhi("q1")`
+ vshl.u64 `&Dhi("q2")`,#16
+ veor d0,`&Dlo("q2")`
+ vshr.u64 `&Dlo("q3")`,#24
+ veor d0,`&Dhi("q2")`
+ vshl.u64 `&Dhi("q3")`,#8
+ veor d0,`&Dlo("q3")`
+ veor d0,`&Dhi("q3")`
+ bx lr
+.size mul_1x1_neon,.-mul_1x1_neon
+#endif
+___
+################
+# private interface to mul_1x1_ialu
+#
+$a="r1";
+$b="r0";
+
+($a0,$a1,$a2,$a12,$a4,$a14)=
+($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);
+
+$mask="r12";
+
+$code.=<<___;
+.type mul_1x1_ialu,%function
+.align 5
+mul_1x1_ialu:
+ mov $a0,#0
+ bic $a1,$a,#3<<30 @ a1=a&0x3fffffff
+ str $a0,[sp,#0] @ tab[0]=0
+ add $a2,$a1,$a1 @ a2=a1<<1
+ str $a1,[sp,#4] @ tab[1]=a1
+ eor $a12,$a1,$a2 @ a1^a2
+ str $a2,[sp,#8] @ tab[2]=a2
+ mov $a4,$a1,lsl#2 @ a4=a1<<2
+ str $a12,[sp,#12] @ tab[3]=a1^a2
+ eor $a14,$a1,$a4 @ a1^a4
+ str $a4,[sp,#16] @ tab[4]=a4
+ eor $a0,$a2,$a4 @ a2^a4
+ str $a14,[sp,#20] @ tab[5]=a1^a4
+ eor $a12,$a12,$a4 @ a1^a2^a4
+ str $a0,[sp,#24] @ tab[6]=a2^a4
+ and $i0,$mask,$b,lsl#2
+ str $a12,[sp,#28] @ tab[7]=a1^a2^a4
+
+ and $i1,$mask,$b,lsr#1
+ ldr $lo,[sp,$i0] @ tab[b & 0x7]
+ and $i0,$mask,$b,lsr#4
+ ldr $t1,[sp,$i1] @ tab[b >> 3 & 0x7]
+ and $i1,$mask,$b,lsr#7
+ ldr $t0,[sp,$i0] @ tab[b >> 6 & 0x7]
+ eor $lo,$lo,$t1,lsl#3 @ stall
+ mov $hi,$t1,lsr#29
+ ldr $t1,[sp,$i1] @ tab[b >> 9 & 0x7]
+
+ and $i0,$mask,$b,lsr#10
+ eor $lo,$lo,$t0,lsl#6
+ eor $hi,$hi,$t0,lsr#26
+ ldr $t0,[sp,$i0] @ tab[b >> 12 & 0x7]
+
+ and $i1,$mask,$b,lsr#13
+ eor $lo,$lo,$t1,lsl#9
+ eor $hi,$hi,$t1,lsr#23
+ ldr $t1,[sp,$i1] @ tab[b >> 15 & 0x7]
+
+ and $i0,$mask,$b,lsr#16
+ eor $lo,$lo,$t0,lsl#12
+ eor $hi,$hi,$t0,lsr#20
+ ldr $t0,[sp,$i0] @ tab[b >> 18 & 0x7]
+
+ and $i1,$mask,$b,lsr#19
+ eor $lo,$lo,$t1,lsl#15
+ eor $hi,$hi,$t1,lsr#17
+ ldr $t1,[sp,$i1] @ tab[b >> 21 & 0x7]
+
+ and $i0,$mask,$b,lsr#22
+ eor $lo,$lo,$t0,lsl#18
+ eor $hi,$hi,$t0,lsr#14
+ ldr $t0,[sp,$i0] @ tab[b >> 24 & 0x7]
+
+ and $i1,$mask,$b,lsr#25
+ eor $lo,$lo,$t1,lsl#21
+ eor $hi,$hi,$t1,lsr#11
+ ldr $t1,[sp,$i1] @ tab[b >> 27 & 0x7]
+
+ tst $a,#1<<30
+ and $i0,$mask,$b,lsr#28
+ eor $lo,$lo,$t0,lsl#24
+ eor $hi,$hi,$t0,lsr#8
+ ldr $t0,[sp,$i0] @ tab[b >> 30 ]
+
+ eorne $lo,$lo,$b,lsl#30
+ eorne $hi,$hi,$b,lsr#2
+ tst $a,#1<<31
+ eor $lo,$lo,$t1,lsl#27
+ eor $hi,$hi,$t1,lsr#5
+ eorne $lo,$lo,$b,lsl#31
+ eorne $hi,$hi,$b,lsr#1
+ eor $lo,$lo,$t0,lsl#30
+ eor $hi,$hi,$t0,lsr#2
+
+ mov pc,lr
+.size mul_1x1_ialu,.-mul_1x1_ialu
+___
+################
+# void bn_GF2m_mul_2x2(BN_ULONG *r,
+# BN_ULONG a1,BN_ULONG a0,
+# BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0
+
+($A1,$B1,$A0,$B0,$A1B1,$A0B0)=map("d$_",(18..23));
+
+$code.=<<___;
+.global bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,%function
+.align 5
+bn_GF2m_mul_2x2:
+#if __ARM_ARCH__>=7
+ ldr r12,.LOPENSSL_armcap
+.Lpic: ldr r12,[pc,r12]
+ tst r12,#1
+ beq .Lialu
+
+ veor $A1,$A1
+ vmov.32 $B1,r3,r3 @ two copies of b1
+ vmov.32 ${A1}[0],r1 @ a1
+
+ veor $A0,$A0
+ vld1.32 ${B0}[],[sp,:32] @ two copies of b0
+ vmov.32 ${A0}[0],r2 @ a0
+ mov r12,lr
+
+ vmov d16,$A1
+ vmov d17,$B1
+ bl mul_1x1_neon @ a1·b1
+ vmov $A1B1,d0
+
+ vmov d16,$A0
+ vmov d17,$B0
+ bl mul_1x1_neon @ a0·b0
+ vmov $A0B0,d0
+
+ veor d16,$A0,$A1
+ veor d17,$B0,$B1
+ veor $A0,$A0B0,$A1B1
+ bl mul_1x1_neon @ (a0+a1)·(b0+b1)
+
+ veor d0,$A0 @ (a0+a1)·(b0+b1)-a0·b0-a1·b1
+ vshl.u64 d1,d0,#32
+ vshr.u64 d0,d0,#32
+ veor $A0B0,d1
+ veor $A1B1,d0
+ vst1.32 {${A0B0}[0]},[r0,:32]!
+ vst1.32 {${A0B0}[1]},[r0,:32]!
+ vst1.32 {${A1B1}[0]},[r0,:32]!
+ vst1.32 {${A1B1}[1]},[r0,:32]
+ bx r12
+.align 4
+.Lialu:
+#endif
+___
+$ret="r10"; # reassigned 1st argument
+$code.=<<___;
+ stmdb sp!,{r4-r10,lr}
+ mov $ret,r0 @ reassign 1st argument
+ mov $b,r3 @ $b=b1
+ ldr r3,[sp,#32] @ load b0
+ mov $mask,#7<<2
+ sub sp,sp,#32 @ allocate tab[8]
+
+ bl mul_1x1_ialu @ a1·b1
+ str $lo,[$ret,#8]
+ str $hi,[$ret,#12]
+
+ eor $b,$b,r3 @ flip b0 and b1
+ eor $a,$a,r2 @ flip a0 and a1
+ eor r3,r3,$b
+ eor r2,r2,$a
+ eor $b,$b,r3
+ eor $a,$a,r2
+ bl mul_1x1_ialu @ a0·b0
+ str $lo,[$ret]
+ str $hi,[$ret,#4]
+
+ eor $a,$a,r2
+ eor $b,$b,r3
+ bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
+___
+@r=map("r$_",(6..9));
+$code.=<<___;
+ ldmia $ret,{@r[0]-@r[3]}
+ eor $lo,$lo,$hi
+ eor $hi,$hi,@r[1]
+ eor $lo,$lo,@r[0]
+ eor $hi,$hi,@r[2]
+ eor $lo,$lo,@r[3]
+ eor $hi,$hi,@r[3]
+ str $hi,[$ret,#8]
+ eor $lo,$lo,$hi
+ add sp,sp,#32 @ destroy tab[8]
+ str $lo,[$ret,#4]
+
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r10,pc}
+#else
+ ldmia sp!,{r4-r10,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+#if __ARM_ARCH__>=7
+.align 5
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-(.Lpic+8)
+#endif
+.asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 5
+
+.comm OPENSSL_armcap_P,4,4
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+print $code;
+close STDOUT; # enforce flush
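
The armv4-gf2m.pl comments above describe a Karatsuba-style split: bn_GF2m_mul_2x2 forms three 1x1 carry-less products (a1·b1, a0·b0 and (a0+a1)·(b0+b1)) and XORs them to recover the middle 64 bits. Below is a minimal C sketch of that combination, assuming 32-bit limbs; the shift-and-XOR gf2m_mul_1x1() helper is only a stand-in for the module's table-driven mul_1x1_ialu and NEON mul_1x1_neon routines.

    /*
     * Reference sketch (not the assembly above): combining three 1x1
     * carry-less products into the 2x2 result r[3..0] = a1a0 * b1b0.
     */
    #include <stdint.h>

    static void gf2m_mul_1x1(uint32_t *hi, uint32_t *lo, uint32_t a, uint32_t b)
    {
        uint64_t acc = 0;
        for (int i = 0; i < 32; i++)                 /* plain carry-less multiply */
            if ((b >> i) & 1)
                acc ^= (uint64_t)a << i;
        *lo = (uint32_t)acc;
        *hi = (uint32_t)(acc >> 32);
    }

    void gf2m_mul_2x2(uint32_t r[4], uint32_t a1, uint32_t a0,
                      uint32_t b1, uint32_t b0)
    {
        uint32_t mhi, mlo;

        gf2m_mul_1x1(&r[3], &r[2], a1, b1);          /* a1*b1 -> r[3:2] */
        gf2m_mul_1x1(&r[1], &r[0], a0, b0);          /* a0*b0 -> r[1:0] */
        gf2m_mul_1x1(&mhi, &mlo, a0 ^ a1, b0 ^ b1);  /* (a0+a1)*(b0+b1) */

        mlo ^= r[0] ^ r[2];                          /* middle term: remove */
        mhi ^= r[1] ^ r[3];                          /* a0*b0 and a1*b1 (XOR in GF(2)) */
        r[1] ^= mlo;                                 /* fold into bits 32..95 */
        r[2] ^= mhi;
    }

The XOR-only folds work because addition and subtraction coincide in GF(2), which is why the assembly can evaluate (a0+a1)·(b0+b1)-a0·b0-a1·b1 entirely with eor/veor.
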
diff --git a/crypto/bn/asm/armv4-mont.pl b/crypto/bn/asm/armv4-mont.pl
new file mode 100755
index 000000000000..f78a8b5f0f55
--- /dev/null
+++ b/crypto/bn/asm/armv4-mont.pl
@@ -0,0 +1,204 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# January 2007.
+
+# Montgomery multiplication for ARMv4.
+#
+# Performance improvement naturally varies among CPU implementations
+# and compilers. The code was observed to provide +65-35% improvement
+# [depending on key length, less for longer keys] on ARM920T, and
+# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code
+# base and compiler generated code with in-lined umull and even umlal
+# instructions. The latter means that this code didn't really have an
+# "advantage" of utilizing some "secret" instruction.
+#
+# The code is interoperable with Thumb ISA and is rather compact, less
+# than 1/2KB. Windows CE port would be trivial, as it's exclusively
+# about decorations, ABI and instruction syntax are identical.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$num="r0"; # starts as num argument, but holds &tp[num-1]
+$ap="r1";
+$bp="r2"; $bi="r2"; $rp="r2";
+$np="r3";
+$tp="r4";
+$aj="r5";
+$nj="r6";
+$tj="r7";
+$n0="r8";
+########### # r9 is reserved by ELF as platform specific, e.g. TLS pointer
+$alo="r10"; # sl, gcc uses it to keep @GOT
+$ahi="r11"; # fp
+$nlo="r12"; # ip
+########### # r13 is stack pointer
+$nhi="r14"; # lr
+########### # r15 is program counter
+
+#### argument block layout relative to &tp[num-1], a.k.a. $num
+$_rp="$num,#12*4";
+# ap permanently resides in r1
+$_bp="$num,#13*4";
+# np permanently resides in r3
+$_n0="$num,#14*4";
+$_num="$num,#15*4"; $_bpend=$_num;
+
+$code=<<___;
+.text
+
+.global bn_mul_mont
+.type bn_mul_mont,%function
+
+.align 2
+bn_mul_mont:
+ stmdb sp!,{r0,r2} @ sp points at argument block
+ ldr $num,[sp,#3*4] @ load num
+ cmp $num,#2
+ movlt r0,#0
+ addlt sp,sp,#2*4
+ blt .Labrt
+
+ stmdb sp!,{r4-r12,lr} @ save 10 registers
+
+ mov $num,$num,lsl#2 @ rescale $num for byte count
+ sub sp,sp,$num @ alloca(4*num)
+ sub sp,sp,#4 @ +extra dword
+ sub $num,$num,#4 @ "num=num-1"
+ add $tp,$bp,$num @ &bp[num-1]
+
+ add $num,sp,$num @ $num to point at &tp[num-1]
+ ldr $n0,[$_n0] @ &n0
+ ldr $bi,[$bp] @ bp[0]
+ ldr $aj,[$ap],#4 @ ap[0],ap++
+ ldr $nj,[$np],#4 @ np[0],np++
+ ldr $n0,[$n0] @ *n0
+ str $tp,[$_bpend] @ save &bp[num]
+
+ umull $alo,$ahi,$aj,$bi @ ap[0]*bp[0]
+ str $n0,[$_n0] @ save n0 value
+ mul $n0,$alo,$n0 @ "tp[0]"*n0
+ mov $nlo,#0
+ umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"t[0]"
+ mov $tp,sp
+
+.L1st:
+ ldr $aj,[$ap],#4 @ ap[j],ap++
+ mov $alo,$ahi
+ ldr $nj,[$np],#4 @ np[j],np++
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[0]
+ mov $nhi,#0
+ umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
+ adds $nlo,$nlo,$alo
+ str $nlo,[$tp],#4 @ tp[j-1]=,tp++
+ adc $nlo,$nhi,#0
+ cmp $tp,$num
+ bne .L1st
+
+ adds $nlo,$nlo,$ahi
+ ldr $tp,[$_bp] @ restore bp
+ mov $nhi,#0
+ ldr $n0,[$_n0] @ restore n0
+ adc $nhi,$nhi,#0
+ str $nlo,[$num] @ tp[num-1]=
+ str $nhi,[$num,#4] @ tp[num]=
+
+.Louter:
+ sub $tj,$num,sp @ "original" $num-1 value
+ sub $ap,$ap,$tj @ "rewind" ap to &ap[1]
+ ldr $bi,[$tp,#4]! @ *(++bp)
+ sub $np,$np,$tj @ "rewind" np to &np[1]
+ ldr $aj,[$ap,#-4] @ ap[0]
+ ldr $alo,[sp] @ tp[0]
+ ldr $nj,[$np,#-4] @ np[0]
+ ldr $tj,[sp,#4] @ tp[1]
+
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[0]*bp[i]+tp[0]
+ str $tp,[$_bp] @ save bp
+ mul $n0,$alo,$n0
+ mov $nlo,#0
+ umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]"
+ mov $tp,sp
+
+.Linner:
+ ldr $aj,[$ap],#4 @ ap[j],ap++
+ adds $alo,$ahi,$tj @ +=tp[j]
+ ldr $nj,[$np],#4 @ np[j],np++
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[i]
+ mov $nhi,#0
+ umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
+ adc $ahi,$ahi,#0
+ ldr $tj,[$tp,#8] @ tp[j+1]
+ adds $nlo,$nlo,$alo
+ str $nlo,[$tp],#4 @ tp[j-1]=,tp++
+ adc $nlo,$nhi,#0
+ cmp $tp,$num
+ bne .Linner
+
+ adds $nlo,$nlo,$ahi
+ mov $nhi,#0
+ ldr $tp,[$_bp] @ restore bp
+ adc $nhi,$nhi,#0
+ ldr $n0,[$_n0] @ restore n0
+ adds $nlo,$nlo,$tj
+ ldr $tj,[$_bpend] @ restore &bp[num]
+ adc $nhi,$nhi,#0
+ str $nlo,[$num] @ tp[num-1]=
+ str $nhi,[$num,#4] @ tp[num]=
+
+ cmp $tp,$tj
+ bne .Louter
+
+ ldr $rp,[$_rp] @ pull rp
+ add $num,$num,#4 @ $num to point at &tp[num]
+ sub $aj,$num,sp @ "original" num value
+ mov $tp,sp @ "rewind" $tp
+ mov $ap,$tp @ "borrow" $ap
+ sub $np,$np,$aj @ "rewind" $np to &np[0]
+
+ subs $tj,$tj,$tj @ "clear" carry flag
+.Lsub: ldr $tj,[$tp],#4
+ ldr $nj,[$np],#4
+ sbcs $tj,$tj,$nj @ tp[j]-np[j]
+ str $tj,[$rp],#4 @ rp[j]=
+ teq $tp,$num @ preserve carry
+ bne .Lsub
+ sbcs $nhi,$nhi,#0 @ upmost carry
+ mov $tp,sp @ "rewind" $tp
+ sub $rp,$rp,$aj @ "rewind" $rp
+
+ and $ap,$tp,$nhi
+ bic $np,$rp,$nhi
+ orr $ap,$ap,$np @ ap=borrow?tp:rp
+
+.Lcopy: ldr $tj,[$ap],#4 @ copy or in-place refresh
+ str sp,[$tp],#4 @ zap tp
+ str $tj,[$rp],#4
+ cmp $tp,$num
+ bne .Lcopy
+
+ add sp,$num,#4 @ skip over tp[num+1]
+ ldmia sp!,{r4-r12,lr} @ restore registers
+ add sp,sp,#2*4 @ skip over {r0,r2}
+ mov r0,#1
+.Labrt: tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+.size bn_mul_mont,.-bn_mul_mont
+.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+print $code;
+close STDOUT;
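
The loops annotated in armv4-mont.pl (.L1st, .Louter, .Linner, .Lsub, .Lcopy) follow the standard word-by-word Montgomery multiplication: for each word bp[i] the code accumulates ap[j]*bp[i] into a temporary tp[], derives m = tp[0]*n0 mod 2^32 so that adding m*np[j] clears the low word, shifts the running sum down one word, and finally subtracts the modulus once if the result is still too large. A hedged C sketch of that structure follows; the function name and 32-bit limb type are illustrative, not the OpenSSL interface.

    /*
     * Sketch of the word-level Montgomery multiplication the assembly
     * implements: rp[] = ap[] * bp[] * R^-1 mod np[], with R = 2^(32*num).
     */
    #include <stdint.h>
    #include <string.h>

    void mont_mul_words(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
                        const uint32_t *np, uint32_t n0, int num)
    {
        uint32_t tp[num + 1];                        /* tp[num] holds the top carry */
        memset(tp, 0, sizeof(tp));

        for (int i = 0; i < num; i++) {              /* .Louter */
            uint32_t m = (uint32_t)((tp[0] + (uint64_t)ap[0] * bp[i]) * n0);
            uint64_t c = 0, d = 0;

            for (int j = 0; j < num; j++) {          /* .L1st / .Linner */
                c = (uint64_t)ap[j] * bp[i] + tp[j] + c;   /* ap[j]*bp[i] lane */
                d = (uint64_t)np[j] * m + (uint32_t)c + d; /* np[j]*m lane     */
                if (j > 0)
                    tp[j - 1] = (uint32_t)d;         /* low word at j == 0 is zero */
                c >>= 32;
                d >>= 32;
            }
            d += c + tp[num];
            tp[num - 1] = (uint32_t)d;
            tp[num]     = (uint32_t)(d >> 32);
        }

        /* .Lsub / .Lcopy: one conditional subtraction of the modulus */
        uint64_t borrow = 0;
        for (int j = 0; j < num; j++) {
            uint64_t v = (uint64_t)tp[j] - np[j] - borrow;
            rp[j] = (uint32_t)v;
            borrow = (v >> 32) & 1;
        }
        if (borrow > tp[num])                        /* tp < np: keep tp instead */
            memcpy(rp, tp, num * sizeof(uint32_t));
    }

The assembly performs the final selection branch-free (the and/bic/orr sequence before .Lcopy), but the arithmetic is the same.
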
diff --git a/crypto/bn/asm/bn-586.pl b/crypto/bn/asm/bn-586.pl
index 26c2685a726e..332ef3e91d62 100644
--- a/crypto/bn/asm/bn-586.pl
+++ b/crypto/bn/asm/bn-586.pl
@@ -1,6 +1,7 @@
#!/usr/local/bin/perl
-push(@INC,"perlasm","../../perlasm");
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";
&asm_init($ARGV[0],$0);
@@ -24,38 +25,25 @@ sub bn_mul_add_words
{
local($name)=@_;
- &function_begin($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+ &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
- &comment("");
- $Low="eax";
- $High="edx";
- $a="ebx";
- $w="ebp";
- $r="edi";
- $c="esi";
-
- &xor($c,$c); # clear carry
- &mov($r,&wparam(0)); #
-
- &mov("ecx",&wparam(2)); #
- &mov($a,&wparam(1)); #
-
- &and("ecx",0xfffffff8); # num / 8
- &mov($w,&wparam(3)); #
-
- &push("ecx"); # Up the stack for a tmp variable
-
- &jz(&label("maw_finish"));
+ $r="eax";
+ $a="edx";
+ $c="ecx";
if ($sse2) {
&picmeup("eax","OPENSSL_ia32cap_P");
&bt(&DWP(0,"eax"),26);
- &jnc(&label("maw_loop"));
+ &jnc(&label("maw_non_sse2"));
- &movd("mm0",$w); # mm0 = w
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &mov($c,&wparam(2));
+ &movd("mm0",&wparam(3)); # mm0 = w
&pxor("mm1","mm1"); # mm1 = carry_in
-
- &set_label("maw_sse2_loop",0);
+ &jmp(&label("maw_sse2_entry"));
+
+ &set_label("maw_sse2_unrolled",16);
&movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0]
&paddq("mm1","mm3"); # mm1 = carry_in + r[0]
&movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0]
@@ -112,42 +100,82 @@ sub bn_mul_add_words
&psrlq("mm1",32); # mm1 = carry6
&paddq("mm1","mm3"); # mm1 = carry6 + r[7] + w*a[7]
&movd(&DWP(28,$r,"",0),"mm1");
- &add($r,32);
+ &lea($r,&DWP(32,$r));
&psrlq("mm1",32); # mm1 = carry_out
- &sub("ecx",8);
+ &sub($c,8);
+ &jz(&label("maw_sse2_exit"));
+ &set_label("maw_sse2_entry");
+ &test($c,0xfffffff8);
+ &jnz(&label("maw_sse2_unrolled"));
+
+ &set_label("maw_sse2_loop",4);
+ &movd("mm2",&DWP(0,$a)); # mm2 = a[i]
+ &movd("mm3",&DWP(0,$r)); # mm3 = r[i]
+ &pmuludq("mm2","mm0"); # a[i] *= w
+ &lea($a,&DWP(4,$a));
+ &paddq("mm1","mm3"); # carry += r[i]
+ &paddq("mm1","mm2"); # carry += a[i]*w
+ &movd(&DWP(0,$r),"mm1"); # r[i] = carry_low
+ &sub($c,1);
+ &psrlq("mm1",32); # carry = carry_high
+ &lea($r,&DWP(4,$r));
&jnz(&label("maw_sse2_loop"));
-
- &movd($c,"mm1"); # c = carry_out
+ &set_label("maw_sse2_exit");
+ &movd("eax","mm1"); # c = carry_out
&emms();
+ &ret();
- &jmp(&label("maw_finish"));
+ &set_label("maw_non_sse2",16);
}
- &set_label("maw_loop",0);
+ # function_begin prologue
+ &push("ebp");
+ &push("ebx");
+ &push("esi");
+ &push("edi");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ebp";
+ $r="edi";
+ $c="esi";
+
+ &xor($c,$c); # clear carry
+ &mov($r,&wparam(0)); #
+
+ &mov("ecx",&wparam(2)); #
+ &mov($a,&wparam(1)); #
+
+ &and("ecx",0xfffffff8); # num / 8
+ &mov($w,&wparam(3)); #
- &mov(&swtmp(0),"ecx"); #
+ &push("ecx"); # Up the stack for a tmp variable
+
+ &jz(&label("maw_finish"));
+
+ &set_label("maw_loop",16);
for ($i=0; $i<32; $i+=4)
{
&comment("Round $i");
- &mov("eax",&DWP($i,$a,"",0)); # *a
+ &mov("eax",&DWP($i,$a)); # *a
&mul($w); # *a * w
- &add("eax",$c); # L(t)+= *r
- &mov($c,&DWP($i,$r,"",0)); # L(t)+= *r
+ &add("eax",$c); # L(t)+= c
&adc("edx",0); # H(t)+=carry
- &add("eax",$c); # L(t)+=c
+ &add("eax",&DWP($i,$r)); # L(t)+= *r
&adc("edx",0); # H(t)+=carry
- &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
+ &mov(&DWP($i,$r),"eax"); # *r= L(t);
&mov($c,"edx"); # c= H(t);
}
&comment("");
- &mov("ecx",&swtmp(0)); #
- &add($a,32);
- &add($r,32);
&sub("ecx",8);
+ &lea($a,&DWP(32,$a));
+ &lea($r,&DWP(32,$r));
&jnz(&label("maw_loop"));
&set_label("maw_finish",0);
@@ -160,16 +188,15 @@ sub bn_mul_add_words
for ($i=0; $i<7; $i++)
{
&comment("Tail Round $i");
- &mov("eax",&DWP($i*4,$a,"",0));# *a
+ &mov("eax",&DWP($i*4,$a)); # *a
&mul($w); # *a * w
&add("eax",$c); # L(t)+=c
- &mov($c,&DWP($i*4,$r,"",0)); # L(t)+= *r
&adc("edx",0); # H(t)+=carry
- &add("eax",$c);
+ &add("eax",&DWP($i*4,$r)); # L(t)+= *r
&adc("edx",0); # H(t)+=carry
&dec("ecx") if ($i != 7-1);
- &mov(&DWP($i*4,$r,"",0),"eax"); # *r= L(t);
- &mov($c,"edx"); # c= H(t);
+ &mov(&DWP($i*4,$r),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
&jz(&label("maw_end")) if ($i != 7-1);
}
&set_label("maw_end",0);
@@ -184,7 +211,45 @@ sub bn_mul_words
{
local($name)=@_;
- &function_begin($name,"");
+ &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+
+ $r="eax";
+ $a="edx";
+ $c="ecx";
+
+ if ($sse2) {
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt(&DWP(0,"eax"),26);
+ &jnc(&label("mw_non_sse2"));
+
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &mov($c,&wparam(2));
+ &movd("mm0",&wparam(3)); # mm0 = w
+ &pxor("mm1","mm1"); # mm1 = carry = 0
+
+ &set_label("mw_sse2_loop",16);
+ &movd("mm2",&DWP(0,$a)); # mm2 = a[i]
+ &pmuludq("mm2","mm0"); # a[i] *= w
+ &lea($a,&DWP(4,$a));
+ &paddq("mm1","mm2"); # carry += a[i]*w
+ &movd(&DWP(0,$r),"mm1"); # r[i] = carry_low
+ &sub($c,1);
+ &psrlq("mm1",32); # carry = carry_high
+ &lea($r,&DWP(4,$r));
+ &jnz(&label("mw_sse2_loop"));
+
+ &movd("eax","mm1"); # return carry
+ &emms();
+ &ret();
+ &set_label("mw_non_sse2",16);
+ }
+
+ # function_begin prologue
+ &push("ebp");
+ &push("ebx");
+ &push("esi");
+ &push("edi");
&comment("");
$Low="eax";
@@ -257,7 +322,40 @@ sub bn_sqr_words
{
local($name)=@_;
- &function_begin($name,"");
+ &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+
+ $r="eax";
+ $a="edx";
+ $c="ecx";
+
+ if ($sse2) {
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt(&DWP(0,"eax"),26);
+ &jnc(&label("sqr_non_sse2"));
+
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &mov($c,&wparam(2));
+
+ &set_label("sqr_sse2_loop",16);
+ &movd("mm0",&DWP(0,$a)); # mm0 = a[i]
+ &pmuludq("mm0","mm0"); # a[i] *= a[i]
+ &lea($a,&DWP(4,$a)); # a++
+ &movq(&QWP(0,$r),"mm0"); # r[i] = a[i]*a[i]
+ &sub($c,1);
+ &lea($r,&DWP(8,$r)); # r += 2
+ &jnz(&label("sqr_sse2_loop"));
+
+ &emms();
+ &ret();
+ &set_label("sqr_non_sse2",16);
+ }
+
+ # function_begin prologue
+ &push("ebp");
+ &push("ebx");
+ &push("esi");
+ &push("edi");
&comment("");
$r="esi";
@@ -313,12 +411,13 @@ sub bn_div_words
{
local($name)=@_;
- &function_begin($name,"");
+ &function_begin_B($name,"");
&mov("edx",&wparam(0)); #
&mov("eax",&wparam(1)); #
- &mov("ebx",&wparam(2)); #
- &div("ebx");
- &function_end($name);
+ &mov("ecx",&wparam(2)); #
+ &div("ecx");
+ &ret();
+ &function_end_B($name);
}
sub bn_add_words
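
For reference, the word-level semantics that bn-586.pl's bn_mul_add_words and bn_mul_words implement, and that the new SSE2 paths reproduce with pmuludq/paddq and a 64-bit MMX carry, are a plain multiply-accumulate over 32-bit limbs. The C sketch below assumes the portable bn_asm.c signatures with a 32-bit BN_ULONG; the _ref suffix marks it as illustrative only.

    #include <stdint.h>

    typedef uint32_t BN_ULONG;                  /* 32-bit limbs on x86 */

    /* rp[i] += ap[i] * w; returns the final carry word */
    BN_ULONG bn_mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                                  int num, BN_ULONG w)
    {
        uint64_t carry = 0;
        while (num--) {
            carry += (uint64_t)*ap++ * w + *rp; /* matches the pmuludq/paddq loop */
            *rp++ = (BN_ULONG)carry;            /* store low half                 */
            carry >>= 32;                       /* keep high half as carry        */
        }
        return (BN_ULONG)carry;
    }

    /* rp[i] = ap[i] * w; returns the final carry word */
    BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                              int num, BN_ULONG w)
    {
        uint64_t carry = 0;
        while (num--) {
            carry += (uint64_t)*ap++ * w;
            *rp++ = (BN_ULONG)carry;
            carry >>= 32;
        }
        return (BN_ULONG)carry;
    }
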
diff --git a/crypto/bn/asm/co-586.pl b/crypto/bn/asm/co-586.pl
index 5d962cb957d3..57101a6bd775 100644
--- a/crypto/bn/asm/co-586.pl
+++ b/crypto/bn/asm/co-586.pl
@@ -1,6 +1,7 @@
#!/usr/local/bin/perl
-push(@INC,"perlasm","../../perlasm");
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";
&asm_init($ARGV[0],$0);
diff --git a/crypto/bn/asm/ia64-mont.pl b/crypto/bn/asm/ia64-mont.pl
new file mode 100755
index 000000000000..e258658428a3
--- /dev/null
+++ b/crypto/bn/asm/ia64-mont.pl
@@ -0,0 +1,851 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# January 2010
+#
+# "Teaser" Montgomery multiplication module for IA-64. There are
+# several possibilities for improvement:
+#
+# - modulo-scheduling outer loop would eliminate quite a number of
+# stalls after ldf8, xma and getf.sig outside inner loop and
+# improve shorter key performance;
+# - shorter vector support [with input vectors being fetched only
+# once] should be added;
+# - 2x unroll with help of n0[1] would make the code scalable on
+# "wider" IA-64, "wider" than Itanium 2 that is, which is not of
+# acute interest, because upcoming Tukwila's individual cores are
+# reportedly based on Itanium 2 design;
+# - dedicated squaring procedure(?);
+#
+# January 2010
+#
+# Shorter vector support is implemented by zero-padding ap and np
+# vectors up to 8 elements, or 512 bits. This means that 256-bit
+# inputs will be processed only 2 times faster than 512-bit inputs,
+# not 4 [as one would expect, because algorithm complexity is n^2].
+# The reason for padding is that inputs shorter than 512 bits won't
+# be processed faster anyway, because minimal critical path of the
+# core loop happens to match 512-bit timing. Either way, it resulted
+# in >100% improvement of 512-bit RSA sign benchmark and 50% - of
+# 1024-bit one [in comparison to original version of *this* module].
+#
+# So far 'openssl speed rsa dsa' output on 900MHz Itanium 2 *with*
+# this module is:
+# sign verify sign/s verify/s
+# rsa 512 bits 0.000290s 0.000024s 3452.8 42031.4
+# rsa 1024 bits 0.000793s 0.000058s 1261.7 17172.0
+# rsa 2048 bits 0.005908s 0.000148s 169.3 6754.0
+# rsa 4096 bits 0.033456s 0.000469s 29.9 2133.6
+# dsa 512 bits 0.000253s 0.000198s 3949.9 5057.0
+# dsa 1024 bits 0.000585s 0.000607s 1708.4 1647.4
+# dsa 2048 bits 0.001453s 0.001703s 688.1 587.4
+#
+# ... and *without* (but still with ia64.S):
+#
+# rsa 512 bits 0.000670s 0.000041s 1491.8 24145.5
+# rsa 1024 bits 0.001988s 0.000080s 502.9 12499.3
+# rsa 2048 bits 0.008702s 0.000189s 114.9 5293.9
+# rsa 4096 bits 0.043860s 0.000533s 22.8 1875.9
+# dsa 512 bits 0.000441s 0.000427s 2265.3 2340.6
+# dsa 1024 bits 0.000823s 0.000867s 1215.6 1153.2
+# dsa 2048 bits 0.001894s 0.002179s 528.1 458.9
+#
+# As it can be seen, RSA sign performance improves by 130-30%,
+# hereafter less for longer keys, while verify - by 74-13%.
+# DSA performance improves by 115-30%.
+
+if ($^O eq "hpux") {
+ $ADDP="addp4";
+ for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
+} else { $ADDP="add"; }
+
+$code=<<___;
+.explicit
+.text
+
+// int bn_mul_mont (BN_ULONG *rp,const BN_ULONG *ap,
+// const BN_ULONG *bp,const BN_ULONG *np,
+// const BN_ULONG *n0p,int num);
+.align 64
+.global bn_mul_mont#
+.proc bn_mul_mont#
+bn_mul_mont:
+ .prologue
+ .body
+{ .mmi; cmp4.le p6,p7=2,r37;;
+(p6) cmp4.lt.unc p8,p9=8,r37
+ mov ret0=r0 };;
+{ .bbb;
+(p9) br.cond.dptk.many bn_mul_mont_8
+(p8) br.cond.dpnt.many bn_mul_mont_general
+(p7) br.ret.spnt.many b0 };;
+.endp bn_mul_mont#
+
+prevfs=r2; prevpr=r3; prevlc=r10; prevsp=r11;
+
+rptr=r8; aptr=r9; bptr=r14; nptr=r15;
+tptr=r16; // &tp[0]
+tp_1=r17; // &tp[-1]
+num=r18; len=r19; lc=r20;
+topbit=r21; // carry bit from tmp[num]
+
+n0=f6;
+m0=f7;
+bi=f8;
+
+.align 64
+.local bn_mul_mont_general#
+.proc bn_mul_mont_general#
+bn_mul_mont_general:
+ .prologue
+{ .mmi; .save ar.pfs,prevfs
+ alloc prevfs=ar.pfs,6,2,0,8
+ $ADDP aptr=0,in1
+ .save ar.lc,prevlc
+ mov prevlc=ar.lc }
+{ .mmi; .vframe prevsp
+ mov prevsp=sp
+ $ADDP bptr=0,in2
+ .save pr,prevpr
+ mov prevpr=pr };;
+
+ .body
+ .rotf alo[6],nlo[4],ahi[8],nhi[6]
+ .rotr a[3],n[3],t[2]
+
+{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
+ ldf8 alo[4]=[aptr],16 // ap[0]
+ $ADDP r30=8,in1 };;
+{ .mmi; ldf8 alo[3]=[r30],16 // ap[1]
+ ldf8 alo[2]=[aptr],16 // ap[2]
+ $ADDP in4=0,in4 };;
+{ .mmi; ldf8 alo[1]=[r30] // ap[3]
+ ldf8 n0=[in4] // n0
+ $ADDP rptr=0,in0 }
+{ .mmi; $ADDP nptr=0,in3
+ mov r31=16
+ zxt4 num=in5 };;
+{ .mmi; ldf8 nlo[2]=[nptr],8 // np[0]
+ shladd len=num,3,r0
+ shladd r31=num,3,r31 };;
+{ .mmi; ldf8 nlo[1]=[nptr],8 // np[1]
+ add lc=-5,num
+ sub r31=sp,r31 };;
+{ .mfb; and sp=-16,r31 // alloca
+ xmpy.hu ahi[2]=alo[4],bi // ap[0]*bp[0]
+ nop.b 0 }
+{ .mfb; nop.m 0
+ xmpy.lu alo[4]=alo[4],bi
+ brp.loop.imp .L1st_ctop,.L1st_cend-16
+ };;
+{ .mfi; nop.m 0
+ xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[0]
+ add tp_1=8,sp }
+{ .mfi; nop.m 0
+ xma.lu alo[3]=alo[3],bi,ahi[2]
+ mov pr.rot=0x20001f<<16
+ // ------^----- (p40) at first (p23)
+ // ----------^^ p[16:20]=1
+ };;
+{ .mfi; nop.m 0
+ xmpy.lu m0=alo[4],n0 // (ap[0]*bp[0])*n0
+ mov ar.lc=lc }
+{ .mfi; nop.m 0
+ fcvt.fxu.s1 nhi[1]=f0
+ mov ar.ec=8 };;
+
+.align 32
+.L1st_ctop:
+.pred.rel "mutex",p40,p42
+{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
+ (p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
+ (p40) add n[2]=n[2],a[2] } // (p23) }
+{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)(p16)
+ (p18) xma.lu alo[2]=alo[2],bi,ahi[1]
+ (p42) add n[2]=n[2],a[2],1 };; // (p23)
+{ .mfi; (p21) getf.sig a[0]=alo[5]
+ (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
+ (p42) cmp.leu p41,p39=n[2],a[2] } // (p23)
+{ .mfi; (p23) st8 [tp_1]=n[2],8
+ (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
+ (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
+{ .mmb; (p21) getf.sig n[0]=nlo[3]
+ (p16) nop.m 0
+ br.ctop.sptk .L1st_ctop };;
+.L1st_cend:
+
+{ .mmi; getf.sig a[0]=ahi[6] // (p24)
+ getf.sig n[0]=nhi[4]
+ add num=-1,num };; // num--
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) add n[0]=n[0],a[0]
+(p42) add n[0]=n[0],a[0],1
+ sub aptr=aptr,len };; // rewind
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) cmp.ltu p41,p39=n[0],a[0]
+(p42) cmp.leu p41,p39=n[0],a[0]
+ sub nptr=nptr,len };;
+{ .mmi; .pred.rel "mutex",p39,p41
+(p39) add topbit=r0,r0
+(p41) add topbit=r0,r0,1
+ nop.i 0 }
+{ .mmi; st8 [tp_1]=n[0]
+ add tptr=16,sp
+ add tp_1=8,sp };;
+
+.Louter:
+{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
+ ldf8 ahi[3]=[tptr] // tp[0]
+ add r30=8,aptr };;
+{ .mmi; ldf8 alo[4]=[aptr],16 // ap[0]
+ ldf8 alo[3]=[r30],16 // ap[1]
+ add r31=8,nptr };;
+{ .mfb; ldf8 alo[2]=[aptr],16 // ap[2]
+ xma.hu ahi[2]=alo[4],bi,ahi[3] // ap[0]*bp[i]+tp[0]
+ brp.loop.imp .Linner_ctop,.Linner_cend-16
+ }
+{ .mfb; ldf8 alo[1]=[r30] // ap[3]
+ xma.lu alo[4]=alo[4],bi,ahi[3]
+ clrrrb.pr };;
+{ .mfi; ldf8 nlo[2]=[nptr],16 // np[0]
+ xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[i]
+ nop.i 0 }
+{ .mfi; ldf8 nlo[1]=[r31] // np[1]
+ xma.lu alo[3]=alo[3],bi,ahi[2]
+ mov pr.rot=0x20101f<<16
+ // ------^----- (p40) at first (p23)
+ // --------^--- (p30) at first (p22)
+ // ----------^^ p[16:20]=1
+ };;
+{ .mfi; st8 [tptr]=r0 // tp[0] is already accounted
+ xmpy.lu m0=alo[4],n0 // (ap[0]*bp[i]+tp[0])*n0
+ mov ar.lc=lc }
+{ .mfi;
+ fcvt.fxu.s1 nhi[1]=f0
+ mov ar.ec=8 };;
+
+// This loop spins in 4*(n+7) ticks on Itanium 2 and should spin in
+// 7*(n+7) ticks on Itanium (the one codenamed Merced). Factor of 7
+// in latter case accounts for two-tick pipeline stall, which means
+// that its performance would be ~20% lower than optimal one. No
+// attempt was made to address this, because original Itanium is
+// hardly represented out in the wild...
+.align 32
+.Linner_ctop:
+.pred.rel "mutex",p40,p42
+.pred.rel "mutex",p30,p32
+{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
+ (p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
+ (p40) add n[2]=n[2],a[2] } // (p23)
+{ .mfi; (p16) nop.m 0
+ (p18) xma.lu alo[2]=alo[2],bi,ahi[1]
+ (p42) add n[2]=n[2],a[2],1 };; // (p23)
+{ .mfi; (p21) getf.sig a[0]=alo[5]
+ (p16) nop.f 0
+ (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
+{ .mfi; (p21) ld8 t[0]=[tptr],8
+ (p16) nop.f 0
+ (p42) cmp.leu p41,p39=n[2],a[2] };; // (p23)
+{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)
+ (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
+ (p30) add a[1]=a[1],t[1] } // (p22)
+{ .mfi; (p16) nop.m 0
+ (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
+ (p32) add a[1]=a[1],t[1],1 };; // (p22)
+{ .mmi; (p21) getf.sig n[0]=nlo[3]
+ (p16) nop.m 0
+ (p30) cmp.ltu p31,p29=a[1],t[1] } // (p22)
+{ .mmb; (p23) st8 [tp_1]=n[2],8
+ (p32) cmp.leu p31,p29=a[1],t[1] // (p22)
+ br.ctop.sptk .Linner_ctop };;
+.Linner_cend:
+
+{ .mmi; getf.sig a[0]=ahi[6] // (p24)
+ getf.sig n[0]=nhi[4]
+ nop.i 0 };;
+
+{ .mmi; .pred.rel "mutex",p31,p33
+(p31) add a[0]=a[0],topbit
+(p33) add a[0]=a[0],topbit,1
+ mov topbit=r0 };;
+{ .mfi; .pred.rel "mutex",p31,p33
+(p31) cmp.ltu p32,p30=a[0],topbit
+(p33) cmp.leu p32,p30=a[0],topbit
+ }
+{ .mfi; .pred.rel "mutex",p40,p42
+(p40) add n[0]=n[0],a[0]
+(p42) add n[0]=n[0],a[0],1
+ };;
+{ .mmi; .pred.rel "mutex",p44,p46
+(p40) cmp.ltu p41,p39=n[0],a[0]
+(p42) cmp.leu p41,p39=n[0],a[0]
+(p32) add topbit=r0,r0,1 }
+
+{ .mmi; st8 [tp_1]=n[0],8
+ cmp4.ne p6,p0=1,num
+ sub aptr=aptr,len };; // rewind
+{ .mmi; sub nptr=nptr,len
+(p41) add topbit=r0,r0,1
+ add tptr=16,sp }
+{ .mmb; add tp_1=8,sp
+ add num=-1,num // num--
+(p6) br.cond.sptk.many .Louter };;
+
+{ .mbb; add lc=4,lc
+ brp.loop.imp .Lsub_ctop,.Lsub_cend-16
+ clrrrb.pr };;
+{ .mii; nop.m 0
+ mov pr.rot=0x10001<<16
+ // ------^---- (p33) at first (p17)
+ mov ar.lc=lc }
+{ .mii; nop.m 0
+ mov ar.ec=3
+ nop.i 0 };;
+
+.Lsub_ctop:
+.pred.rel "mutex",p33,p35
+{ .mfi; (p16) ld8 t[0]=[tptr],8 // t=*(tp++)
+ (p16) nop.f 0
+ (p33) sub n[1]=t[1],n[1] } // (p17)
+{ .mfi; (p16) ld8 n[0]=[nptr],8 // n=*(np++)
+ (p16) nop.f 0
+ (p35) sub n[1]=t[1],n[1],1 };; // (p17)
+{ .mib; (p18) st8 [rptr]=n[2],8 // *(rp++)=r
+ (p33) cmp.gtu p34,p32=n[1],t[1] // (p17)
+ (p18) nop.b 0 }
+{ .mib; (p18) nop.m 0
+ (p35) cmp.geu p34,p32=n[1],t[1] // (p17)
+ br.ctop.sptk .Lsub_ctop };;
+.Lsub_cend:
+
+{ .mmb; .pred.rel "mutex",p34,p36
+(p34) sub topbit=topbit,r0 // (p19)
+(p36) sub topbit=topbit,r0,1
+ brp.loop.imp .Lcopy_ctop,.Lcopy_cend-16
+ }
+{ .mmb; sub rptr=rptr,len // rewind
+ sub tptr=tptr,len
+ clrrrb.pr };;
+{ .mmi; and aptr=tptr,topbit
+ andcm bptr=rptr,topbit
+ mov pr.rot=1<<16 };;
+{ .mii; or nptr=aptr,bptr
+ mov ar.lc=lc
+ mov ar.ec=3 };;
+
+.Lcopy_ctop:
+{ .mmb; (p16) ld8 n[0]=[nptr],8
+ (p18) st8 [tptr]=r0,8
+ (p16) nop.b 0 }
+{ .mmb; (p16) nop.m 0
+ (p18) st8 [rptr]=n[2],8
+ br.ctop.sptk .Lcopy_ctop };;
+.Lcopy_cend:
+
+{ .mmi; mov ret0=1 // signal "handled"
+ rum 1<<5 // clear um.mfh
+ mov ar.lc=prevlc }
+{ .mib; .restore sp
+ mov sp=prevsp
+ mov pr=prevpr,0x1ffff
+ br.ret.sptk.many b0 };;
+.endp bn_mul_mont_general#
+
+a1=r16; a2=r17; a3=r18; a4=r19; a5=r20; a6=r21; a7=r22; a8=r23;
+n1=r24; n2=r25; n3=r26; n4=r27; n5=r28; n6=r29; n7=r30; n8=r31;
+t0=r15;
+
+ai0=f8; ai1=f9; ai2=f10; ai3=f11; ai4=f12; ai5=f13; ai6=f14; ai7=f15;
+ni0=f16; ni1=f17; ni2=f18; ni3=f19; ni4=f20; ni5=f21; ni6=f22; ni7=f23;
+
+.align 64
+.skip 48 // aligns loop body
+.local bn_mul_mont_8#
+.proc bn_mul_mont_8#
+bn_mul_mont_8:
+ .prologue
+{ .mmi; .save ar.pfs,prevfs
+ alloc prevfs=ar.pfs,6,2,0,8
+ .vframe prevsp
+ mov prevsp=sp
+ .save ar.lc,prevlc
+ mov prevlc=ar.lc }
+{ .mmi; add r17=-6*16,sp
+ add sp=-7*16,sp
+ .save pr,prevpr
+ mov prevpr=pr };;
+
+{ .mmi; .save.gf 0,0x10
+ stf.spill [sp]=f16,-16
+ .save.gf 0,0x20
+ stf.spill [r17]=f17,32
+ add r16=-5*16,prevsp};;
+{ .mmi; .save.gf 0,0x40
+ stf.spill [r16]=f18,32
+ .save.gf 0,0x80
+ stf.spill [r17]=f19,32
+ $ADDP aptr=0,in1 };;
+{ .mmi; .save.gf 0,0x100
+ stf.spill [r16]=f20,32
+ .save.gf 0,0x200
+ stf.spill [r17]=f21,32
+ $ADDP r29=8,in1 };;
+{ .mmi; .save.gf 0,0x400
+ stf.spill [r16]=f22
+ .save.gf 0,0x800
+ stf.spill [r17]=f23
+ $ADDP rptr=0,in0 };;
+
+ .body
+ .rotf bj[8],mj[2],tf[2],alo[10],ahi[10],nlo[10],nhi[10]
+ .rotr t[8]
+
+// load input vectors padding them to 8 elements
+{ .mmi; ldf8 ai0=[aptr],16 // ap[0]
+ ldf8 ai1=[r29],16 // ap[1]
+ $ADDP bptr=0,in2 }
+{ .mmi; $ADDP r30=8,in2
+ $ADDP nptr=0,in3
+ $ADDP r31=8,in3 };;
+{ .mmi; ldf8 bj[7]=[bptr],16 // bp[0]
+ ldf8 bj[6]=[r30],16 // bp[1]
+ cmp4.le p4,p5=3,in5 }
+{ .mmi; ldf8 ni0=[nptr],16 // np[0]
+ ldf8 ni1=[r31],16 // np[1]
+ cmp4.le p6,p7=4,in5 };;
+
+{ .mfi; (p4)ldf8 ai2=[aptr],16 // ap[2]
+ (p5)fcvt.fxu ai2=f0
+ cmp4.le p8,p9=5,in5 }
+{ .mfi; (p6)ldf8 ai3=[r29],16 // ap[3]
+ (p7)fcvt.fxu ai3=f0
+ cmp4.le p10,p11=6,in5 }
+{ .mfi; (p4)ldf8 bj[5]=[bptr],16 // bp[2]
+ (p5)fcvt.fxu bj[5]=f0
+ cmp4.le p12,p13=7,in5 }
+{ .mfi; (p6)ldf8 bj[4]=[r30],16 // bp[3]
+ (p7)fcvt.fxu bj[4]=f0
+ cmp4.le p14,p15=8,in5 }
+{ .mfi; (p4)ldf8 ni2=[nptr],16 // np[2]
+ (p5)fcvt.fxu ni2=f0
+ addp4 r28=-1,in5 }
+{ .mfi; (p6)ldf8 ni3=[r31],16 // np[3]
+ (p7)fcvt.fxu ni3=f0
+ $ADDP in4=0,in4 };;
+
+{ .mfi; ldf8 n0=[in4]
+ fcvt.fxu tf[1]=f0
+ nop.i 0 }
+
+{ .mfi; (p8)ldf8 ai4=[aptr],16 // ap[4]
+ (p9)fcvt.fxu ai4=f0
+ mov t[0]=r0 }
+{ .mfi; (p10)ldf8 ai5=[r29],16 // ap[5]
+ (p11)fcvt.fxu ai5=f0
+ mov t[1]=r0 }
+{ .mfi; (p8)ldf8 bj[3]=[bptr],16 // bp[4]
+ (p9)fcvt.fxu bj[3]=f0
+ mov t[2]=r0 }
+{ .mfi; (p10)ldf8 bj[2]=[r30],16 // bp[5]
+ (p11)fcvt.fxu bj[2]=f0
+ mov t[3]=r0 }
+{ .mfi; (p8)ldf8 ni4=[nptr],16 // np[4]
+ (p9)fcvt.fxu ni4=f0
+ mov t[4]=r0 }
+{ .mfi; (p10)ldf8 ni5=[r31],16 // np[5]
+ (p11)fcvt.fxu ni5=f0
+ mov t[5]=r0 };;
+
+{ .mfi; (p12)ldf8 ai6=[aptr],16 // ap[6]
+ (p13)fcvt.fxu ai6=f0
+ mov t[6]=r0 }
+{ .mfi; (p14)ldf8 ai7=[r29],16 // ap[7]
+ (p15)fcvt.fxu ai7=f0
+ mov t[7]=r0 }
+{ .mfi; (p12)ldf8 bj[1]=[bptr],16 // bp[6]
+ (p13)fcvt.fxu bj[1]=f0
+ mov ar.lc=r28 }
+{ .mfi; (p14)ldf8 bj[0]=[r30],16 // bp[7]
+ (p15)fcvt.fxu bj[0]=f0
+ mov ar.ec=1 }
+{ .mfi; (p12)ldf8 ni6=[nptr],16 // np[6]
+ (p13)fcvt.fxu ni6=f0
+ mov pr.rot=1<<16 }
+{ .mfb; (p14)ldf8 ni7=[r31],16 // np[7]
+ (p15)fcvt.fxu ni7=f0
+ brp.loop.imp .Louter_8_ctop,.Louter_8_cend-16
+ };;
+
+// The loop is scheduled for 32*n ticks on Itanium 2. Actual attempt
+// to measure with help of Interval Time Counter indicated that the
+// factor is a tad higher: 33 or 34, if not 35. Exact measurement and
+// addressing the issue is problematic, because I don't have access
+// to platform-specific instruction-level profiler. On Itanium it
+// should run in 56*n ticks, because of higher xma latency...
+.Louter_8_ctop:
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 0:
+ (p16) xma.hu ahi[0]=ai0,bj[7],tf[1] // ap[0]*b[i]+t[0]
+ (p40) add a3=a3,n3 } // (p17) a3+=n3
+{ .mfi; (p42) add a3=a3,n3,1
+ (p16) xma.lu alo[0]=ai0,bj[7],tf[1]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig a7=alo[8] // 1:
+ (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
+ (p50) add t[6]=t[6],a3,1 };;
+{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
+ (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
+ (p40) cmp.ltu p43,p41=a3,n3 }
+{ .mfi; (p42) cmp.leu p43,p41=a3,n3
+ (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n5=nlo[6] // 3:
+ (p48) cmp.ltu p51,p49=t[6],a3
+ (p50) cmp.leu p51,p49=t[6],a3 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p16) nop.m 0 // 4:
+ (p16) xma.hu ahi[1]=ai1,bj[7],ahi[0] // ap[1]*b[i]
+ (p41) add a4=a4,n4 } // (p17) a4+=n4
+{ .mfi; (p43) add a4=a4,n4,1
+ (p16) xma.lu alo[1]=ai1,bj[7],ahi[0]
+ (p16) nop.i 0 };;
+{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
+ (p16) xmpy.lu mj[0]=alo[0],n0 // (ap[0]*b[i]+t[0])*n0
+ (p51) add t[5]=t[5],a4,1 };;
+{ .mfi; (p16) nop.m 0 // 6:
+ (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
+ (p41) cmp.ltu p42,p40=a4,n4 }
+{ .mfi; (p43) cmp.leu p42,p40=a4,n4
+ (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n6=nlo[7] // 7:
+ (p49) cmp.ltu p50,p48=t[5],a4
+ (p51) cmp.leu p50,p48=t[5],a4 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 8:
+ (p16) xma.hu ahi[2]=ai2,bj[7],ahi[1] // ap[2]*b[i]
+ (p40) add a5=a5,n5 } // (p17) a5+=n5
+{ .mfi; (p42) add a5=a5,n5,1
+ (p16) xma.lu alo[2]=ai2,bj[7],ahi[1]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a1=alo[1] // 9:
+ (p48) add t[4]=t[4],a5 // p(17) t[4]+=a5
+ (p50) add t[4]=t[4],a5,1 };;
+{ .mfi; (p16) nop.m 0 // 10:
+ (p16) xma.hu nhi[0]=ni0,mj[0],alo[0] // np[0]*m0
+ (p40) cmp.ltu p43,p41=a5,n5 }
+{ .mfi; (p42) cmp.leu p43,p41=a5,n5
+ (p16) xma.lu nlo[0]=ni0,mj[0],alo[0]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n7=nlo[8] // 11:
+ (p48) cmp.ltu p51,p49=t[4],a5
+ (p50) cmp.leu p51,p49=t[4],a5 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p17) getf.sig n8=nhi[8] // 12:
+ (p16) xma.hu ahi[3]=ai3,bj[7],ahi[2] // ap[3]*b[i]
+ (p41) add a6=a6,n6 } // (p17) a6+=n6
+{ .mfi; (p43) add a6=a6,n6,1
+ (p16) xma.lu alo[3]=ai3,bj[7],ahi[2]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a2=alo[2] // 13:
+ (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
+ (p51) add t[3]=t[3],a6,1 };;
+{ .mfi; (p16) nop.m 0 // 14:
+ (p16) xma.hu nhi[1]=ni1,mj[0],nhi[0] // np[1]*m0
+ (p41) cmp.ltu p42,p40=a6,n6 }
+{ .mfi; (p43) cmp.leu p42,p40=a6,n6
+ (p16) xma.lu nlo[1]=ni1,mj[0],nhi[0]
+ (p16) nop.i 0 };;
+{ .mii; (p16) nop.m 0 // 15:
+ (p49) cmp.ltu p50,p48=t[3],a6
+ (p51) cmp.leu p50,p48=t[3],a6 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 16:
+ (p16) xma.hu ahi[4]=ai4,bj[7],ahi[3] // ap[4]*b[i]
+ (p40) add a7=a7,n7 } // (p17) a7+=n7
+{ .mfi; (p42) add a7=a7,n7,1
+ (p16) xma.lu alo[4]=ai4,bj[7],ahi[3]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a3=alo[3] // 17:
+ (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
+ (p50) add t[2]=t[2],a7,1 };;
+{ .mfi; (p16) nop.m 0 // 18:
+ (p16) xma.hu nhi[2]=ni2,mj[0],nhi[1] // np[2]*m0
+ (p40) cmp.ltu p43,p41=a7,n7 }
+{ .mfi; (p42) cmp.leu p43,p41=a7,n7
+ (p16) xma.lu nlo[2]=ni2,mj[0],nhi[1]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n1=nlo[1] // 19:
+ (p48) cmp.ltu p51,p49=t[2],a7
+ (p50) cmp.leu p51,p49=t[2],a7 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p16) nop.m 0 // 20:
+ (p16) xma.hu ahi[5]=ai5,bj[7],ahi[4] // ap[5]*b[i]
+ (p41) add a8=a8,n8 } // (p17) a8+=n8
+{ .mfi; (p43) add a8=a8,n8,1
+ (p16) xma.lu alo[5]=ai5,bj[7],ahi[4]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a4=alo[4] // 21:
+ (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
+ (p51) add t[1]=t[1],a8,1 };;
+{ .mfi; (p16) nop.m 0 // 22:
+ (p16) xma.hu nhi[3]=ni3,mj[0],nhi[2] // np[3]*m0
+ (p41) cmp.ltu p42,p40=a8,n8 }
+{ .mfi; (p43) cmp.leu p42,p40=a8,n8
+ (p16) xma.lu nlo[3]=ni3,mj[0],nhi[2]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n2=nlo[2] // 23:
+ (p49) cmp.ltu p50,p48=t[1],a8
+ (p51) cmp.leu p50,p48=t[1],a8 };;
+{ .mfi; (p16) nop.m 0 // 24:
+ (p16) xma.hu ahi[6]=ai6,bj[7],ahi[5] // ap[6]*b[i]
+ (p16) add a1=a1,n1 } // (p16) a1+=n1
+{ .mfi; (p16) nop.m 0
+ (p16) xma.lu alo[6]=ai6,bj[7],ahi[5]
+ (p17) mov t[0]=r0 };;
+{ .mii; (p16) getf.sig a5=alo[5] // 25:
+ (p16) add t0=t[7],a1 // (p16) t[7]+=a1
+ (p42) add t[0]=t[0],r0,1 };;
+{ .mfi; (p16) setf.sig tf[0]=t0 // 26:
+ (p16) xma.hu nhi[4]=ni4,mj[0],nhi[3] // np[4]*m0
+ (p50) add t[0]=t[0],r0,1 }
+{ .mfi; (p16) cmp.ltu.unc p42,p40=a1,n1
+ (p16) xma.lu nlo[4]=ni4,mj[0],nhi[3]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n3=nlo[3] // 27:
+ (p16) cmp.ltu.unc p50,p48=t0,a1
+ (p16) nop.i 0 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 28:
+ (p16) xma.hu ahi[7]=ai7,bj[7],ahi[6] // ap[7]*b[i]
+ (p40) add a2=a2,n2 } // (p16) a2+=n2
+{ .mfi; (p42) add a2=a2,n2,1
+ (p16) xma.lu alo[7]=ai7,bj[7],ahi[6]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a6=alo[6] // 29:
+ (p48) add t[6]=t[6],a2 // (p16) t[6]+=a2
+ (p50) add t[6]=t[6],a2,1 };;
+{ .mfi; (p16) nop.m 0 // 30:
+ (p16) xma.hu nhi[5]=ni5,mj[0],nhi[4] // np[5]*m0
+ (p40) cmp.ltu p41,p39=a2,n2 }
+{ .mfi; (p42) cmp.leu p41,p39=a2,n2
+ (p16) xma.lu nlo[5]=ni5,mj[0],nhi[4]
+ (p16) nop.i 0 };;
+{ .mfi; (p16) getf.sig n4=nlo[4] // 31:
+ (p16) nop.f 0
+ (p48) cmp.ltu p49,p47=t[6],a2 }
+{ .mfb; (p50) cmp.leu p49,p47=t[6],a2
+ (p16) nop.f 0
+ br.ctop.sptk.many .Louter_8_ctop };;
+.Louter_8_cend:
+
+// above loop has to execute one more time, without (p16), which is
+// replaced with merged move of np[8] to GPR bank
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mmi; (p0) getf.sig n1=ni0 // 0:
+ (p40) add a3=a3,n3 // (p17) a3+=n3
+ (p42) add a3=a3,n3,1 };;
+{ .mii; (p17) getf.sig a7=alo[8] // 1:
+ (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
+ (p50) add t[6]=t[6],a3,1 };;
+{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
+ (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
+ (p40) cmp.ltu p43,p41=a3,n3 }
+{ .mfi; (p42) cmp.leu p43,p41=a3,n3
+ (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
+ (p0) nop.i 0 };;
+{ .mii; (p17) getf.sig n5=nlo[6] // 3:
+ (p48) cmp.ltu p51,p49=t[6],a3
+ (p50) cmp.leu p51,p49=t[6],a3 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mmi; (p0) getf.sig n2=ni1 // 4:
+ (p41) add a4=a4,n4 // (p17) a4+=n4
+ (p43) add a4=a4,n4,1 };;
+{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
+ (p0) nop.f 0
+ (p51) add t[5]=t[5],a4,1 };;
+{ .mfi; (p0) getf.sig n3=ni2 // 6:
+ (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
+ (p41) cmp.ltu p42,p40=a4,n4 }
+{ .mfi; (p43) cmp.leu p42,p40=a4,n4
+ (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
+ (p0) nop.i 0 };;
+{ .mii; (p17) getf.sig n6=nlo[7] // 7:
+ (p49) cmp.ltu p50,p48=t[5],a4
+ (p51) cmp.leu p50,p48=t[5],a4 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mii; (p0) getf.sig n4=ni3 // 8:
+ (p40) add a5=a5,n5 // (p17) a5+=n5
+ (p42) add a5=a5,n5,1 };;
+{ .mii; (p0) nop.m 0 // 9:
+ (p48) add t[4]=t[4],a5 // p(17) t[4]+=a5
+ (p50) add t[4]=t[4],a5,1 };;
+{ .mii; (p0) nop.m 0 // 10:
+ (p40) cmp.ltu p43,p41=a5,n5
+ (p42) cmp.leu p43,p41=a5,n5 };;
+{ .mii; (p17) getf.sig n7=nlo[8] // 11:
+ (p48) cmp.ltu p51,p49=t[4],a5
+ (p50) cmp.leu p51,p49=t[4],a5 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mii; (p17) getf.sig n8=nhi[8] // 12:
+ (p41) add a6=a6,n6 // (p17) a6+=n6
+ (p43) add a6=a6,n6,1 };;
+{ .mii; (p0) getf.sig n5=ni4 // 13:
+ (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
+ (p51) add t[3]=t[3],a6,1 };;
+{ .mii; (p0) nop.m 0 // 14:
+ (p41) cmp.ltu p42,p40=a6,n6
+ (p43) cmp.leu p42,p40=a6,n6 };;
+{ .mii; (p0) getf.sig n6=ni5 // 15:
+ (p49) cmp.ltu p50,p48=t[3],a6
+ (p51) cmp.leu p50,p48=t[3],a6 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mii; (p0) nop.m 0 // 16:
+ (p40) add a7=a7,n7 // (p17) a7+=n7
+ (p42) add a7=a7,n7,1 };;
+{ .mii; (p0) nop.m 0 // 17:
+ (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
+ (p50) add t[2]=t[2],a7,1 };;
+{ .mii; (p0) nop.m 0 // 18:
+ (p40) cmp.ltu p43,p41=a7,n7
+ (p42) cmp.leu p43,p41=a7,n7 };;
+{ .mii; (p0) getf.sig n7=ni6 // 19:
+ (p48) cmp.ltu p51,p49=t[2],a7
+ (p50) cmp.leu p51,p49=t[2],a7 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mii; (p0) nop.m 0 // 20:
+ (p41) add a8=a8,n8 // (p17) a8+=n8
+ (p43) add a8=a8,n8,1 };;
+{ .mmi; (p0) nop.m 0 // 21:
+ (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
+ (p51) add t[1]=t[1],a8,1 }
+{ .mmi; (p17) mov t[0]=r0
+ (p41) cmp.ltu p42,p40=a8,n8
+ (p43) cmp.leu p42,p40=a8,n8 };;
+{ .mmi; (p0) getf.sig n8=ni7 // 22:
+ (p49) cmp.ltu p50,p48=t[1],a8
+ (p51) cmp.leu p50,p48=t[1],a8 }
+{ .mmi; (p42) add t[0]=t[0],r0,1
+ (p0) add r16=-7*16,prevsp
+ (p0) add r17=-6*16,prevsp };;
+
+// subtract np[8] from carrybit|tmp[8]
+// carrybit|tmp[8] layout upon exit from above loop is:
+// t[0]|t[1]|t[2]|t[3]|t[4]|t[5]|t[6]|t[7]|t0 (least significant)
+{ .mmi; (p50)add t[0]=t[0],r0,1
+ add r18=-5*16,prevsp
+ sub n1=t0,n1 };;
+{ .mmi; cmp.gtu p34,p32=n1,t0;;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n2=t[7],n2
+ (p34)sub n2=t[7],n2,1 };;
+{ .mii; (p32)cmp.gtu p35,p33=n2,t[7]
+ (p34)cmp.geu p35,p33=n2,t[7];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub n3=t[6],n3 }
+{ .mmi; (p35)sub n3=t[6],n3,1;;
+ (p33)cmp.gtu p34,p32=n3,t[6]
+ (p35)cmp.geu p34,p32=n3,t[6] };;
+ .pred.rel "mutex",p32,p34
+{ .mii; (p32)sub n4=t[5],n4
+ (p34)sub n4=t[5],n4,1;;
+ (p32)cmp.gtu p35,p33=n4,t[5] }
+{ .mmi; (p34)cmp.geu p35,p33=n4,t[5];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub n5=t[4],n5
+ (p35)sub n5=t[4],n5,1 };;
+{ .mii; (p33)cmp.gtu p34,p32=n5,t[4]
+ (p35)cmp.geu p34,p32=n5,t[4];;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n6=t[3],n6 }
+{ .mmi; (p34)sub n6=t[3],n6,1;;
+ (p32)cmp.gtu p35,p33=n6,t[3]
+ (p34)cmp.geu p35,p33=n6,t[3] };;
+ .pred.rel "mutex",p33,p35
+{ .mii; (p33)sub n7=t[2],n7
+ (p35)sub n7=t[2],n7,1;;
+ (p33)cmp.gtu p34,p32=n7,t[2] }
+{ .mmi; (p35)cmp.geu p34,p32=n7,t[2];;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n8=t[1],n8
+ (p34)sub n8=t[1],n8,1 };;
+{ .mii; (p32)cmp.gtu p35,p33=n8,t[1]
+ (p34)cmp.geu p35,p33=n8,t[1];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub a8=t[0],r0 }
+{ .mmi; (p35)sub a8=t[0],r0,1;;
+ (p33)cmp.gtu p34,p32=a8,t[0]
+ (p35)cmp.geu p34,p32=a8,t[0] };;
+
+// save the result, either tmp[num] or tmp[num]-np[num]
+ .pred.rel "mutex",p32,p34
+{ .mmi; (p32)st8 [rptr]=n1,8
+ (p34)st8 [rptr]=t0,8
+ add r19=-4*16,prevsp};;
+{ .mmb; (p32)st8 [rptr]=n2,8
+ (p34)st8 [rptr]=t[7],8
+ (p5)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n3,8
+ (p34)st8 [rptr]=t[6],8
+ (p7)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n4,8
+ (p34)st8 [rptr]=t[5],8
+ (p9)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n5,8
+ (p34)st8 [rptr]=t[4],8
+ (p11)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n6,8
+ (p34)st8 [rptr]=t[3],8
+ (p13)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n7,8
+ (p34)st8 [rptr]=t[2],8
+ (p15)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n8,8
+ (p34)st8 [rptr]=t[1],8
+ nop.b 0 };;
+.Ldone: // epilogue
+{ .mmi; ldf.fill f16=[r16],64
+ ldf.fill f17=[r17],64
+ nop.i 0 }
+{ .mmi; ldf.fill f18=[r18],64
+ ldf.fill f19=[r19],64
+ mov pr=prevpr,0x1ffff };;
+{ .mmi; ldf.fill f20=[r16]
+ ldf.fill f21=[r17]
+ mov ar.lc=prevlc }
+{ .mmi; ldf.fill f22=[r18]
+ ldf.fill f23=[r19]
+ mov ret0=1 } // signal "handled"
+{ .mib; rum 1<<5
+ .restore sp
+ mov sp=prevsp
+ br.ret.sptk.many b0 };;
+.endp bn_mul_mont_8#
+
+.type copyright#,\@object
+copyright:
+stringz "Montgomery multiplication for IA-64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$output=shift and open STDOUT,">$output";
+print $code;
+close STDOUT;
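
The entry point in ia64-mont.pl above is only a dispatcher: the return value is preset to 0, num < 2 returns immediately (leaving 0, i.e. not handled), num <= 8 branches to the zero-padding bn_mul_mont_8 path, and anything larger goes to bn_mul_mont_general; both paths later set the return value to 1 to signal "handled". Rendered as C for clarity below; this is a sketch, and the two helpers are local assembly labels rather than exported functions.

    #include <stdint.h>

    typedef uint64_t BN_ULONG;                   /* 64-bit limbs on IA-64 */

    /* the two code paths implemented by the assembly above */
    extern int bn_mul_mont_8(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                             const BN_ULONG *np, const BN_ULONG *n0p, int num);
    extern int bn_mul_mont_general(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                                   const BN_ULONG *np, const BN_ULONG *n0p, int num);

    int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                    const BN_ULONG *np, const BN_ULONG *n0p, int num)
    {
        if (num < 2)
            return 0;                                          /* not handled */
        if (num <= 8)
            return bn_mul_mont_8(rp, ap, bp, np, n0p, num);    /* padded 8-limb path */
        return bn_mul_mont_general(rp, ap, bp, np, n0p, num);  /* general loop */
    }
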
diff --git a/crypto/bn/asm/mips-mont.pl b/crypto/bn/asm/mips-mont.pl
new file mode 100755
index 000000000000..b944a12b8e29
--- /dev/null
+++ b/crypto/bn/asm/mips-mont.pl
@@ -0,0 +1,426 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# This module is not of direct interest for OpenSSL, because it
+# doesn't provide better performance for longer keys, at least not on
+# in-order-execution cores. While 512-bit RSA sign operations can be
+# 65% faster in 64-bit mode, 1024-bit ones are only 15% faster, and
+# 4096-bit ones are up to 15% slower. In 32-bit mode it varies from a
+# 16% improvement for 512-bit RSA sign to -33% for 4096-bit RSA
+# verify:-( All comparisons are against bn_mul_mont-free assembler.
+# The module might be of interest to embedded system developers, as
+# the code is smaller than 1KB, yet offers a >3x improvement on MIPS64
+# and 30-75% [less for longer keys] on MIPS32 over compiler-generated
+# code.
+
+######################################################################
+# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
+# widely used. Then there is a new contender: NUBI. It appears that if
+# one picks the latter, it's possible to arrange the code in an
+# ABI-neutral manner. Therefore let's stick to the NUBI register layout:
+#
+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
+#
+# The return value is placed in $a0. The following coding rules facilitate
+# interoperability:
+#
+# - never ever touch $tp, "thread pointer", former $gp;
+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
+# old code];
+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
+#
+# For reference here is register layout for N32/64 MIPS ABIs:
+#
+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+#
+$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
+
+if ($flavour =~ /64|n32/i) {
+ $PTR_ADD="dadd"; # incidentally works even on n32
+ $PTR_SUB="dsub"; # incidentally works even on n32
+ $REG_S="sd";
+ $REG_L="ld";
+ $SZREG=8;
+} else {
+ $PTR_ADD="add";
+ $PTR_SUB="sub";
+ $REG_S="sw";
+ $REG_L="lw";
+ $SZREG=4;
+}
+$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0x00fff000 : 0x00ff0000;
+#
+# <appro@openssl.org>
+#
+######################################################################
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+if ($flavour =~ /64|n32/i) {
+ $LD="ld";
+ $ST="sd";
+ $MULTU="dmultu";
+ $ADDU="daddu";
+ $SUBU="dsubu";
+ $BNSZ=8;
+} else {
+ $LD="lw";
+ $ST="sw";
+ $MULTU="multu";
+ $ADDU="addu";
+ $SUBU="subu";
+ $BNSZ=4;
+}
+
+# int bn_mul_mont(
+$rp=$a0; # BN_ULONG *rp,
+$ap=$a1; # const BN_ULONG *ap,
+$bp=$a2; # const BN_ULONG *bp,
+$np=$a3; # const BN_ULONG *np,
+$n0=$a4; # const BN_ULONG *n0,
+$num=$a5; # int num);
+
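
As a reading aid for the loops that follow, here is a minimal C sketch of the operation the routine computes: rp[] = ap[]*bp[]*R^-1 mod np[], with R = 2^(word_bits*num) and n0 = -np[0]^-1 mod 2^word_bits (the assembly loads that value through the n0 pointer above). The sketch assumes 32-bit limbs and mirrors the tp[] scratch walk, but it is an illustration, not a drop-in replacement for bn_mul_mont.

#include <stdint.h>
#include <string.h>

typedef uint32_t bn_word;                   /* assumption: 32-bit limbs ($BNSZ==4) */
typedef uint64_t bn_dword;

/* Illustrative CIOS Montgomery multiplication; "tp" plays the role of the
 * scratch area the assembly carves out below the stack pointer. */
static int mont_mul_sketch(bn_word *rp, const bn_word *ap, const bn_word *bp,
                           const bn_word *np, bn_word n0, int num)
{
    bn_word tp[num + 2];                    /* tp[0..num+1] (C99 VLA) */
    memset(tp, 0, sizeof(tp));

    for (int i = 0; i < num; i++) {
        bn_dword c = 0;
        for (int j = 0; j < num; j++) {     /* tp += ap[] * bp[i] */
            c += (bn_dword)ap[j] * bp[i] + tp[j];
            tp[j] = (bn_word)c;
            c >>= 32;
        }
        c += tp[num];
        tp[num]     = (bn_word)c;
        tp[num + 1] = (bn_word)(c >> 32);

        bn_word m = tp[0] * n0;             /* Montgomery reduction factor */
        c  = (bn_dword)np[0] * m + tp[0];
        c >>= 32;                           /* low word is zero by construction */
        for (int j = 1; j < num; j++) {     /* tp = (tp + np[]*m) >> one word */
            c += (bn_dword)np[j] * m + tp[j];
            tp[j - 1] = (bn_word)c;
            c >>= 32;
        }
        c += tp[num];
        tp[num - 1] = (bn_word)c;
        tp[num]     = tp[num + 1] + (bn_word)(c >> 32);
    }

    /* conditional final subtraction, as in the .Lsub/.Lcopy tail below */
    bn_dword b = 0;
    for (int i = 0; i < num; i++) {
        b = (bn_dword)tp[i] - np[i] - b;
        rp[i] = (bn_word)b;
        b = (b >> 32) & 1;                  /* borrow out of this word */
    }
    if ((bn_word)b > tp[num])               /* tp < np: keep tp instead */
        memcpy(rp, tp, num * sizeof(bn_word));
    return 1;                               /* the assembly signals "handled" with 1 */
}
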
+$lo0=$a6;
+$hi0=$a7;
+$lo1=$t1;
+$hi1=$t2;
+$aj=$s0;
+$bi=$s1;
+$nj=$s2;
+$tp=$s3;
+$alo=$s4;
+$ahi=$s5;
+$nlo=$s6;
+$nhi=$s7;
+$tj=$s8;
+$i=$s9;
+$j=$s10;
+$m1=$s11;
+
+$FRAMESIZE=14;
+
+$code=<<___;
+.text
+
+.set noat
+.set noreorder
+
+.align 5
+.globl bn_mul_mont
+.ent bn_mul_mont
+bn_mul_mont:
+___
+$code.=<<___ if ($flavour =~ /o32/i);
+ lw $n0,16($sp)
+ lw $num,20($sp)
+___
+$code.=<<___;
+ slt $at,$num,4
+ bnez $at,1f
+ li $t0,0
+ slt $at,$num,17 # on in-order CPU
+ bnezl $at,bn_mul_mont_internal
+ nop
+1: jr $ra
+ li $a0,0
+.end bn_mul_mont
+
+.align 5
+.ent bn_mul_mont_internal
+bn_mul_mont_internal:
+ .frame $fp,$FRAMESIZE*$SZREG,$ra
+ .mask 0x40000000|$SAVED_REGS_MASK,-$SZREG
+ $PTR_SUB $sp,$FRAMESIZE*$SZREG
+ $REG_S $fp,($FRAMESIZE-1)*$SZREG($sp)
+ $REG_S $s11,($FRAMESIZE-2)*$SZREG($sp)
+ $REG_S $s10,($FRAMESIZE-3)*$SZREG($sp)
+ $REG_S $s9,($FRAMESIZE-4)*$SZREG($sp)
+ $REG_S $s8,($FRAMESIZE-5)*$SZREG($sp)
+ $REG_S $s7,($FRAMESIZE-6)*$SZREG($sp)
+ $REG_S $s6,($FRAMESIZE-7)*$SZREG($sp)
+ $REG_S $s5,($FRAMESIZE-8)*$SZREG($sp)
+ $REG_S $s4,($FRAMESIZE-9)*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_S $s3,($FRAMESIZE-10)*$SZREG($sp)
+ $REG_S $s2,($FRAMESIZE-11)*$SZREG($sp)
+ $REG_S $s1,($FRAMESIZE-12)*$SZREG($sp)
+ $REG_S $s0,($FRAMESIZE-13)*$SZREG($sp)
+___
+$code.=<<___;
+ move $fp,$sp
+
+ .set reorder
+ $LD $n0,0($n0)
+ $LD $bi,0($bp) # bp[0]
+ $LD $aj,0($ap) # ap[0]
+ $LD $nj,0($np) # np[0]
+
+ $PTR_SUB $sp,2*$BNSZ # place for two extra words
+ sll $num,`log($BNSZ)/log(2)`
+ li $at,-4096
+ $PTR_SUB $sp,$num
+ and $sp,$at
+
+ $MULTU $aj,$bi
+ $LD $alo,$BNSZ($ap)
+ $LD $nlo,$BNSZ($np)
+ mflo $lo0
+ mfhi $hi0
+ $MULTU $lo0,$n0
+ mflo $m1
+
+ $MULTU $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ $MULTU $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+ $MULTU $nlo,$m1
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,$sp
+ li $j,2*$BNSZ
+.align 4
+.L1st:
+ .set noreorder
+ $PTR_ADD $aj,$ap,$j
+ $PTR_ADD $nj,$np,$j
+ $LD $aj,($aj)
+ $LD $nj,($nj)
+
+ $MULTU $aj,$bi
+ $ADDU $lo0,$alo,$hi0
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo0,$hi0
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi0,$ahi,$at
+ $ADDU $hi1,$nhi,$t0
+ mflo $alo
+ mfhi $ahi
+
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $MULTU $nj,$m1
+ $ADDU $hi1,$at
+ addu $j,$BNSZ
+ $ST $lo1,($tp)
+ sltu $t0,$j,$num
+ mflo $nlo
+ mfhi $nhi
+
+ bnez $t0,.L1st
+ $PTR_ADD $tp,$BNSZ
+ .set reorder
+
+ $ADDU $lo0,$alo,$hi0
+ sltu $at,$lo0,$hi0
+ $ADDU $hi0,$ahi,$at
+
+ $ADDU $lo1,$nlo,$hi1
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi1,$nhi,$t0
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+
+ $ST $lo1,($tp)
+
+ $ADDU $hi1,$hi0
+ sltu $at,$hi1,$hi0
+ $ST $hi1,$BNSZ($tp)
+ $ST $at,2*$BNSZ($tp)
+
+ li $i,$BNSZ
+.align 4
+.Louter:
+ $PTR_ADD $bi,$bp,$i
+ $LD $bi,($bi)
+ $LD $aj,($ap)
+ $LD $alo,$BNSZ($ap)
+ $LD $tj,($sp)
+
+ $MULTU $aj,$bi
+ $LD $nj,($np)
+ $LD $nlo,$BNSZ($np)
+ mflo $lo0
+ mfhi $hi0
+ $ADDU $lo0,$tj
+ $MULTU $lo0,$n0
+ sltu $at,$lo0,$tj
+ $ADDU $hi0,$at
+ mflo $m1
+
+ $MULTU $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ $MULTU $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+
+ $MULTU $nlo,$m1
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,$sp
+ li $j,2*$BNSZ
+ $LD $tj,$BNSZ($tp)
+.align 4
+.Linner:
+ .set noreorder
+ $PTR_ADD $aj,$ap,$j
+ $PTR_ADD $nj,$np,$j
+ $LD $aj,($aj)
+ $LD $nj,($nj)
+
+ $MULTU $aj,$bi
+ $ADDU $lo0,$alo,$hi0
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo0,$hi0
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi0,$ahi,$at
+ $ADDU $hi1,$nhi,$t0
+ mflo $alo
+ mfhi $ahi
+
+ $ADDU $lo0,$tj
+ addu $j,$BNSZ
+ $MULTU $nj,$m1
+ sltu $at,$lo0,$tj
+ $ADDU $lo1,$lo0
+ $ADDU $hi0,$at
+ sltu $t0,$lo1,$lo0
+ $LD $tj,2*$BNSZ($tp)
+ $ADDU $hi1,$t0
+ sltu $at,$j,$num
+ mflo $nlo
+ mfhi $nhi
+ $ST $lo1,($tp)
+ bnez $at,.Linner
+ $PTR_ADD $tp,$BNSZ
+ .set reorder
+
+ $ADDU $lo0,$alo,$hi0
+ sltu $at,$lo0,$hi0
+ $ADDU $hi0,$ahi,$at
+ $ADDU $lo0,$tj
+ sltu $t0,$lo0,$tj
+ $ADDU $hi0,$t0
+
+ $LD $tj,2*$BNSZ($tp)
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo1,$hi1
+ $ADDU $hi1,$nhi,$at
+ $ADDU $lo1,$lo0
+ sltu $t0,$lo1,$lo0
+ $ADDU $hi1,$t0
+ $ST $lo1,($tp)
+
+ $ADDU $lo1,$hi1,$hi0
+ sltu $hi1,$lo1,$hi0
+ $ADDU $lo1,$tj
+ sltu $at,$lo1,$tj
+ $ADDU $hi1,$at
+ $ST $lo1,$BNSZ($tp)
+ $ST $hi1,2*$BNSZ($tp)
+
+ addu $i,$BNSZ
+ sltu $t0,$i,$num
+ bnez $t0,.Louter
+
+ .set noreorder
+ $PTR_ADD $tj,$sp,$num # &tp[num]
+ move $tp,$sp
+ move $ap,$sp
+ li $hi0,0 # clear borrow bit
+
+.align 4
+.Lsub: $LD $lo0,($tp)
+ $LD $lo1,($np)
+ $PTR_ADD $tp,$BNSZ
+ $PTR_ADD $np,$BNSZ
+ $SUBU $lo1,$lo0,$lo1 # tp[i]-np[i]
+ sgtu $at,$lo1,$lo0
+ $SUBU $lo0,$lo1,$hi0
+ sgtu $hi0,$lo0,$lo1
+ $ST $lo0,($rp)
+ or $hi0,$at
+ sltu $at,$tp,$tj
+ bnez $at,.Lsub
+ $PTR_ADD $rp,$BNSZ
+
+ $SUBU $hi0,$hi1,$hi0 # handle upmost overflow bit
+ move $tp,$sp
+ $PTR_SUB $rp,$num # restore rp
+ not $hi1,$hi0
+
+ and $ap,$hi0,$sp
+ and $bp,$hi1,$rp
+ or $ap,$ap,$bp # ap=borrow?tp:rp
+
+.align 4
+.Lcopy: $LD $aj,($ap)
+ $PTR_ADD $ap,$BNSZ
+ $ST $zero,($tp)
+ $PTR_ADD $tp,$BNSZ
+ sltu $at,$tp,$tj
+ $ST $aj,($rp)
+ bnez $at,.Lcopy
+ $PTR_ADD $rp,$BNSZ
+
+ li $a0,1
+ li $t0,1
+
+ .set noreorder
+ move $sp,$fp
+ $REG_L $fp,($FRAMESIZE-1)*$SZREG($sp)
+ $REG_L $s11,($FRAMESIZE-2)*$SZREG($sp)
+ $REG_L $s10,($FRAMESIZE-3)*$SZREG($sp)
+ $REG_L $s9,($FRAMESIZE-4)*$SZREG($sp)
+ $REG_L $s8,($FRAMESIZE-5)*$SZREG($sp)
+ $REG_L $s7,($FRAMESIZE-6)*$SZREG($sp)
+ $REG_L $s6,($FRAMESIZE-7)*$SZREG($sp)
+ $REG_L $s5,($FRAMESIZE-8)*$SZREG($sp)
+ $REG_L $s4,($FRAMESIZE-9)*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s3,($FRAMESIZE-10)*$SZREG($sp)
+ $REG_L $s2,($FRAMESIZE-11)*$SZREG($sp)
+ $REG_L $s1,($FRAMESIZE-12)*$SZREG($sp)
+ $REG_L $s0,($FRAMESIZE-13)*$SZREG($sp)
+___
+$code.=<<___;
+ jr $ra
+ $PTR_ADD $sp,$FRAMESIZE*$SZREG
+.end bn_mul_mont_internal
+.rdata
+.asciiz "Montgomery Multiplication for MIPS, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+
+print $code;
+close STDOUT;
diff --git a/crypto/bn/asm/mips.pl b/crypto/bn/asm/mips.pl
new file mode 100755
index 000000000000..c162a3ec2304
--- /dev/null
+++ b/crypto/bn/asm/mips.pl
@@ -0,0 +1,2585 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project.
+#
+# Rights for redistribution and usage in source and binary forms are
+# granted according to the OpenSSL license. Warranty of any kind is
+# disclaimed.
+# ====================================================================
+
+
+# July 1999
+#
+# This is drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c.
+#
+# The module is designed to work with either of the "new" MIPS ABI(5),
+# namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
+# IRIX 5.x, not only because it doesn't support the new ABIs, but also
+# because 5.x kernels put the R4x00 CPU into 32-bit mode, so all the
+# 64-bit instructions (daddu, dmultu, etc.) found below would only
+# cause illegal instruction exceptions:-(
+#
+# In addition, the code depends on preprocessor flags set up by the
+# MIPSpro compiler driver (either as or cc) and therefore (probably?)
+# can't be compiled by the GNU assembler. The GNU C driver manages fine,
+# though... I mean, as long as -mmips-as is specified or is the default
+# option, because then it simply invokes /usr/bin/as, which in turn takes
+# perfect care of the preprocessor definitions. Another neat feature
+# offered by the MIPSpro assembler is an optimization pass. This gave
+# me the opportunity to have the code look more regular, as all those
+# architecture-dependent instruction rescheduling details were left to
+# the assembler. Cool, huh?
+#
+# Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
+# goes way over 3 times faster!
+#
+# <appro@fy.chalmers.se>
+
+# October 2010
+#
+# Adapt the module even for 32-bit ABIs and other OSes. The former was
+# achieved by mechanical replacement of 64-bit arithmetic instructions
+# such as dmultu, daddu, etc. with their 32-bit counterparts and by
+# adjusting offsets denoting multiples of BN_ULONG. The above-mentioned
+# >3x performance improvement naturally does not apply to 32-bit code
+# [because there is no instruction a 32-bit compiler can't use]; one
+# has to be content with a 40-85% improvement depending on benchmark
+# and key length, more for longer keys.
+
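The routines below are drop-in replacements for the word-level primitives in crypto/bn/bn_asm.c. As a hedged orientation point (assuming a 32-bit BN_ULONG; this is an illustration, not the module's code), the first of them, bn_mul_add_words, computes:

#include <stdint.h>

typedef uint32_t bn_word;                   /* assumption: 32-bit BN_ULONG */
typedef uint64_t bn_dword;

/* rp[i] += ap[i] * w for i in [0,num); the carry out of the top word is
 * returned.  The assembly unrolls this loop four ways and interleaves the
 * multiplies with the mflo/mfhi reads to hide their latency. */
static bn_word mul_add_words_sketch(bn_word *rp, const bn_word *ap,
                                    int num, bn_word w)
{
    bn_word carry = 0;
    for (int i = 0; i < num; i++) {
        bn_dword t = (bn_dword)ap[i] * w + rp[i] + carry;
        rp[i] = (bn_word)t;                 /* low word goes back to rp[] */
        carry = (bn_word)(t >> 32);         /* high word propagates */
    }
    return carry;
}
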
+$flavour = shift;
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+if ($flavour =~ /64|n32/i) {
+ $LD="ld";
+ $ST="sd";
+ $MULTU="dmultu";
+ $DIVU="ddivu";
+ $ADDU="daddu";
+ $SUBU="dsubu";
+ $SRL="dsrl";
+ $SLL="dsll";
+ $BNSZ=8;
+ $PTR_ADD="daddu";
+ $PTR_SUB="dsubu";
+ $SZREG=8;
+ $REG_S="sd";
+ $REG_L="ld";
+} else {
+ $LD="lw";
+ $ST="sw";
+ $MULTU="multu";
+ $DIVU="divu";
+ $ADDU="addu";
+ $SUBU="subu";
+ $SRL="srl";
+ $SLL="sll";
+ $BNSZ=4;
+ $PTR_ADD="addu";
+ $PTR_SUB="subu";
+ $SZREG=4;
+ $REG_S="sw";
+ $REG_L="lw";
+ $code=".set mips2\n";
+}
+
+# Below is N32/64 register layout used in the original module.
+#
+($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+($ta0,$ta1,$ta2,$ta3)=($a4,$a5,$a6,$a7);
+#
+# No special adaptation is required for O32. NUBI on the other hand
+# is treated by saving/restoring ($v1,$t0..$t3).
+
+$gp=$v1 if ($flavour =~ /nubi/i);
+
+$minus4=$v1;
+
+$code.=<<___;
+.rdata
+.asciiz "mips3.s, Version 1.2"
+.asciiz "MIPS II/III/IV ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>"
+
+.text
+.set noat
+
+.align 5
+.globl bn_mul_add_words
+.ent bn_mul_add_words
+bn_mul_add_words:
+ .set noreorder
+ bgtz $a2,bn_mul_add_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_mul_add_words
+
+.align 5
+.ent bn_mul_add_words_internal
+bn_mul_add_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ $LD $t0,0($a1)
+ beqz $ta0,.L_bn_mul_add_words_tail
+
+.L_bn_mul_add_words_loop:
+ $MULTU $t0,$a3
+ $LD $t1,0($a0)
+ $LD $t2,$BNSZ($a1)
+ $LD $t3,$BNSZ($a0)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta1,2*$BNSZ($a0)
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0 # All manuals say it "compares 32-bit
+ # values", but it seems to work fine
+ # even on 64-bit registers.
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ $MULTU $t2,$a3
+ sltu $at,$t1,$at
+ $ST $t1,0($a0)
+ $ADDU $v0,$at
+
+ $LD $ta2,3*$BNSZ($a1)
+ $LD $ta3,3*$BNSZ($a0)
+ $ADDU $t3,$v0
+ sltu $v0,$t3,$v0
+ mflo $at
+ mfhi $t2
+ $ADDU $t3,$at
+ $ADDU $v0,$t2
+ $MULTU $ta0,$a3
+ sltu $at,$t3,$at
+ $ST $t3,$BNSZ($a0)
+ $ADDU $v0,$at
+
+ subu $a2,4
+ $PTR_ADD $a0,4*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ $ADDU $ta1,$v0
+ sltu $v0,$ta1,$v0
+ mflo $at
+ mfhi $ta0
+ $ADDU $ta1,$at
+ $ADDU $v0,$ta0
+ $MULTU $ta2,$a3
+ sltu $at,$ta1,$at
+ $ST $ta1,-2*$BNSZ($a0)
+ $ADDU $v0,$at
+
+
+ and $ta0,$a2,$minus4
+ $ADDU $ta3,$v0
+ sltu $v0,$ta3,$v0
+ mflo $at
+ mfhi $ta2
+ $ADDU $ta3,$at
+ $ADDU $v0,$ta2
+ sltu $at,$ta3,$at
+ $ST $ta3,-$BNSZ($a0)
+ $ADDU $v0,$at
+ .set noreorder
+ bgtzl $ta0,.L_bn_mul_add_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a2,.L_bn_mul_add_words_return
+ nop
+
+.L_bn_mul_add_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ $LD $t1,0($a0)
+ subu $a2,1
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,0($a0)
+ $ADDU $v0,$at
+ beqz $a2,.L_bn_mul_add_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$a3
+ $LD $t1,$BNSZ($a0)
+ subu $a2,1
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$at
+ beqz $a2,.L_bn_mul_add_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$a3
+ $LD $t1,2*$BNSZ($a0)
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,2*$BNSZ($a0)
+ $ADDU $v0,$at
+
+.L_bn_mul_add_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_mul_add_words_internal
+
+.align 5
+.globl bn_mul_words
+.ent bn_mul_words
+bn_mul_words:
+ .set noreorder
+ bgtz $a2,bn_mul_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_mul_words
+
+.align 5
+.ent bn_mul_words_internal
+bn_mul_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ $LD $t0,0($a1)
+ beqz $ta0,.L_bn_mul_words_tail
+
+.L_bn_mul_words_loop:
+ $MULTU $t0,$a3
+ $LD $t2,$BNSZ($a1)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta2,3*$BNSZ($a1)
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $MULTU $t2,$a3
+ $ST $v0,0($a0)
+ $ADDU $v0,$t1,$t0
+
+ subu $a2,4
+ $PTR_ADD $a0,4*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ mflo $at
+ mfhi $t2
+ $ADDU $v0,$at
+ sltu $t3,$v0,$at
+ $MULTU $ta0,$a3
+ $ST $v0,-3*$BNSZ($a0)
+ $ADDU $v0,$t3,$t2
+
+ mflo $at
+ mfhi $ta0
+ $ADDU $v0,$at
+ sltu $ta1,$v0,$at
+ $MULTU $ta2,$a3
+ $ST $v0,-2*$BNSZ($a0)
+ $ADDU $v0,$ta1,$ta0
+
+ and $ta0,$a2,$minus4
+ mflo $at
+ mfhi $ta2
+ $ADDU $v0,$at
+ sltu $ta3,$v0,$at
+ $ST $v0,-$BNSZ($a0)
+ $ADDU $v0,$ta3,$ta2
+ .set noreorder
+ bgtzl $ta0,.L_bn_mul_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a2,.L_bn_mul_words_return
+ nop
+
+.L_bn_mul_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ subu $a2,1
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,0($a0)
+ $ADDU $v0,$t1,$t0
+ beqz $a2,.L_bn_mul_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$a3
+ subu $a2,1
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,$BNSZ($a0)
+ $ADDU $v0,$t1,$t0
+ beqz $a2,.L_bn_mul_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$a3
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,2*$BNSZ($a0)
+ $ADDU $v0,$t1,$t0
+
+.L_bn_mul_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_mul_words_internal
+
+.align 5
+.globl bn_sqr_words
+.ent bn_sqr_words
+bn_sqr_words:
+ .set noreorder
+ bgtz $a2,bn_sqr_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_sqr_words
+
+.align 5
+.ent bn_sqr_words_internal
+bn_sqr_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ $LD $t0,0($a1)
+ beqz $ta0,.L_bn_sqr_words_tail
+
+.L_bn_sqr_words_loop:
+ $MULTU $t0,$t0
+ $LD $t2,$BNSZ($a1)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta2,3*$BNSZ($a1)
+ mflo $t1
+ mfhi $t0
+ $ST $t1,0($a0)
+ $ST $t0,$BNSZ($a0)
+
+ $MULTU $t2,$t2
+ subu $a2,4
+ $PTR_ADD $a0,8*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ mflo $t3
+ mfhi $t2
+ $ST $t3,-6*$BNSZ($a0)
+ $ST $t2,-5*$BNSZ($a0)
+
+ $MULTU $ta0,$ta0
+ mflo $ta1
+ mfhi $ta0
+ $ST $ta1,-4*$BNSZ($a0)
+ $ST $ta0,-3*$BNSZ($a0)
+
+
+ $MULTU $ta2,$ta2
+ and $ta0,$a2,$minus4
+ mflo $ta3
+ mfhi $ta2
+ $ST $ta3,-2*$BNSZ($a0)
+ $ST $ta2,-$BNSZ($a0)
+
+ .set noreorder
+ bgtzl $ta0,.L_bn_sqr_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a2,.L_bn_sqr_words_return
+ nop
+
+.L_bn_sqr_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$t0
+ subu $a2,1
+ mflo $t1
+ mfhi $t0
+ $ST $t1,0($a0)
+ $ST $t0,$BNSZ($a0)
+ beqz $a2,.L_bn_sqr_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$t0
+ subu $a2,1
+ mflo $t1
+ mfhi $t0
+ $ST $t1,2*$BNSZ($a0)
+ $ST $t0,3*$BNSZ($a0)
+ beqz $a2,.L_bn_sqr_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$t0
+ mflo $t1
+ mfhi $t0
+ $ST $t1,4*$BNSZ($a0)
+ $ST $t0,5*$BNSZ($a0)
+
+.L_bn_sqr_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+
+.end bn_sqr_words_internal
+
+.align 5
+.globl bn_add_words
+.ent bn_add_words
+bn_add_words:
+ .set noreorder
+ bgtz $a3,bn_add_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_add_words
+
+.align 5
+.ent bn_add_words_internal
+bn_add_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $at,$a3,$minus4
+ $LD $t0,0($a1)
+ beqz $at,.L_bn_add_words_tail
+
+.L_bn_add_words_loop:
+ $LD $ta0,0($a2)
+ subu $a3,4
+ $LD $t1,$BNSZ($a1)
+ and $at,$a3,$minus4
+ $LD $t2,2*$BNSZ($a1)
+ $PTR_ADD $a2,4*$BNSZ
+ $LD $t3,3*$BNSZ($a1)
+ $PTR_ADD $a0,4*$BNSZ
+ $LD $ta1,-3*$BNSZ($a2)
+ $PTR_ADD $a1,4*$BNSZ
+ $LD $ta2,-2*$BNSZ($a2)
+ $LD $ta3,-$BNSZ($a2)
+ $ADDU $ta0,$t0
+ sltu $t8,$ta0,$t0
+ $ADDU $t0,$ta0,$v0
+ sltu $v0,$t0,$ta0
+ $ST $t0,-4*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ $ADDU $ta1,$t1
+ sltu $t9,$ta1,$t1
+ $ADDU $t1,$ta1,$v0
+ sltu $v0,$t1,$ta1
+ $ST $t1,-3*$BNSZ($a0)
+ $ADDU $v0,$t9
+
+ $ADDU $ta2,$t2
+ sltu $t8,$ta2,$t2
+ $ADDU $t2,$ta2,$v0
+ sltu $v0,$t2,$ta2
+ $ST $t2,-2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ $ADDU $ta3,$t3
+ sltu $t9,$ta3,$t3
+ $ADDU $t3,$ta3,$v0
+ sltu $v0,$t3,$ta3
+ $ST $t3,-$BNSZ($a0)
+ $ADDU $v0,$t9
+
+ .set noreorder
+ bgtzl $at,.L_bn_add_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a3,.L_bn_add_words_return
+ nop
+
+.L_bn_add_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ $ADDU $ta0,$t0
+ subu $a3,1
+ sltu $t8,$ta0,$t0
+ $ADDU $t0,$ta0,$v0
+ sltu $v0,$t0,$ta0
+ $ST $t0,0($a0)
+ $ADDU $v0,$t8
+ beqz $a3,.L_bn_add_words_return
+
+ $LD $t1,$BNSZ($a1)
+ $LD $ta1,$BNSZ($a2)
+ $ADDU $ta1,$t1
+ subu $a3,1
+ sltu $t9,$ta1,$t1
+ $ADDU $t1,$ta1,$v0
+ sltu $v0,$t1,$ta1
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$t9
+ beqz $a3,.L_bn_add_words_return
+
+ $LD $t2,2*$BNSZ($a1)
+ $LD $ta2,2*$BNSZ($a2)
+ $ADDU $ta2,$t2
+ sltu $t8,$ta2,$t2
+ $ADDU $t2,$ta2,$v0
+ sltu $v0,$t2,$ta2
+ $ST $t2,2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+.L_bn_add_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+
+.end bn_add_words_internal
+
+.align 5
+.globl bn_sub_words
+.ent bn_sub_words
+bn_sub_words:
+ .set noreorder
+ bgtz $a3,bn_sub_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$zero
+.end bn_sub_words
+
+.align 5
+.ent bn_sub_words_internal
+bn_sub_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $at,$a3,$minus4
+ $LD $t0,0($a1)
+ beqz $at,.L_bn_sub_words_tail
+
+.L_bn_sub_words_loop:
+ $LD $ta0,0($a2)
+ subu $a3,4
+ $LD $t1,$BNSZ($a1)
+ and $at,$a3,$minus4
+ $LD $t2,2*$BNSZ($a1)
+ $PTR_ADD $a2,4*$BNSZ
+ $LD $t3,3*$BNSZ($a1)
+ $PTR_ADD $a0,4*$BNSZ
+ $LD $ta1,-3*$BNSZ($a2)
+ $PTR_ADD $a1,4*$BNSZ
+ $LD $ta2,-2*$BNSZ($a2)
+ $LD $ta3,-$BNSZ($a2)
+ sltu $t8,$t0,$ta0
+ $SUBU $ta0,$t0,$ta0
+ $SUBU $t0,$ta0,$v0
+ sgtu $v0,$t0,$ta0
+ $ST $t0,-4*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ sltu $t9,$t1,$ta1
+ $SUBU $ta1,$t1,$ta1
+ $SUBU $t1,$ta1,$v0
+ sgtu $v0,$t1,$ta1
+ $ST $t1,-3*$BNSZ($a0)
+ $ADDU $v0,$t9
+
+
+ sltu $t8,$t2,$ta2
+ $SUBU $ta2,$t2,$ta2
+ $SUBU $t2,$ta2,$v0
+ sgtu $v0,$t2,$ta2
+ $ST $t2,-2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ sltu $t9,$t3,$ta3
+ $SUBU $ta3,$t3,$ta3
+ $SUBU $t3,$ta3,$v0
+ sgtu $v0,$t3,$ta3
+ $ST $t3,-$BNSZ($a0)
+ $ADDU $v0,$t9
+
+ .set noreorder
+ bgtzl $at,.L_bn_sub_words_loop
+ $LD $t0,0($a1)
+
+ beqz $a3,.L_bn_sub_words_return
+ nop
+
+.L_bn_sub_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ subu $a3,1
+ sltu $t8,$t0,$ta0
+ $SUBU $ta0,$t0,$ta0
+ $SUBU $t0,$ta0,$v0
+ sgtu $v0,$t0,$ta0
+ $ST $t0,0($a0)
+ $ADDU $v0,$t8
+ beqz $a3,.L_bn_sub_words_return
+
+ $LD $t1,$BNSZ($a1)
+ subu $a3,1
+ $LD $ta1,$BNSZ($a2)
+ sltu $t9,$t1,$ta1
+ $SUBU $ta1,$t1,$ta1
+ $SUBU $t1,$ta1,$v0
+ sgtu $v0,$t1,$ta1
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$t9
+ beqz $a3,.L_bn_sub_words_return
+
+ $LD $t2,2*$BNSZ($a1)
+ $LD $ta2,2*$BNSZ($a2)
+ sltu $t8,$t2,$ta2
+ $SUBU $ta2,$t2,$ta2
+ $SUBU $t2,$ta2,$v0
+ sgtu $v0,$t2,$ta2
+ $ST $t2,2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+.L_bn_sub_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_sub_words_internal
+
+.align 5
+.globl bn_div_3_words
+.ent bn_div_3_words
+bn_div_3_words:
+ .set noreorder
+ move $a3,$a0 # we know that bn_div_words does not
+ # touch $a3, $ta2, $ta3 and preserves $a2
+ # so that we can save two arguments
+				# and the return address in registers
+				# instead of on the stack:-)
+
+ $LD $a0,($a3)
+ move $ta2,$a1
+ bne $a0,$a2,bn_div_3_words_internal
+ $LD $a1,-$BNSZ($a3)
+ li $v0,-1
+ jr $ra
+ move $a0,$v0
+.end bn_div_3_words
+
+.align 5
+.ent bn_div_3_words_internal
+bn_div_3_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ move $ta3,$ra
+ bal bn_div_words
+ move $ra,$ta3
+ $MULTU $ta2,$v0
+ $LD $t2,-2*$BNSZ($a3)
+ move $ta0,$zero
+ mfhi $t1
+ mflo $t0
+ sltu $t8,$t1,$a1
+.L_bn_div_3_words_inner_loop:
+ bnez $t8,.L_bn_div_3_words_inner_loop_done
+ sgeu $at,$t2,$t0
+ seq $t9,$t1,$a1
+ and $at,$t9
+ sltu $t3,$t0,$ta2
+ $ADDU $a1,$a2
+ $SUBU $t1,$t3
+ $SUBU $t0,$ta2
+ sltu $t8,$t1,$a1
+ sltu $ta0,$a1,$a2
+ or $t8,$ta0
+ .set noreorder
+ beqzl $at,.L_bn_div_3_words_inner_loop
+ $SUBU $v0,1
+ .set reorder
+.L_bn_div_3_words_inner_loop_done:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_div_3_words_internal
+
+.align 5
+.globl bn_div_words
+.ent bn_div_words
+bn_div_words:
+ .set noreorder
+ bnez $a2,bn_div_words_internal
+ li $v0,-1 # I would rather signal div-by-zero
+ # which can be done with 'break 7'
+ jr $ra
+ move $a0,$v0
+.end bn_div_words
+
+.align 5
+.ent bn_div_words_internal
+bn_div_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ move $v1,$zero
+ bltz $a2,.L_bn_div_words_body
+ move $t9,$v1
+ $SLL $a2,1
+ bgtz $a2,.-4
+ addu $t9,1
+
+ .set reorder
+ negu $t1,$t9
+ li $t2,-1
+ $SLL $t2,$t1
+ and $t2,$a0
+ $SRL $at,$a1,$t1
+ .set noreorder
+ bnezl $t2,.+8
+ break 6 # signal overflow
+ .set reorder
+ $SLL $a0,$t9
+ $SLL $a1,$t9
+ or $a0,$at
+___
+$QT=$ta0;
+$HH=$ta1;
+$DH=$v1;
+$code.=<<___;
+.L_bn_div_words_body:
+ $SRL $DH,$a2,4*$BNSZ # bits
+ sgeu $at,$a0,$a2
+ .set noreorder
+ bnezl $at,.+8
+ $SUBU $a0,$a2
+ .set reorder
+
+ li $QT,-1
+ $SRL $HH,$a0,4*$BNSZ # bits
+ $SRL $QT,4*$BNSZ # q=0xffffffff
+ beq $DH,$HH,.L_bn_div_words_skip_div1
+ $DIVU $zero,$a0,$DH
+ mflo $QT
+.L_bn_div_words_skip_div1:
+ $MULTU $a2,$QT
+ $SLL $t3,$a0,4*$BNSZ # bits
+ $SRL $at,$a1,4*$BNSZ # bits
+ or $t3,$at
+ mflo $t0
+ mfhi $t1
+.L_bn_div_words_inner_loop1:
+ sltu $t2,$t3,$t0
+ seq $t8,$HH,$t1
+ sltu $at,$HH,$t1
+ and $t2,$t8
+ sltu $v0,$t0,$a2
+ or $at,$t2
+ .set noreorder
+ beqz $at,.L_bn_div_words_inner_loop1_done
+ $SUBU $t1,$v0
+ $SUBU $t0,$a2
+ b .L_bn_div_words_inner_loop1
+ $SUBU $QT,1
+ .set reorder
+.L_bn_div_words_inner_loop1_done:
+
+ $SLL $a1,4*$BNSZ # bits
+ $SUBU $a0,$t3,$t0
+ $SLL $v0,$QT,4*$BNSZ # bits
+
+ li $QT,-1
+ $SRL $HH,$a0,4*$BNSZ # bits
+ $SRL $QT,4*$BNSZ # q=0xffffffff
+ beq $DH,$HH,.L_bn_div_words_skip_div2
+ $DIVU $zero,$a0,$DH
+ mflo $QT
+.L_bn_div_words_skip_div2:
+ $MULTU $a2,$QT
+ $SLL $t3,$a0,4*$BNSZ # bits
+ $SRL $at,$a1,4*$BNSZ # bits
+ or $t3,$at
+ mflo $t0
+ mfhi $t1
+.L_bn_div_words_inner_loop2:
+ sltu $t2,$t3,$t0
+ seq $t8,$HH,$t1
+ sltu $at,$HH,$t1
+ and $t2,$t8
+ sltu $v1,$t0,$a2
+ or $at,$t2
+ .set noreorder
+ beqz $at,.L_bn_div_words_inner_loop2_done
+ $SUBU $t1,$v1
+ $SUBU $t0,$a2
+ b .L_bn_div_words_inner_loop2
+ $SUBU $QT,1
+ .set reorder
+.L_bn_div_words_inner_loop2_done:
+
+ $SUBU $a0,$t3,$t0
+ or $v0,$QT
+ $SRL $v1,$a0,$t9 # $v1 contains remainder if anybody wants it
+ $SRL $a2,$t9 # restore $a2
+
+ .set noreorder
+ move $a1,$v1
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_div_words_internal
+___
+undef $HH; undef $QT; undef $DH;
+
+($a_0,$a_1,$a_2,$a_3)=($t0,$t1,$t2,$t3);
+($b_0,$b_1,$b_2,$b_3)=($ta0,$ta1,$ta2,$ta3);
+
+($a_4,$a_5,$a_6,$a_7)=($s0,$s2,$s4,$a1); # once we load a[7], no use for $a1
+($b_4,$b_5,$b_6,$b_7)=($s1,$s3,$s5,$a2); # once we load b[7], no use for $a2
+
+($t_1,$t_2,$c_1,$c_2,$c_3)=($t8,$t9,$v0,$v1,$a3);
+
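The comba routines below keep a three-word column accumulator in ($c_1,$c_2,$c_3) and tag every multiply with the bn_asm.c macro it corresponds to. As a hedged reading aid (32-bit words assumed; illustration only), one mul_add_c step amounts to:

#include <stdint.h>

typedef uint32_t bn_word;                   /* assumption: 32-bit BN_ULONG */
typedef uint64_t bn_dword;

/* mul_add_c(a,b,c0,c1,c2): fold the double-word product a*b into the
 * running column accumulator c0 (low), c1 (middle), c2 (top).  Each
 * $MULTU/mflo/mfhi group plus its sltu carry fix-ups below is one such
 * step; r[k] is stored once its column is complete. */
static void mul_add_c_sketch(bn_word a, bn_word b,
                             bn_word *c0, bn_word *c1, bn_word *c2)
{
    bn_dword t = (bn_dword)a * b;
    bn_word lo = (bn_word)t;
    bn_word hi = (bn_word)(t >> 32);

    *c0 += lo;
    if (*c0 < lo) hi++;                     /* carry out of the low word */
    *c1 += hi;
    if (*c1 < hi) (*c2)++;                  /* carry out of the middle word */
}
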
+$code.=<<___;
+
+.align 5
+.globl bn_mul_comba8
+.ent bn_mul_comba8
+bn_mul_comba8:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,12*$SZREG,$ra
+ .mask 0x803ff008,-$SZREG
+ $PTR_SUB $sp,12*$SZREG
+ $REG_S $ra,11*$SZREG($sp)
+ $REG_S $s5,10*$SZREG($sp)
+ $REG_S $s4,9*$SZREG($sp)
+ $REG_S $s3,8*$SZREG($sp)
+ $REG_S $s2,7*$SZREG($sp)
+ $REG_S $s1,6*$SZREG($sp)
+ $REG_S $s0,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x003f0000,-$SZREG
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $s5,5*$SZREG($sp)
+ $REG_S $s4,4*$SZREG($sp)
+ $REG_S $s3,3*$SZREG($sp)
+ $REG_S $s2,2*$SZREG($sp)
+ $REG_S $s1,1*$SZREG($sp)
+ $REG_S $s0,0*$SZREG($sp)
+___
+$code.=<<___;
+
+ .set reorder
+ $LD $a_0,0($a1) # If compiled with -mips3 option on
+				# an R5000 box, the assembler barks on
+				# this line with a "should not have
+				# mult/div as last instruction in bb
+				# (R10K bug)" warning. If anybody out
+				# there has a clue about how to
+				# circumvent this, do send me a note.
+ # <appro\@fy.chalmers.se>
+
+ $LD $b_0,0($a2)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_3,3*$BNSZ($a1)
+ $LD $b_1,$BNSZ($a2)
+ $LD $b_2,2*$BNSZ($a2)
+ $LD $b_3,3*$BNSZ($a2)
+ mflo $c_1
+ mfhi $c_2
+
+ $LD $a_4,4*$BNSZ($a1)
+ $LD $a_5,5*$BNSZ($a1)
+ $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD $a_6,6*$BNSZ($a1)
+ $LD $a_7,7*$BNSZ($a1)
+ $LD $b_4,4*$BNSZ($a2)
+ $LD $b_5,5*$BNSZ($a2)
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1);
+ $ADDU $c_3,$t_2,$at
+ $LD $b_6,6*$BNSZ($a2)
+ $LD $b_7,7*$BNSZ($a2)
+ $ST $c_1,0($a0) # r[0]=c1;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ $ST $c_2,$BNSZ($a0) # r[1]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0) # r[2]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_0 # mul_add_c(a[4],b[0],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0) # r[3]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$b_4 # mul_add_c(a[0],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$b_5 # mul_add_c(a[0],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0) # r[4]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_4 # mul_add_c(a[1],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_4,$b_1 # mul_add_c(a[4],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_0 # mul_add_c(a[5],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_0 # mul_add_c(a[6],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0) # r[5]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_1 # mul_add_c(a[5],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_2 # mul_add_c(a[4],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_4 # mul_add_c(a[2],b[4],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_5 # mul_add_c(a[1],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$b_6 # mul_add_c(a[0],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$b_7 # mul_add_c(a[0],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,6*$BNSZ($a0) # r[6]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_6 # mul_add_c(a[1],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_5 # mul_add_c(a[2],b[5],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_4 # mul_add_c(a[3],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_3 # mul_add_c(a[4],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_5,$b_2 # mul_add_c(a[5],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_6,$b_1 # mul_add_c(a[6],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_0 # mul_add_c(a[7],b[0],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_1 # mul_add_c(a[7],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,7*$BNSZ($a0) # r[7]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_2 # mul_add_c(a[6],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_3 # mul_add_c(a[5],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_4,$b_4 # mul_add_c(a[4],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_5 # mul_add_c(a[3],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_6 # mul_add_c(a[2],b[6],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_7 # mul_add_c(a[1],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_7 # mul_add_c(a[2],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,8*$BNSZ($a0) # r[8]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_6 # mul_add_c(a[3],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_5 # mul_add_c(a[4],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_4 # mul_add_c(a[5],b[4],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_3 # mul_add_c(a[6],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_7,$b_2 # mul_add_c(a[7],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_7,$b_3 # mul_add_c(a[7],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,9*$BNSZ($a0) # r[9]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_6,$b_4 # mul_add_c(a[6],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_5,$b_5 # mul_add_c(a[5],b[5],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_6 # mul_add_c(a[4],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_7 # mul_add_c(a[3],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_7 # mul_add_c(a[4],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,10*$BNSZ($a0) # r[10]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_6 # mul_add_c(a[5],b[6],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_5 # mul_add_c(a[6],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_7,$b_4 # mul_add_c(a[7],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_7,$b_5 # mul_add_c(a[7],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,11*$BNSZ($a0) # r[11]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_6 # mul_add_c(a[6],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_7 # mul_add_c(a[5],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_7 # mul_add_c(a[6],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,12*$BNSZ($a0) # r[12]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_6 # mul_add_c(a[7],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_7 # mul_add_c(a[7],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,13*$BNSZ($a0) # r[13]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ $ST $c_3,14*$BNSZ($a0) # r[14]=c3;
+ $ST $c_1,15*$BNSZ($a0) # r[15]=c1;
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s5,10*$SZREG($sp)
+ $REG_L $s4,9*$SZREG($sp)
+ $REG_L $s3,8*$SZREG($sp)
+ $REG_L $s2,7*$SZREG($sp)
+ $REG_L $s1,6*$SZREG($sp)
+ $REG_L $s0,5*$SZREG($sp)
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ jr $ra
+ $PTR_ADD $sp,12*$SZREG
+___
+$code.=<<___ if ($flavour !~ /nubi/i);
+ $REG_L $s5,5*$SZREG($sp)
+ $REG_L $s4,4*$SZREG($sp)
+ $REG_L $s3,3*$SZREG($sp)
+ $REG_L $s2,2*$SZREG($sp)
+ $REG_L $s1,1*$SZREG($sp)
+ $REG_L $s0,0*$SZREG($sp)
+ jr $ra
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+.end bn_mul_comba8
+
+.align 5
+.globl bn_mul_comba4
+.ent bn_mul_comba4
+bn_mul_comba4:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $b_0,0($a2)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_3,3*$BNSZ($a1)
+ $LD $b_1,$BNSZ($a2)
+ $LD $b_2,2*$BNSZ($a2)
+ $LD $b_3,3*$BNSZ($a2)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1);
+ $ADDU $c_3,$t_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ $ST $c_2,$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ $ST $c_1,6*$BNSZ($a0)
+ $ST $c_2,7*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_mul_comba4
+___
+
+($a_4,$a_5,$a_6,$a_7)=($b_0,$b_1,$b_2,$b_3);
+
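bn_sqr_comba8 and bn_sqr_comba4 below reuse the same column accumulator, but every off-diagonal product a[i]*a[j] (i != j) occurs twice, which is what the mul_add_c2 comments and the slt/$SLL doubling sequences express. A hedged sketch of that step (32-bit words assumed; illustration only):

#include <stdint.h>

typedef uint32_t bn_word;                   /* assumption: 32-bit BN_ULONG */
typedef uint64_t bn_dword;

/* mul_add_c2(a,b,c0,c1,c2): fold 2*a*b into the column accumulator. */
static void mul_add_c2_sketch(bn_word a, bn_word b,
                              bn_word *c0, bn_word *c1, bn_word *c2)
{
    bn_dword t = (bn_dword)a * b;
    bn_word lo = (bn_word)t;
    bn_word hi = (bn_word)(t >> 32);

    /* double the product first; the bits shifted out become carries,
     * mirroring the slt-before-$SLL tests in the assembly */
    if (hi & 0x80000000u) (*c2)++;          /* top bit of hi moves up one word */
    hi = (hi << 1) | (lo >> 31);            /* top bit of lo moves into hi */
    lo <<= 1;

    *c0 += lo;
    if (*c0 < lo) hi++;                     /* carry out of the low word */
    *c1 += hi;
    if (*c1 < hi) (*c2)++;                  /* carry out of the middle word */
}
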
+$code.=<<___;
+
+.align 5
+.globl bn_sqr_comba8
+.ent bn_sqr_comba8
+bn_sqr_comba8:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $LD $a_3,3*$BNSZ($a1)
+
+ $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_4,4*$BNSZ($a1)
+ $LD $a_5,5*$BNSZ($a1)
+ $LD $a_6,6*$BNSZ($a1)
+ $LD $a_7,7*$BNSZ($a1)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $c_3,$t_2,$at
+ $ST $c_2,$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_2 # mul_add_c2(a[1],b[2],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_4,$a_0 # mul_add_c2(a[4],b[0],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$a_5 # mul_add_c2(a[0],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_4 # mul_add_c2(a[1],b[4],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $MULTU $a_6,$a_0 # mul_add_c2(a[6],b[0],c1,c2,c3);
+ $ADDU $c_2,$at
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_5,$a_1 # mul_add_c2(a[5],b[1],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_4,$a_2 # mul_add_c2(a[4],b[2],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$a_7 # mul_add_c2(a[0],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,6*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_6 # mul_add_c2(a[1],b[6],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_2,$a_5 # mul_add_c2(a[2],b[5],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_3,$a_4 # mul_add_c2(a[3],b[4],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_7,$a_1 # mul_add_c2(a[7],b[1],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,7*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_6,$a_2 # mul_add_c2(a[6],b[2],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_5,$a_3 # mul_add_c2(a[5],b[3],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_4,$a_4 # mul_add_c(a[4],b[4],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$a_7 # mul_add_c2(a[2],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,8*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_3,$a_6 # mul_add_c2(a[3],b[6],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_4,$a_5 # mul_add_c2(a[4],b[5],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_7,$a_3 # mul_add_c2(a[7],b[3],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,9*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_6,$a_4 # mul_add_c2(a[6],b[4],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_5,$a_5 # mul_add_c(a[5],b[5],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$a_7 # mul_add_c2(a[4],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,10*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_5,$a_6 # mul_add_c2(a[5],b[6],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_7,$a_5 # mul_add_c2(a[7],b[5],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,11*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_6,$a_6 # mul_add_c(a[6],b[6],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$a_7 # mul_add_c2(a[6],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,12*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_7,$a_7 # mul_add_c(a[7],b[7],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,13*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ $ST $c_3,14*$BNSZ($a0)
+ $ST $c_1,15*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_sqr_comba8
+
+.align 5
+.globl bn_sqr_comba4
+.ent bn_sqr_comba4
+bn_sqr_comba4:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $a_1,$BNSZ($a1)
+ $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_2,2*$BNSZ($a1)
+ $LD $a_3,3*$BNSZ($a1)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $c_3,$t_2,$at
+ $ST $c_2,$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+	$MULTU	$a_1,$a_2		# mul_add_c2(a[1],b[2],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ $ST $c_1,6*$BNSZ($a0)
+ $ST $c_2,7*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_sqr_comba4
+___
+print $code;
+close STDOUT;
diff --git a/crypto/bn/asm/mips3-mont.pl b/crypto/bn/asm/mips3-mont.pl
new file mode 100755
index 000000000000..8f9156e02af3
--- /dev/null
+++ b/crypto/bn/asm/mips3-mont.pl
@@ -0,0 +1,327 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# This module doesn't present direct interest for OpenSSL, because it
+# doesn't provide better performance for longer keys. While 512-bit
+# RSA private key operations are 40% faster, 1024-bit ones are hardly
+# faster at all, while longer key operations are slower by up to 20%.
+# It might be of interest to embedded system developers though, as
+# it's smaller than 1KB, yet offers ~3x improvement over compiler
+# generated code.
+#
+# The module targets N32 and N64 MIPS ABIs and currently is a bit
+# IRIX-centric, i.e. is likely to require adaptation for other OSes.
+
+# int bn_mul_mont(
+$rp="a0"; # BN_ULONG *rp,
+$ap="a1"; # const BN_ULONG *ap,
+$bp="a2"; # const BN_ULONG *bp,
+$np="a3"; # const BN_ULONG *np,
+$n0="a4"; # const BN_ULONG *n0,
+$num="a5"; # int num);
+
+$lo0="a6";
+$hi0="a7";
+$lo1="v0";
+$hi1="v1";
+$aj="t0";
+$bi="t1";
+$nj="t2";
+$tp="t3";
+$alo="s0";
+$ahi="s1";
+$nlo="s2";
+$nhi="s3";
+$tj="s4";
+$i="s5";
+$j="s6";
+$fp="t8";
+$m1="t9";
+
+$FRAME=8*(2+8);
+
+$code=<<___;
+#include <asm.h>
+#include <regdef.h>
+
+.text
+
+.set noat
+.set reorder
+
+.align 5
+.globl bn_mul_mont
+.ent bn_mul_mont
+bn_mul_mont:
+ .set noreorder
+ PTR_SUB sp,64
+ move $fp,sp
+ .frame $fp,64,ra
+ slt AT,$num,4
+ li v0,0
+ beqzl AT,.Lproceed
+ nop
+ jr ra
+ PTR_ADD sp,$fp,64
+ .set reorder
+.align 5
+.Lproceed:
+ ld $n0,0($n0)
+ ld $bi,0($bp) # bp[0]
+ ld $aj,0($ap) # ap[0]
+ ld $nj,0($np) # np[0]
+ PTR_SUB sp,16 # place for two extra words
+ sll $num,3
+ li AT,-4096
+ PTR_SUB sp,$num
+ and sp,AT
+
+ sd s0,0($fp)
+ sd s1,8($fp)
+ sd s2,16($fp)
+ sd s3,24($fp)
+ sd s4,32($fp)
+ sd s5,40($fp)
+ sd s6,48($fp)
+ sd s7,56($fp)
+
+ dmultu $aj,$bi
+ ld $alo,8($ap)
+ ld $nlo,8($np)
+ mflo $lo0
+ mfhi $hi0
+ dmultu $lo0,$n0
+ mflo $m1
+
+ dmultu $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ dmultu $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+ dmultu $nlo,$m1
+ daddu $lo1,$lo0
+ sltu AT,$lo1,$lo0
+ daddu $hi1,AT
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,sp
+ li $j,16
+.align 4
+.L1st:
+ .set noreorder
+ PTR_ADD $aj,$ap,$j
+ ld $aj,($aj)
+ PTR_ADD $nj,$np,$j
+ ld $nj,($nj)
+
+ dmultu $aj,$bi
+ daddu $lo0,$alo,$hi0
+ daddu $lo1,$nlo,$hi1
+ sltu AT,$lo0,$hi0
+ sltu s7,$lo1,$hi1
+ daddu $hi0,$ahi,AT
+ daddu $hi1,$nhi,s7
+ mflo $alo
+ mfhi $ahi
+
+ daddu $lo1,$lo0
+ sltu AT,$lo1,$lo0
+ dmultu $nj,$m1
+ daddu $hi1,AT
+ addu $j,8
+ sd $lo1,($tp)
+ sltu s7,$j,$num
+ mflo $nlo
+ mfhi $nhi
+
+ bnez s7,.L1st
+ PTR_ADD $tp,8
+ .set reorder
+
+ daddu $lo0,$alo,$hi0
+ sltu AT,$lo0,$hi0
+ daddu $hi0,$ahi,AT
+
+ daddu $lo1,$nlo,$hi1
+ sltu s7,$lo1,$hi1
+ daddu $hi1,$nhi,s7
+ daddu $lo1,$lo0
+ sltu AT,$lo1,$lo0
+ daddu $hi1,AT
+
+ sd $lo1,($tp)
+
+ daddu $hi1,$hi0
+ sltu AT,$hi1,$hi0
+ sd $hi1,8($tp)
+ sd AT,16($tp)
+
+ li $i,8
+.align 4
+.Louter:
+ PTR_ADD $bi,$bp,$i
+ ld $bi,($bi)
+ ld $aj,($ap)
+ ld $alo,8($ap)
+ ld $tj,(sp)
+
+ dmultu $aj,$bi
+ ld $nj,($np)
+ ld $nlo,8($np)
+ mflo $lo0
+ mfhi $hi0
+ daddu $lo0,$tj
+ dmultu $lo0,$n0
+ sltu AT,$lo0,$tj
+ daddu $hi0,AT
+ mflo $m1
+
+ dmultu $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ dmultu $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+
+ dmultu $nlo,$m1
+ daddu $lo1,$lo0
+ sltu AT,$lo1,$lo0
+ daddu $hi1,AT
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,sp
+ li $j,16
+ ld $tj,8($tp)
+.align 4
+.Linner:
+ .set noreorder
+ PTR_ADD $aj,$ap,$j
+ ld $aj,($aj)
+ PTR_ADD $nj,$np,$j
+ ld $nj,($nj)
+
+ dmultu $aj,$bi
+ daddu $lo0,$alo,$hi0
+ daddu $lo1,$nlo,$hi1
+ sltu AT,$lo0,$hi0
+ sltu s7,$lo1,$hi1
+ daddu $hi0,$ahi,AT
+ daddu $hi1,$nhi,s7
+ mflo $alo
+ mfhi $ahi
+
+ daddu $lo0,$tj
+ addu $j,8
+ dmultu $nj,$m1
+ sltu AT,$lo0,$tj
+ daddu $lo1,$lo0
+ daddu $hi0,AT
+ sltu s7,$lo1,$lo0
+ ld $tj,16($tp)
+ daddu $hi1,s7
+ sltu AT,$j,$num
+ mflo $nlo
+ mfhi $nhi
+ sd $lo1,($tp)
+ bnez AT,.Linner
+ PTR_ADD $tp,8
+ .set reorder
+
+ daddu $lo0,$alo,$hi0
+ sltu AT,$lo0,$hi0
+ daddu $hi0,$ahi,AT
+ daddu $lo0,$tj
+ sltu s7,$lo0,$tj
+ daddu $hi0,s7
+
+ ld $tj,16($tp)
+ daddu $lo1,$nlo,$hi1
+ sltu AT,$lo1,$hi1
+ daddu $hi1,$nhi,AT
+ daddu $lo1,$lo0
+ sltu s7,$lo1,$lo0
+ daddu $hi1,s7
+ sd $lo1,($tp)
+
+ daddu $lo1,$hi1,$hi0
+ sltu $hi1,$lo1,$hi0
+ daddu $lo1,$tj
+ sltu AT,$lo1,$tj
+ daddu $hi1,AT
+ sd $lo1,8($tp)
+ sd $hi1,16($tp)
+
+ addu $i,8
+ sltu s7,$i,$num
+ bnez s7,.Louter
+
+ .set noreorder
+ PTR_ADD $tj,sp,$num # &tp[num]
+ move $tp,sp
+ move $ap,sp
+ li $hi0,0 # clear borrow bit
+
+.align 4
+.Lsub: ld $lo0,($tp)
+ ld $lo1,($np)
+ PTR_ADD $tp,8
+ PTR_ADD $np,8
+ dsubu $lo1,$lo0,$lo1 # tp[i]-np[i]
+ sgtu AT,$lo1,$lo0
+ dsubu $lo0,$lo1,$hi0
+ sgtu $hi0,$lo0,$lo1
+ sd $lo0,($rp)
+ or $hi0,AT
+ sltu AT,$tp,$tj
+ bnez AT,.Lsub
+ PTR_ADD $rp,8
+
+ dsubu $hi0,$hi1,$hi0 # handle upmost overflow bit
+ move $tp,sp
+ PTR_SUB $rp,$num # restore rp
+ not $hi1,$hi0
+
+ and $ap,$hi0,sp
+ and $bp,$hi1,$rp
+ or $ap,$ap,$bp # ap=borrow?tp:rp
+
+.align 4
+.Lcopy: ld $aj,($ap)
+ PTR_ADD $ap,8
+ PTR_ADD $tp,8
+ sd zero,-8($tp)
+ sltu AT,$tp,$tj
+ sd $aj,($rp)
+ bnez AT,.Lcopy
+ PTR_ADD $rp,8
+
+ ld s0,0($fp)
+ ld s1,8($fp)
+ ld s2,16($fp)
+ ld s3,24($fp)
+ ld s4,32($fp)
+ ld s5,40($fp)
+ ld s6,48($fp)
+ ld s7,56($fp)
+ li v0,1
+ jr ra
+ PTR_ADD sp,$fp,64
+ .set reorder
+END(bn_mul_mont)
+.rdata
+.asciiz "Montgomery Multiplication for MIPS III/IV, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+print $code;
+close STDOUT;
diff --git a/crypto/bn/asm/modexp512-x86_64.pl b/crypto/bn/asm/modexp512-x86_64.pl
new file mode 100755
index 000000000000..54aeb01921e3
--- /dev/null
+++ b/crypto/bn/asm/modexp512-x86_64.pl
@@ -0,0 +1,1496 @@
+#!/usr/bin/env perl
+#
+# Copyright (c) 2010-2011 Intel Corp.
+# Author: Vinodh.Gopal@intel.com
+# Jim Guilford
+# Erdinc.Ozturk@intel.com
+# Maxim.Perminov@intel.com
+#
+# More information about the algorithm used can be found at:
+# http://www.cse.buffalo.edu/srds2009/escs2009_submission_Gopal.pdf
+#
+# ====================================================================
+# Copyright (c) 2011 The OpenSSL Project. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# 3. All advertising materials mentioning features or use of this
+# software must display the following acknowledgment:
+# "This product includes software developed by the OpenSSL Project
+# for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+#
+# 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+# endorse or promote products derived from this software without
+# prior written permission. For written permission, please contact
+# licensing@OpenSSL.org.
+#
+# 5. Products derived from this software may not be called "OpenSSL"
+# nor may "OpenSSL" appear in their names without prior written
+# permission of the OpenSSL Project.
+#
+# 6. Redistributions of any form whatsoever must retain the following
+# acknowledgment:
+# "This product includes software developed by the OpenSSL Project
+# for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+#
+# THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+# ====================================================================
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+use strict;
+my $code=".text\n\n";
+my $m=0;
+
+#
+# Define x512 macros
+#
+
+#MULSTEP_512_ADD MACRO x7, x6, x5, x4, x3, x2, x1, x0, dst, src1, src2, add_src, tmp1, tmp2
+#
+# uses rax, rdx, and args
+sub MULSTEP_512_ADD
+{
+ my ($x, $DST, $SRC2, $ASRC, $OP, $TMP)=@_;
+ my @X=@$x; # make a copy
+$code.=<<___;
+ mov (+8*0)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [0]
+ mov ($ASRC), $X[0]
+ add %rax, $X[0]
+ adc \$0, %rdx
+ mov $X[0], $DST
+___
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov %rdx, $TMP
+
+ mov (+8*$i)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [$i]
+ mov (+8*$i)($ASRC), $X[$i]
+ add %rax, $X[$i]
+ adc \$0, %rdx
+ add $TMP, $X[$i]
+ adc \$0, %rdx
+___
+}
+$code.=<<___;
+ mov %rdx, $X[0]
+___
+}
+
+#MULSTEP_512 MACRO x7, x6, x5, x4, x3, x2, x1, x0, dst, src2, src1_val, tmp
+#
+# uses rax, rdx, and args
+sub MULSTEP_512
+{
+ my ($x, $DST, $SRC2, $OP, $TMP)=@_;
+ my @X=@$x; # make a copy
+$code.=<<___;
+ mov (+8*0)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [0]
+ add %rax, $X[0]
+ adc \$0, %rdx
+ mov $X[0], $DST
+___
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov %rdx, $TMP
+
+ mov (+8*$i)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [$i]
+ add %rax, $X[$i]
+ adc \$0, %rdx
+ add $TMP, $X[$i]
+ adc \$0, %rdx
+___
+}
+$code.=<<___;
+ mov %rdx, $X[0]
+___
+}
+
+#
+# Swizzle Macros
+#
+
+# macro to copy data from flat space to swizzled table
+#MACRO swizzle pDst, pSrc, tmp1, tmp2
+# pDst and pSrc are modified
+sub swizzle
+{
+ my ($pDst, $pSrc, $cnt, $d0)=@_;
+$code.=<<___;
+ mov \$8, $cnt
+loop_$m:
+ mov ($pSrc), $d0
+ mov $d0#w, ($pDst)
+ shr \$16, $d0
+ mov $d0#w, (+64*1)($pDst)
+ shr \$16, $d0
+ mov $d0#w, (+64*2)($pDst)
+ shr \$16, $d0
+ mov $d0#w, (+64*3)($pDst)
+ lea 8($pSrc), $pSrc
+ lea 64*4($pDst), $pDst
+ dec $cnt
+ jnz loop_$m
+___
+
+ $m++;
+}
+
+# macro to copy data from swizzled table to flat space
+#MACRO unswizzle pDst, pSrc, tmp*3
+sub unswizzle
+{
+ my ($pDst, $pSrc, $cnt, $d0, $d1)=@_;
+$code.=<<___;
+ mov \$4, $cnt
+loop_$m:
+ movzxw (+64*3+256*0)($pSrc), $d0
+ movzxw (+64*3+256*1)($pSrc), $d1
+ shl \$16, $d0
+ shl \$16, $d1
+ mov (+64*2+256*0)($pSrc), $d0#w
+ mov (+64*2+256*1)($pSrc), $d1#w
+ shl \$16, $d0
+ shl \$16, $d1
+ mov (+64*1+256*0)($pSrc), $d0#w
+ mov (+64*1+256*1)($pSrc), $d1#w
+ shl \$16, $d0
+ shl \$16, $d1
+ mov (+64*0+256*0)($pSrc), $d0#w
+ mov (+64*0+256*1)($pSrc), $d1#w
+ mov $d0, (+8*0)($pDst)
+ mov $d1, (+8*1)($pDst)
+ lea 256*2($pSrc), $pSrc
+ lea 8*2($pDst), $pDst
+ sub \$1, $cnt
+ jnz loop_$m
+___
+
+ $m++;
+}
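+
+# A sketch of the layout produced by swizzle/unswizzle above (the helper below
+# is an assumption for illustration and is never called).  Each 64-bit word of
+# a window-table entry is cut into four 16-bit pieces stored 64 bytes apart,
+# and the 32 entries sit 2 bytes apart, so entry g, word i, piece k lands at
+# byte offset 2*g + 256*i + 64*k of Garray.  Reading any entry therefore
+# touches the same set of cache lines, which is the point of the
+# scatter/gather: the window index does not show up in the line-granular
+# access pattern.
+sub swizzle_ref {				# assumes a 64-bit perl
+	my ($g, @qwords) = @_;			# entry index 0..31, then 8 qwords
+	my %cells;				# byte offset in Garray -> 16-bit piece
+	for my $i (0 .. 7) {
+		for my $k (0 .. 3) {
+			$cells{2*$g + 256*$i + 64*$k} = ($qwords[$i] >> (16*$k)) & 0xffff;
+		}
+	}
+	return \%cells;
+}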
+
+#
+# Data Structures
+#
+
+# Reduce Data
+#
+#
+# Offset Value
+# 0C0 Carries
+# 0B8 X2[10]
+# 0B0 X2[9]
+# 0A8 X2[8]
+# 0A0 X2[7]
+# 098 X2[6]
+# 090 X2[5]
+# 088 X2[4]
+# 080 X2[3]
+# 078 X2[2]
+# 070 X2[1]
+# 068 X2[0]
+# 060 X1[12] P[10]
+# 058 X1[11] P[9] Z[8]
+# 050 X1[10] P[8] Z[7]
+# 048 X1[9] P[7] Z[6]
+# 040 X1[8] P[6] Z[5]
+# 038 X1[7] P[5] Z[4]
+# 030 X1[6] P[4] Z[3]
+# 028 X1[5] P[3] Z[2]
+# 020 X1[4] P[2] Z[1]
+# 018 X1[3] P[1] Z[0]
+# 010 X1[2] P[0] Y[2]
+# 008 X1[1] Q[1] Y[1]
+# 000 X1[0] Q[0] Y[0]
+
+my $X1_offset = 0; # 13 qwords
+my $X2_offset = $X1_offset + 13*8; # 11 qwords
+my $Carries_offset = $X2_offset + 11*8; # 1 qword
+my $Q_offset = 0; # 2 qwords
+my $P_offset = $Q_offset + 2*8; # 11 qwords
+my $Y_offset = 0; # 3 qwords
+my $Z_offset = $Y_offset + 3*8; # 9 qwords
+
+my $Red_Data_Size = $Carries_offset + 1*8; # (25 qwords)
+
+#
+# Stack Frame
+#
+#
+# offset value
+# ... <old stack contents>
+# ...
+# 280 Garray
+
+# 278 tmp16[15]
+# ... ...
+# 200 tmp16[0]
+
+# 1F8 tmp[7]
+# ... ...
+# 1C0 tmp[0]
+
+# 1B8 GT[7]
+# ... ...
+# 180 GT[0]
+
+# 178 Reduce Data
+# ... ...
+# 0B8 Reduce Data
+# 0B0 reserved
+# 0A8 reserved
+# 0A0 reserved
+# 098 reserved
+# 090 reserved
+# 088 reduce result addr
+# 080 exp[8]
+
+# ...
+# 048 exp[1]
+# 040 exp[0]
+
+# 038 reserved
+# 030 loop_idx
+# 028 pg
+# 020 i
+# 018 pData ; arg 4
+# 010 pG ; arg 2
+# 008 pResult ; arg 1
+# 000 rsp ; stack pointer before subtract
+
+my $rsp_offset = 0;
+my $pResult_offset = 8*1 + $rsp_offset;
+my $pG_offset = 8*1 + $pResult_offset;
+my $pData_offset = 8*1 + $pG_offset;
+my $i_offset = 8*1 + $pData_offset;
+my $pg_offset = 8*1 + $i_offset;
+my $loop_idx_offset = 8*1 + $pg_offset;
+my $reserved1_offset = 8*1 + $loop_idx_offset;
+my $exp_offset = 8*1 + $reserved1_offset;
+my $red_result_addr_offset= 8*9 + $exp_offset;
+my $reserved2_offset = 8*1 + $red_result_addr_offset;
+my $Reduce_Data_offset = 8*5 + $reserved2_offset;
+my $GT_offset = $Red_Data_Size + $Reduce_Data_offset;
+my $tmp_offset = 8*8 + $GT_offset;
+my $tmp16_offset = 8*8 + $tmp_offset;
+my $garray_offset = 8*16 + $tmp16_offset;
+my $mem_size = 8*8*32 + $garray_offset;
+
+#
+# Offsets within Reduce Data
+#
+#
+# struct MODF_2FOLD_MONT_512_C1_DATA {
+# UINT64 t[8][8];
+# UINT64 m[8];
+# UINT64 m1[8]; /* 2^768 % m */
+# UINT64 m2[8]; /* 2^640 % m */
+# UINT64 k1[2]; /* (- 1/m) % 2^128 */
+# };
+
+my $T = 0;
+my $M = 512; # = 8 * 8 * 8
+my $M1 = 576; # = 8 * 8 * 9 /* += 8 * 8 */
+my $M2 = 640; # = 8 * 8 * 10 /* += 8 * 8 */
+my $K1 = 704; # = 8 * 8 * 11 /* += 8 * 8 */
+
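+# A sketch of how the precomputed constants above could be derived with
+# Math::BigInt (assumed helper, not used by this module).  m1 and m2 are the
+# residues used to fold the top words of a 1024-bit product back down, and k1
+# is the negated inverse that drives the final 2^128 Montgomery step.
+use Math::BigInt;
+sub modf_constants_ref {
+	my ($m) = @_;					# odd 512-bit modulus, Math::BigInt
+	my $r  = Math::BigInt->bone->blsft(128);
+	my $m1 = Math::BigInt->bone->blsft(768)->bmod($m);	# 2^768 % m
+	my $m2 = Math::BigInt->bone->blsft(640)->bmod($m);	# 2^640 % m
+	my $k1 = $m->copy->bmodinv($r)->bneg->bmod($r);		# (-1/m) % 2^128
+	return ($m1, $m2, $k1);
+}
+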
+#
+# FUNCTIONS
+#
+
+{{{
+#
+# MULADD_128x512 : Function to multiply 128-bits (2 qwords) by 512-bits (8 qwords)
+# and add 512-bits (8 qwords)
+# to get 640 bits (10 qwords)
+# Input: 128-bit mul source: [rdi+8*1], rbp
+# 512-bit mul source: [rsi+8*n]
+# 512-bit add source: r15, r14, ..., r9, r8
+# Output: r9, r8, r15, r14, r13, r12, r11, r10, [rcx+8*1], [rcx+8*0]
+# Clobbers all regs except: rcx, rsi, rdi
+$code.=<<___;
+.type MULADD_128x512,\@abi-omnipotent
+.align 16
+MULADD_128x512:
+___
+ &MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx");
+$code.=<<___;
+ mov (+8*1)(%rdi), %rbp
+___
+ &MULSTEP_512([map("%r$_",(9..15,8))], "(+8*1)(%rcx)", "%rsi", "%rbp", "%rbx");
+$code.=<<___;
+ ret
+.size MULADD_128x512,.-MULADD_128x512
+___
+}}}
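+
+# The arithmetic contract of MULADD_128x512 in Math::BigInt terms (assumed
+# helper, never called).  A 128-bit by 512-bit product plus a 512-bit addend
+# is at most (2^512-1)*2^128 and therefore fits the 640-bit (10 qword)
+# output; MULADD_256x512 below plays the same game with a 256-bit multiplier
+# into 768 bits.
+use Math::BigInt;
+sub muladd_128x512_ref {
+	my ($a128, $b512, $c512) = @_;		# Math::BigInt inputs
+	return $a128 * $b512 + $c512;		# always < 2^640
+}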
+
+{{{
+#MULADD_256x512 MACRO pDst, pA, pB, OP, TMP, X7, X6, X5, X4, X3, X2, X1, X0
+#
+# Inputs: pDst: Destination (768 bits, 12 qwords)
+# pA: Multiplicand (1024 bits, 16 qwords)
+# pB: Multiplicand (512 bits, 8 qwords)
+# Dst = Ah * B + Al
+# where Ah is (in qwords) A[15:12] (256 bits) and Al is A[7:0] (512 bits)
+# Results in X3 X2 X1 X0 X7 X6 X5 X4 Dst[3:0]
+# Uses registers: arguments, RAX, RDX
+sub MULADD_256x512
+{
+ my ($pDst, $pA, $pB, $OP, $TMP, $X)=@_;
+$code.=<<___;
+ mov (+8*12)($pA), $OP
+___
+ &MULSTEP_512_ADD($X, "(+8*0)($pDst)", $pB, $pA, $OP, $TMP);
+ push(@$X,shift(@$X));
+
+$code.=<<___;
+ mov (+8*13)($pA), $OP
+___
+ &MULSTEP_512($X, "(+8*1)($pDst)", $pB, $OP, $TMP);
+ push(@$X,shift(@$X));
+
+$code.=<<___;
+ mov (+8*14)($pA), $OP
+___
+ &MULSTEP_512($X, "(+8*2)($pDst)", $pB, $OP, $TMP);
+ push(@$X,shift(@$X));
+
+$code.=<<___;
+ mov (+8*15)($pA), $OP
+___
+ &MULSTEP_512($X, "(+8*3)($pDst)", $pB, $OP, $TMP);
+ push(@$X,shift(@$X));
+}
+
+#
+# mont_reduce(UINT64 *x, /* 1024 bits, 16 qwords */
+# UINT64 *m, /* 512 bits, 8 qwords */
+# MODF_2FOLD_MONT_512_C1_DATA *data,
+# UINT64 *r) /* 512 bits, 8 qwords */
+# Input: x (number to be reduced): tmp16 (Implicit)
+# m (modulus): [pM] (Implicit)
+# data (reduce data): [pData] (Implicit)
+# Output: r (result): Address in [red_res_addr]
+# result also in: r9, r8, r15, r14, r13, r12, r11, r10
+
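+# A sketch of the reduction mont_reduce implements, as read from the code
+# below (Math::BigInt reference only, never called).  The 1024-bit input is
+# folded twice with m1 = 2^768 % m and m2 = 2^640 % m, then one Montgomery
+# step with k1 = (-1/m) % 2^128 divides by 2^128, so the routine returns a
+# value congruent to x * 2^-128 (mod m).  The assembly mops up the stray top
+# carries via the t[][] table and may leave the result above m; the model
+# below simply reduces fully.
+use Math::BigInt;
+sub mont_reduce_ref {
+	my ($x, $m, $m1, $m2, $k1) = @_;	# Math::BigInt values
+	my $r    = Math::BigInt->bone->blsft(128);
+	my $w640 = Math::BigInt->bone->blsft(640);
+	my $w768 = Math::BigInt->bone->blsft(768);
+	my $x1 = ($x / $w768) * $m1 + $x % $w768;	# 1024 -> ~769 bits
+	my $x2 = ($x1 / $w640) * $m2 + $x1 % $w640;	# ~769 -> ~641 bits
+	my $q  = ($x2 * $k1) % $r;			# Montgomery quotient
+	return (($x2 + $q * $m) / $r) % $m;		# == x * 2^-128 mod m
+}
+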
+my @X=map("%r$_",(8..15));
+
+$code.=<<___;
+.type mont_reduce,\@abi-omnipotent
+.align 16
+mont_reduce:
+___
+
+my $STACK_DEPTH = 8;
+ #
+ # X1 = Xh * M1 + Xl
+$code.=<<___;
+ lea (+$Reduce_Data_offset+$X1_offset+$STACK_DEPTH)(%rsp), %rdi # pX1 (Dst) 769 bits, 13 qwords
+ mov (+$pData_offset+$STACK_DEPTH)(%rsp), %rsi # pM1 (Bsrc) 512 bits, 8 qwords
+ add \$$M1, %rsi
+ lea (+$tmp16_offset+$STACK_DEPTH)(%rsp), %rcx # X (Asrc) 1024 bits, 16 qwords
+
+___
+
+ &MULADD_256x512("%rdi", "%rcx", "%rsi", "%rbp", "%rbx", \@X); # rotates @X 4 times
+ # results in r11, r10, r9, r8, r15, r14, r13, r12, X1[3:0]
+
+$code.=<<___;
+ xor %rax, %rax
+ # X1 += xl
+ add (+8*8)(%rcx), $X[4]
+ adc (+8*9)(%rcx), $X[5]
+ adc (+8*10)(%rcx), $X[6]
+ adc (+8*11)(%rcx), $X[7]
+ adc \$0, %rax
+ # X1 is now rax, r11-r8, r15-r12, tmp16[3:0]
+
+ #
+ # check for carry ;; carry stored in rax
+ mov $X[4], (+8*8)(%rdi) # rdi points to X1
+ mov $X[5], (+8*9)(%rdi)
+ mov $X[6], %rbp
+ mov $X[7], (+8*11)(%rdi)
+
+ mov %rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)
+
+ mov (+8*0)(%rdi), $X[4]
+ mov (+8*1)(%rdi), $X[5]
+ mov (+8*2)(%rdi), $X[6]
+ mov (+8*3)(%rdi), $X[7]
+
+ # X1 is now stored in: X1[11], rbp, X1[9:8], r15-r8
+ # rdi -> X1
+ # rsi -> M1
+
+ #
+ # X2 = Xh * M2 + Xl
+ # do first part (X2 = Xh * M2)
+ add \$8*10, %rdi # rdi -> pXh ; 128 bits, 2 qwords
+ # Xh is actually { [rdi+8*1], rbp }
+ add \$`$M2-$M1`, %rsi # rsi -> M2
+ lea (+$Reduce_Data_offset+$X2_offset+$STACK_DEPTH)(%rsp), %rcx # rcx -> pX2 ; 641 bits, 11 qwords
+___
+ unshift(@X,pop(@X)); unshift(@X,pop(@X));
+$code.=<<___;
+
+ call MULADD_128x512 # args in rcx, rdi / rbp, rsi, r15-r8
+ # result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]
+ mov (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rax
+
+ # X2 += Xl
+ add (+8*8-8*10)(%rdi), $X[6] # (-8*10) is to adjust rdi -> Xh to Xl
+ adc (+8*9-8*10)(%rdi), $X[7]
+ mov $X[6], (+8*8)(%rcx)
+ mov $X[7], (+8*9)(%rcx)
+
+ adc %rax, %rax
+ mov %rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)
+
+ lea (+$Reduce_Data_offset+$Q_offset+$STACK_DEPTH)(%rsp), %rdi # rdi -> pQ ; 128 bits, 2 qwords
+ add \$`$K1-$M2`, %rsi # rsi -> pK1 ; 128 bits, 2 qwords
+
+ # MUL_128x128t128 rdi, rcx, rsi ; Q = X2 * K1 (bottom half)
+ # B1:B0 = rsi[1:0] = K1[1:0]
+ # A1:A0 = rcx[1:0] = X2[1:0]
+ # Result = rdi[1],rbp = Q[1],rbp
+ mov (%rsi), %r8 # B0
+ mov (+8*1)(%rsi), %rbx # B1
+
+ mov (%rcx), %rax # A0
+ mul %r8 # B0
+ mov %rax, %rbp
+ mov %rdx, %r9
+
+ mov (+8*1)(%rcx), %rax # A1
+ mul %r8 # B0
+ add %rax, %r9
+
+ mov (%rcx), %rax # A0
+ mul %rbx # B1
+ add %rax, %r9
+
+ mov %r9, (+8*1)(%rdi)
+ # end MUL_128x128t128
+
+ sub \$`$K1-$M`, %rsi
+
+ mov (%rcx), $X[6]
+ mov (+8*1)(%rcx), $X[7] # r9:r8 = X2[1:0]
+
+ call MULADD_128x512 # args in rcx, rdi / rbp, rsi, r15-r8
+ # result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]
+
+ # load first half of m to rdx, rdi, rbx, rax
+ # moved this here for efficiency
+ mov (+8*0)(%rsi), %rax
+ mov (+8*1)(%rsi), %rbx
+ mov (+8*2)(%rsi), %rdi
+ mov (+8*3)(%rsi), %rdx
+
+ # continue with reduction
+ mov (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rbp
+
+ add (+8*8)(%rcx), $X[6]
+ adc (+8*9)(%rcx), $X[7]
+
+ #accumulate the final carry to rbp
+ adc %rbp, %rbp
+
+ # Add in overflow corrections: R = (X2>>128) += T[overflow]
+ # R = {r9, r8, r15, r14, ..., r10}
+ shl \$3, %rbp
+	mov	(+$pData_offset+$STACK_DEPTH)(%rsp), %rcx	# rcx -> Data (and points to T)
+ add %rcx, %rbp # pT ; 512 bits, 8 qwords, spread out
+
+ # rsi will be used to generate a mask after the addition
+ xor %rsi, %rsi
+
+ add (+8*8*0)(%rbp), $X[0]
+ adc (+8*8*1)(%rbp), $X[1]
+ adc (+8*8*2)(%rbp), $X[2]
+ adc (+8*8*3)(%rbp), $X[3]
+ adc (+8*8*4)(%rbp), $X[4]
+ adc (+8*8*5)(%rbp), $X[5]
+ adc (+8*8*6)(%rbp), $X[6]
+ adc (+8*8*7)(%rbp), $X[7]
+
+ # if there is a carry: rsi = 0xFFFFFFFFFFFFFFFF
+ # if carry is clear: rsi = 0x0000000000000000
+ sbb \$0, %rsi
+
+ # if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
+ and %rsi, %rax
+ and %rsi, %rbx
+ and %rsi, %rdi
+ and %rsi, %rdx
+
+ mov \$1, %rbp
+ sub %rax, $X[0]
+ sbb %rbx, $X[1]
+ sbb %rdi, $X[2]
+ sbb %rdx, $X[3]
+
+ # if there is a borrow: rbp = 0
+ # if there is no borrow: rbp = 1
+ # this is used to save the borrows in between the first half and the 2nd half of the subtraction of m
+ sbb \$0, %rbp
+
+ #load second half of m to rdx, rdi, rbx, rax
+
+ add \$$M, %rcx
+ mov (+8*4)(%rcx), %rax
+ mov (+8*5)(%rcx), %rbx
+ mov (+8*6)(%rcx), %rdi
+ mov (+8*7)(%rcx), %rdx
+
+ # use the rsi mask as before
+ # if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
+ and %rsi, %rax
+ and %rsi, %rbx
+ and %rsi, %rdi
+ and %rsi, %rdx
+
+ # if rbp = 0, there was a borrow before, it is moved to the carry flag
+ # if rbp = 1, there was not a borrow before, carry flag is cleared
+ sub \$1, %rbp
+
+ sbb %rax, $X[4]
+ sbb %rbx, $X[5]
+ sbb %rdi, $X[6]
+ sbb %rdx, $X[7]
+
+ # write R back to memory
+
+ mov (+$red_result_addr_offset+$STACK_DEPTH)(%rsp), %rsi
+ mov $X[0], (+8*0)(%rsi)
+ mov $X[1], (+8*1)(%rsi)
+ mov $X[2], (+8*2)(%rsi)
+ mov $X[3], (+8*3)(%rsi)
+ mov $X[4], (+8*4)(%rsi)
+ mov $X[5], (+8*5)(%rsi)
+ mov $X[6], (+8*6)(%rsi)
+ mov $X[7], (+8*7)(%rsi)
+
+ ret
+.size mont_reduce,.-mont_reduce
+___
+}}}
+
+{{{
+#MUL_512x512 MACRO pDst, pA, pB, x7, x6, x5, x4, x3, x2, x1, x0, tmp*2
+#
+# Inputs: pDst: Destination (1024 bits, 16 qwords)
+# pA: Multiplicand (512 bits, 8 qwords)
+# pB: Multiplicand (512 bits, 8 qwords)
+# Uses registers rax, rdx, args
+# B operand in [pB] and also in x7...x0
+sub MUL_512x512
+{
+ my ($pDst, $pA, $pB, $x, $OP, $TMP, $pDst_o)=@_;
+ my ($pDst, $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
+ my @X=@$x; # make a copy
+
+$code.=<<___;
+ mov (+8*0)($pA), $OP
+
+ mov $X[0], %rax
+ mul $OP # rdx:rax = %OP * [0]
+ mov %rax, (+$pDst_o+8*0)($pDst)
+ mov %rdx, $X[0]
+___
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov $X[$i], %rax
+ mul $OP # rdx:rax = %OP * [$i]
+ add %rax, $X[$i-1]
+ adc \$0, %rdx
+ mov %rdx, $X[$i]
+___
+}
+
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov (+8*$i)($pA), $OP
+___
+
+ &MULSTEP_512(\@X, "(+$pDst_o+8*$i)($pDst)", $pB, $OP, $TMP);
+ push(@X,shift(@X));
+}
+
+$code.=<<___;
+ mov $X[0], (+$pDst_o+8*8)($pDst)
+ mov $X[1], (+$pDst_o+8*9)($pDst)
+ mov $X[2], (+$pDst_o+8*10)($pDst)
+ mov $X[3], (+$pDst_o+8*11)($pDst)
+ mov $X[4], (+$pDst_o+8*12)($pDst)
+ mov $X[5], (+$pDst_o+8*13)($pDst)
+ mov $X[6], (+$pDst_o+8*14)($pDst)
+ mov $X[7], (+$pDst_o+8*15)($pDst)
+___
+}
+
+#
+# mont_mul_a3b : subroutine to compute (Src1 * Src2) % M (all 512-bits)
+# Input: src1: Address of source 1: rdi
+# src2: Address of source 2: rsi
+# Output: dst: Address of destination: [red_res_addr]
+# src2 and result also in: r9, r8, r15, r14, r13, r12, r11, r10
+# Temp: Clobbers [tmp16], all registers
+$code.=<<___;
+.type mont_mul_a3b,\@abi-omnipotent
+.align 16
+mont_mul_a3b:
+ #
+ # multiply tmp = src1 * src2
+ # For multiply: dst = rcx, src1 = rdi, src2 = rsi
+ # stack depth is extra 8 from call
+___
+ &MUL_512x512("%rsp+$tmp16_offset+8", "%rdi", "%rsi", [map("%r$_",(10..15,8..9))], "%rbp", "%rbx");
+$code.=<<___;
+ #
+ # Dst = tmp % m
+ # Call reduce(tmp, m, data, dst)
+
+ # tail recursion optimization: jmp to mont_reduce and return from there
+ jmp mont_reduce
+ # call mont_reduce
+ # ret
+.size mont_mul_a3b,.-mont_mul_a3b
+___
+}}}
+
+{{{
+#SQR_512 MACRO pDest, pA, x7, x6, x5, x4, x3, x2, x1, x0, tmp*4
+#
+# Input in memory [pA] and also in x7...x0
+# Uses all argument registers plus rax and rdx
+#
+# This version computes all of the off-diagonal terms into memory,
+# and then it adds in the diagonal terms
+
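+# A sketch of the squaring strategy described above: accumulate each
+# off-diagonal product once, double the lot, then add in the diagonal squares
+# (Math::BigInt reference on a 64-bit perl; assumed helper, never called).
+use Math::BigInt;
+sub sqr_ref {
+	my @a = @_;				# little-endian 64-bit words
+	my $acc = Math::BigInt->bzero;
+	for my $i (0 .. $#a) {
+		for my $j ($i + 1 .. $#a) {	# off-diagonal terms, counted once
+			$acc += (Math::BigInt->new($a[$i]) * $a[$j]) << (64 * ($i + $j));
+		}
+	}
+	$acc = $acc << 1;			# double them
+	for my $i (0 .. $#a) {			# add in the diagonal squares
+		$acc += (Math::BigInt->new($a[$i]) * $a[$i]) << (128 * $i);
+	}
+	return $acc;				# == (sum a[i]*2^(64*i))^2
+}
+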
+sub SQR_512
+{
+ my ($pDst, $pA, $x, $A, $tmp, $x7, $x6, $pDst_o)=@_;
+ my ($pDst, $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
+ my @X=@$x; # make a copy
+$code.=<<___;
+ # ------------------
+ # first pass 01...07
+ # ------------------
+ mov $X[0], $A
+
+ mov $X[1],%rax
+ mul $A
+ mov %rax, (+$pDst_o+8*1)($pDst)
+___
+for(my $i=2;$i<8;$i++) {
+$code.=<<___;
+ mov %rdx, $X[$i-2]
+ mov $X[$i],%rax
+ mul $A
+ add %rax, $X[$i-2]
+ adc \$0, %rdx
+___
+}
+$code.=<<___;
+ mov %rdx, $x7
+
+ mov $X[0], (+$pDst_o+8*2)($pDst)
+
+ # ------------------
+ # second pass 12...17
+ # ------------------
+
+ mov (+8*1)($pA), $A
+
+ mov (+8*2)($pA),%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ mov $X[1], (+$pDst_o+8*3)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*3)($pA),%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ add $X[0], $X[2]
+ adc \$0, %rdx
+ mov $X[2], (+$pDst_o+8*4)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*4)($pA),%rax
+ mul $A
+ add %rax, $X[3]
+ adc \$0, %rdx
+ add $X[0], $X[3]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $X[4]
+ adc \$0, %rdx
+ add $X[0], $X[4]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ add $X[0], $X[5]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $x7
+ adc \$0, %rdx
+ add $X[0], $x7
+ adc \$0, %rdx
+
+ mov %rdx, $X[1]
+
+ # ------------------
+ # third pass 23...27
+ # ------------------
+ mov (+8*2)($pA), $A
+
+ mov (+8*3)($pA),%rax
+ mul $A
+ add %rax, $X[3]
+ adc \$0, %rdx
+ mov $X[3], (+$pDst_o+8*5)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*4)($pA),%rax
+ mul $A
+ add %rax, $X[4]
+ adc \$0, %rdx
+ add $X[0], $X[4]
+ adc \$0, %rdx
+ mov $X[4], (+$pDst_o+8*6)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ add $X[0], $X[5]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $x7
+ adc \$0, %rdx
+ add $X[0], $x7
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ add $X[0], $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $X[2]
+
+ # ------------------
+ # fourth pass 34...37
+ # ------------------
+
+ mov (+8*3)($pA), $A
+
+ mov (+8*4)($pA),%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ mov $X[5], (+$pDst_o+8*7)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $x7
+ adc \$0, %rdx
+ add $X[0], $x7
+ adc \$0, %rdx
+ mov $x7, (+$pDst_o+8*8)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ add $X[0], $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ add $X[0], $X[2]
+ adc \$0, %rdx
+
+ mov %rdx, $X[5]
+
+ # ------------------
+ # fifth pass 45...47
+ # ------------------
+ mov (+8*4)($pA), $A
+
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ mov $X[1], (+$pDst_o+8*9)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ add $X[0], $X[2]
+ adc \$0, %rdx
+ mov $X[2], (+$pDst_o+8*10)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ add $X[0], $X[5]
+ adc \$0, %rdx
+
+ mov %rdx, $X[1]
+
+ # ------------------
+ # sixth pass 56...57
+ # ------------------
+ mov (+8*5)($pA), $A
+
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ mov $X[5], (+$pDst_o+8*11)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ add $X[0], $X[1]
+ adc \$0, %rdx
+ mov $X[1], (+$pDst_o+8*12)($pDst)
+
+ mov %rdx, $X[2]
+
+ # ------------------
+ # seventh pass 67
+ # ------------------
+ mov $X[6], $A
+
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ mov $X[2], (+$pDst_o+8*13)($pDst)
+
+ mov %rdx, (+$pDst_o+8*14)($pDst)
+
+ # start finalize (add in squares, and double off-terms)
+ mov (+$pDst_o+8*1)($pDst), $X[0]
+ mov (+$pDst_o+8*2)($pDst), $X[1]
+ mov (+$pDst_o+8*3)($pDst), $X[2]
+ mov (+$pDst_o+8*4)($pDst), $X[3]
+ mov (+$pDst_o+8*5)($pDst), $X[4]
+ mov (+$pDst_o+8*6)($pDst), $X[5]
+
+ mov (+8*3)($pA), %rax
+ mul %rax
+ mov %rax, $x6
+ mov %rdx, $X[6]
+
+ add $X[0], $X[0]
+ adc $X[1], $X[1]
+ adc $X[2], $X[2]
+ adc $X[3], $X[3]
+ adc $X[4], $X[4]
+ adc $X[5], $X[5]
+ adc \$0, $X[6]
+
+ mov (+8*0)($pA), %rax
+ mul %rax
+ mov %rax, (+$pDst_o+8*0)($pDst)
+ mov %rdx, $A
+
+ mov (+8*1)($pA), %rax
+ mul %rax
+
+ add $A, $X[0]
+ adc %rax, $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $A
+ mov $X[0], (+$pDst_o+8*1)($pDst)
+ mov $X[1], (+$pDst_o+8*2)($pDst)
+
+ mov (+8*2)($pA), %rax
+ mul %rax
+
+ add $A, $X[2]
+ adc %rax, $X[3]
+ adc \$0, %rdx
+
+ mov %rdx, $A
+
+ mov $X[2], (+$pDst_o+8*3)($pDst)
+ mov $X[3], (+$pDst_o+8*4)($pDst)
+
+ xor $tmp, $tmp
+ add $A, $X[4]
+ adc $x6, $X[5]
+ adc \$0, $tmp
+
+ mov $X[4], (+$pDst_o+8*5)($pDst)
+ mov $X[5], (+$pDst_o+8*6)($pDst)
+
+ # %%tmp has 0/1 in column 7
+ # %%A6 has a full value in column 7
+
+ mov (+$pDst_o+8*7)($pDst), $X[0]
+ mov (+$pDst_o+8*8)($pDst), $X[1]
+ mov (+$pDst_o+8*9)($pDst), $X[2]
+ mov (+$pDst_o+8*10)($pDst), $X[3]
+ mov (+$pDst_o+8*11)($pDst), $X[4]
+ mov (+$pDst_o+8*12)($pDst), $X[5]
+ mov (+$pDst_o+8*13)($pDst), $x6
+ mov (+$pDst_o+8*14)($pDst), $x7
+
+ mov $X[7], %rax
+ mul %rax
+ mov %rax, $X[7]
+ mov %rdx, $A
+
+ add $X[0], $X[0]
+ adc $X[1], $X[1]
+ adc $X[2], $X[2]
+ adc $X[3], $X[3]
+ adc $X[4], $X[4]
+ adc $X[5], $X[5]
+ adc $x6, $x6
+ adc $x7, $x7
+ adc \$0, $A
+
+ add $tmp, $X[0]
+
+ mov (+8*4)($pA), %rax
+ mul %rax
+
+ add $X[6], $X[0]
+ adc %rax, $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $tmp
+
+ mov $X[0], (+$pDst_o+8*7)($pDst)
+ mov $X[1], (+$pDst_o+8*8)($pDst)
+
+ mov (+8*5)($pA), %rax
+ mul %rax
+
+ add $tmp, $X[2]
+ adc %rax, $X[3]
+ adc \$0, %rdx
+
+ mov %rdx, $tmp
+
+ mov $X[2], (+$pDst_o+8*9)($pDst)
+ mov $X[3], (+$pDst_o+8*10)($pDst)
+
+ mov (+8*6)($pA), %rax
+ mul %rax
+
+ add $tmp, $X[4]
+ adc %rax, $X[5]
+ adc \$0, %rdx
+
+ mov $X[4], (+$pDst_o+8*11)($pDst)
+ mov $X[5], (+$pDst_o+8*12)($pDst)
+
+ add %rdx, $x6
+ adc $X[7], $x7
+ adc \$0, $A
+
+ mov $x6, (+$pDst_o+8*13)($pDst)
+ mov $x7, (+$pDst_o+8*14)($pDst)
+ mov $A, (+$pDst_o+8*15)($pDst)
+___
+}
+
+#
+# sqr_reduce: subroutine to compute Result = reduce(Result * Result)
+#
+# input and result also in: r9, r8, r15, r14, r13, r12, r11, r10
+#
+$code.=<<___;
+.type sqr_reduce,\@abi-omnipotent
+.align 16
+sqr_reduce:
+ mov (+$pResult_offset+8)(%rsp), %rcx
+___
+ &SQR_512("%rsp+$tmp16_offset+8", "%rcx", [map("%r$_",(10..15,8..9))], "%rbx", "%rbp", "%rsi", "%rdi");
+$code.=<<___;
+ # tail recursion optimization: jmp to mont_reduce and return from there
+ jmp mont_reduce
+ # call mont_reduce
+ # ret
+.size sqr_reduce,.-sqr_reduce
+___
+}}}
+
+#
+# MAIN FUNCTION
+#
+
+#mod_exp_512(UINT64 *result, /* 512 bits, 8 qwords */
+# UINT64 *g, /* 512 bits, 8 qwords */
+# UINT64 *exp, /* 512 bits, 8 qwords */
+# struct mod_ctx_512 *data)
+
+# window size = 5
+# table size = 2^5 = 32
+#table_entries equ 32
+#table_size equ table_entries * 8
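+
+# A sketch of the exponentiation schedule used below, with plain modular
+# arithmetic standing in for the Montgomery form and the swizzled,
+# constant-stride table (Math::BigInt reference; assumed helper, never
+# called).  The result starts from the top 5 exponent bits, a short 2-bit
+# window absorbs bits 506:505 (512 is not a multiple of 5), and every later
+# step is five squarings plus one multiply by a 5-bit table entry.
+use Math::BigInt;
+sub mod_exp_512_ref {
+	my ($g, $e, $m) = @_;				# Math::BigInt, m odd, 512-bit
+	my @tbl = (Math::BigInt->bone);			# g^0 .. g^31 mod m
+	push @tbl, ($tbl[-1] * $g) % $m for 1 .. 31;
+	my $res = $tbl[($e >> 507) & 0x1f];		# bits 511:507
+	$res = ($res * $res) % $m for 1 .. 2;
+	$res = ($res * $tbl[($e >> 505) & 0x3]) % $m;	# bits 506:505
+	for (my $i = 500; $i >= 0; $i -= 5) {
+		$res = ($res * $res) % $m for 1 .. 5;
+		$res = ($res * $tbl[($e >> $i) & 0x1f]) % $m;
+	}
+	return $res;					# g^e mod m
+}
+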
+$code.=<<___;
+.globl mod_exp_512
+.type mod_exp_512,\@function,4
+mod_exp_512:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ # adjust stack down and then align it with cache boundary
+ mov %rsp, %r8
+ sub \$$mem_size, %rsp
+ and \$-64, %rsp
+
+ # store previous stack pointer and arguments
+ mov %r8, (+$rsp_offset)(%rsp)
+ mov %rdi, (+$pResult_offset)(%rsp)
+ mov %rsi, (+$pG_offset)(%rsp)
+ mov %rcx, (+$pData_offset)(%rsp)
+.Lbody:
+ # transform g into montgomery space
+ # GT = reduce(g * C2) = reduce(g * (2^256))
+ # reduce expects to have the input in [tmp16]
+ pxor %xmm4, %xmm4
+ movdqu (+16*0)(%rsi), %xmm0
+ movdqu (+16*1)(%rsi), %xmm1
+ movdqu (+16*2)(%rsi), %xmm2
+ movdqu (+16*3)(%rsi), %xmm3
+ movdqa %xmm4, (+$tmp16_offset+16*0)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*1)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp)
+ movdqa %xmm0, (+$tmp16_offset+16*2)(%rsp)
+ movdqa %xmm1, (+$tmp16_offset+16*3)(%rsp)
+ movdqa %xmm2, (+$tmp16_offset+16*4)(%rsp)
+ movdqa %xmm3, (+$tmp16_offset+16*5)(%rsp)
+
+ # load pExp before rdx gets blown away
+ movdqu (+16*0)(%rdx), %xmm0
+ movdqu (+16*1)(%rdx), %xmm1
+ movdqu (+16*2)(%rdx), %xmm2
+ movdqu (+16*3)(%rdx), %xmm3
+
+ lea (+$GT_offset)(%rsp), %rbx
+ mov %rbx, (+$red_result_addr_offset)(%rsp)
+ call mont_reduce
+
+ # Initialize tmp = C
+ lea (+$tmp_offset)(%rsp), %rcx
+ xor %rax, %rax
+ mov %rax, (+8*0)(%rcx)
+ mov %rax, (+8*1)(%rcx)
+ mov %rax, (+8*3)(%rcx)
+ mov %rax, (+8*4)(%rcx)
+ mov %rax, (+8*5)(%rcx)
+ mov %rax, (+8*6)(%rcx)
+ mov %rax, (+8*7)(%rcx)
+ mov %rax, (+$exp_offset+8*8)(%rsp)
+ movq \$1, (+8*2)(%rcx)
+
+ lea (+$garray_offset)(%rsp), %rbp
+ mov %rcx, %rsi # pTmp
+ mov %rbp, %rdi # Garray[][0]
+___
+
+ &swizzle("%rdi", "%rcx", "%rax", "%rbx");
+
+ # for (rax = 31; rax != 0; rax--) {
+ # tmp = reduce(tmp * G)
+ # swizzle(pg, tmp);
+ # pg += 2; }
+$code.=<<___;
+ mov \$31, %rax
+ mov %rax, (+$i_offset)(%rsp)
+ mov %rbp, (+$pg_offset)(%rsp)
+ # rsi -> pTmp
+ mov %rsi, (+$red_result_addr_offset)(%rsp)
+ mov (+8*0)(%rsi), %r10
+ mov (+8*1)(%rsi), %r11
+ mov (+8*2)(%rsi), %r12
+ mov (+8*3)(%rsi), %r13
+ mov (+8*4)(%rsi), %r14
+ mov (+8*5)(%rsi), %r15
+ mov (+8*6)(%rsi), %r8
+ mov (+8*7)(%rsi), %r9
+init_loop:
+ lea (+$GT_offset)(%rsp), %rdi
+ call mont_mul_a3b
+ lea (+$tmp_offset)(%rsp), %rsi
+ mov (+$pg_offset)(%rsp), %rbp
+ add \$2, %rbp
+ mov %rbp, (+$pg_offset)(%rsp)
+ mov %rsi, %rcx # rcx = rsi = addr of tmp
+___
+
+ &swizzle("%rbp", "%rcx", "%rax", "%rbx");
+$code.=<<___;
+ mov (+$i_offset)(%rsp), %rax
+ sub \$1, %rax
+ mov %rax, (+$i_offset)(%rsp)
+ jne init_loop
+
+ #
+ # Copy exponent onto stack
+ movdqa %xmm0, (+$exp_offset+16*0)(%rsp)
+ movdqa %xmm1, (+$exp_offset+16*1)(%rsp)
+ movdqa %xmm2, (+$exp_offset+16*2)(%rsp)
+ movdqa %xmm3, (+$exp_offset+16*3)(%rsp)
+
+
+ #
+ # Do exponentiation
+ # Initialize result to G[exp{511:507}]
+ mov (+$exp_offset+62)(%rsp), %eax
+ mov %rax, %rdx
+ shr \$11, %rax
+ and \$0x07FF, %edx
+ mov %edx, (+$exp_offset+62)(%rsp)
+ lea (+$garray_offset)(%rsp,%rax,2), %rsi
+ mov (+$pResult_offset)(%rsp), %rdx
+___
+
+ &unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");
+
+ #
+ # Loop variables
+ # rcx = [loop_idx] = index: 510-5 to 0 by 5
+$code.=<<___;
+ movq \$505, (+$loop_idx_offset)(%rsp)
+
+ mov (+$pResult_offset)(%rsp), %rcx
+ mov %rcx, (+$red_result_addr_offset)(%rsp)
+ mov (+8*0)(%rcx), %r10
+ mov (+8*1)(%rcx), %r11
+ mov (+8*2)(%rcx), %r12
+ mov (+8*3)(%rcx), %r13
+ mov (+8*4)(%rcx), %r14
+ mov (+8*5)(%rcx), %r15
+ mov (+8*6)(%rcx), %r8
+ mov (+8*7)(%rcx), %r9
+ jmp sqr_2
+
+main_loop_a3b:
+ call sqr_reduce
+ call sqr_reduce
+ call sqr_reduce
+sqr_2:
+ call sqr_reduce
+ call sqr_reduce
+
+ #
+ # Do multiply, first look up proper value in Garray
+ mov (+$loop_idx_offset)(%rsp), %rcx # bit index
+ mov %rcx, %rax
+ shr \$4, %rax # rax is word pointer
+ mov (+$exp_offset)(%rsp,%rax,2), %edx
+ and \$15, %rcx
+ shrq %cl, %rdx
+ and \$0x1F, %rdx
+
+ lea (+$garray_offset)(%rsp,%rdx,2), %rsi
+ lea (+$tmp_offset)(%rsp), %rdx
+ mov %rdx, %rdi
+___
+
+ &unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");
+ # rdi = tmp = pG
+
+ #
+ # Call mod_mul_a1(pDst, pSrc1, pSrc2, pM, pData)
+ # result result pG M Data
+$code.=<<___;
+ mov (+$pResult_offset)(%rsp), %rsi
+ call mont_mul_a3b
+
+ #
+ # finish loop
+ mov (+$loop_idx_offset)(%rsp), %rcx
+ sub \$5, %rcx
+ mov %rcx, (+$loop_idx_offset)(%rsp)
+ jge main_loop_a3b
+
+ #
+
+end_main_loop_a3b:
+ # transform result out of Montgomery space
+ # result = reduce(result)
+ mov (+$pResult_offset)(%rsp), %rdx
+ pxor %xmm4, %xmm4
+ movdqu (+16*0)(%rdx), %xmm0
+ movdqu (+16*1)(%rdx), %xmm1
+ movdqu (+16*2)(%rdx), %xmm2
+ movdqu (+16*3)(%rdx), %xmm3
+ movdqa %xmm4, (+$tmp16_offset+16*4)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*5)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp)
+ movdqa %xmm0, (+$tmp16_offset+16*0)(%rsp)
+ movdqa %xmm1, (+$tmp16_offset+16*1)(%rsp)
+ movdqa %xmm2, (+$tmp16_offset+16*2)(%rsp)
+ movdqa %xmm3, (+$tmp16_offset+16*3)(%rsp)
+ call mont_reduce
+
+	# If result >= m, subtract m
+ # load result into r15:r8
+ mov (+$pResult_offset)(%rsp), %rax
+ mov (+8*0)(%rax), %r8
+ mov (+8*1)(%rax), %r9
+ mov (+8*2)(%rax), %r10
+ mov (+8*3)(%rax), %r11
+ mov (+8*4)(%rax), %r12
+ mov (+8*5)(%rax), %r13
+ mov (+8*6)(%rax), %r14
+ mov (+8*7)(%rax), %r15
+
+ # subtract m
+ mov (+$pData_offset)(%rsp), %rbx
+ add \$$M, %rbx
+
+ sub (+8*0)(%rbx), %r8
+ sbb (+8*1)(%rbx), %r9
+ sbb (+8*2)(%rbx), %r10
+ sbb (+8*3)(%rbx), %r11
+ sbb (+8*4)(%rbx), %r12
+ sbb (+8*5)(%rbx), %r13
+ sbb (+8*6)(%rbx), %r14
+ sbb (+8*7)(%rbx), %r15
+
+ # if Carry is clear, replace result with difference
+ mov (+8*0)(%rax), %rsi
+ mov (+8*1)(%rax), %rdi
+ mov (+8*2)(%rax), %rcx
+ mov (+8*3)(%rax), %rdx
+ cmovnc %r8, %rsi
+ cmovnc %r9, %rdi
+ cmovnc %r10, %rcx
+ cmovnc %r11, %rdx
+ mov %rsi, (+8*0)(%rax)
+ mov %rdi, (+8*1)(%rax)
+ mov %rcx, (+8*2)(%rax)
+ mov %rdx, (+8*3)(%rax)
+
+ mov (+8*4)(%rax), %rsi
+ mov (+8*5)(%rax), %rdi
+ mov (+8*6)(%rax), %rcx
+ mov (+8*7)(%rax), %rdx
+ cmovnc %r12, %rsi
+ cmovnc %r13, %rdi
+ cmovnc %r14, %rcx
+ cmovnc %r15, %rdx
+ mov %rsi, (+8*4)(%rax)
+ mov %rdi, (+8*5)(%rax)
+ mov %rcx, (+8*6)(%rax)
+ mov %rdx, (+8*7)(%rax)
+
+ mov (+$rsp_offset)(%rsp), %rsi
+ mov 0(%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbx
+ mov 40(%rsi),%rbp
+ lea 48(%rsi),%rsp
+.Lepilogue:
+ ret
+.size mod_exp_512, . - mod_exp_512
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+my $rec="%rcx";
+my $frame="%rdx";
+my $context="%r8";
+my $disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mod_exp_512_se_handler,\@abi-omnipotent
+.align 16
+mod_exp_512_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lbody(%rip),%r10
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lepilogue(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+ mov $rsp_offset(%rax),%rax # pull saved Rsp
+
+ mov 32(%rax),%rbx
+ mov 40(%rax),%rbp
+ mov 24(%rax),%r12
+ mov 16(%rax),%r13
+ mov 8(%rax),%r14
+ mov 0(%rax),%r15
+ lea 48(%rax),%rax
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size mod_exp_512_se_handler,.-mod_exp_512_se_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_mod_exp_512
+ .rva .LSEH_end_mod_exp_512
+ .rva .LSEH_info_mod_exp_512
+
+.section .xdata
+.align 8
+.LSEH_info_mod_exp_512:
+ .byte 9,0,0,0
+ .rva mod_exp_512_se_handler
+___
+}
+
+sub reg_part {
+my ($reg,$conv)=@_;
+ if ($reg =~ /%r[0-9]+/) { $reg .= $conv; }
+ elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; }
+ elsif ($conv eq "w") { $reg =~ s/%[er](.+)/%$1/; }
+ elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; }
+ return $reg;
+}
+
+$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/(\(\+[^)]+\))/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/crypto/bn/asm/parisc-mont.pl b/crypto/bn/asm/parisc-mont.pl
new file mode 100755
index 000000000000..4a766a87fb2e
--- /dev/null
+++ b/crypto/bn/asm/parisc-mont.pl
@@ -0,0 +1,993 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# On PA-7100LC this module performs ~90-50% better, less for longer
+# keys, than code generated by gcc 3.2 for PA-RISC 1.1. The latter means
+# that compiler utilized xmpyu instruction to perform 32x32=64-bit
+# multiplication, which in turn means that "baseline" performance was
+# optimal in respect to instruction set capabilities. Fair comparison
+# with vendor compiler is problematic, because OpenSSL doesn't define
+# BN_LLONG [presumably] for historical reasons, which drives compiler
+# toward 4 times 16x16=32-bit multiplications [plus complementary
+# shifts and additions] instead. This means that you should observe
+# several times improvement over code generated by vendor compiler
+# for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
+# improvement coefficient was never collected on PA-7100LC, or any
+# other 1.1 CPU, because I don't have access to such machine with
+# vendor compiler. But to give you a taste, PA-RISC 1.1 code path
+# reportedly outperformed code generated by cc +DA1.1 +O3 by factor
+# of ~5x on PA-8600.
+#
+# On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
+# reportedly ~2x faster than vendor compiler generated code [according
+# to comment in pa-risc2[W].s]. Here comes a catch. Execution core of
+# this implementation is actually 32-bit one, in the sense that it
+# operates on 32-bit values. But pa-risc2[W].s operates on arrays of
+# 64-bit BN_LONGs... How do they interoperate then? No problem. This
+# module picks halves of 64-bit values in reverse order and pretends
+# they were 32-bit BN_LONGs. But can 32-bit core compete with "pure"
+# 64-bit code such as pa-risc2[W].s then? Well, the thing is that
+# 32x32=64-bit multiplication is the best even PA-RISC 2.0 can do,
+# i.e. there is no "wider" multiplication like on most other 64-bit
+# platforms. This means that even being effectively 32-bit, this
+# implementation performs "64-bit" computational task in same amount
+# of arithmetic operations, most notably multiplications. It requires
+# more memory references, most notably to tp[num], but this doesn't
+# seem to exhaust memory port capacity. And indeed, dedicated PA-RISC
+# 2.0 code path, provides virtually same performance as pa-risc2[W].s:
+# it's ~10% better for shortest key length and ~10% worse for longest
+# one.
+#
+# In case it wasn't clear. The module has two distinct code paths:
+# PA-RISC 1.1 and PA-RISC 2.0 ones. Latter features carry-free 64-bit
+# additions and 64-bit integer loads, not to mention specific
+# instruction scheduling. In 64-bit build naturally only 2.0 code path
+# is assembled. In 32-bit application context both code paths are
+# assembled, PA-RISC 2.0 CPU is detected at run-time and proper path
+# is taken automatically. Also, in 32-bit build the module imposes
+# a couple of limitations: vector lengths have to be even and vector
+# addresses have to be 64-bit aligned. Normally neither is a problem:
+# most common key lengths are even and vectors are commonly malloc-ed,
+# which ensures alignment.
+#
+# Special thanks to polarhome.com for providing HP-UX account on
+# PA-RISC 1.1 machine, and to correspondent who chose to remain
+# anonymous for testing the code on PA-RISC 2.0 machine.
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+
+$flavour = shift;
+$output = shift;
+
+open STDOUT,">$output";
+
+if ($flavour =~ /64/) {
+ $LEVEL ="2.0W";
+ $SIZE_T =8;
+ $FRAME_MARKER =80;
+ $SAVED_RP =16;
+ $PUSH ="std";
+ $PUSHMA ="std,ma";
+ $POP ="ldd";
+ $POPMB ="ldd,mb";
+ $BN_SZ =$SIZE_T;
+} else {
+ $LEVEL ="1.1"; #$LEVEL.="\n\t.ALLOW\t2.0";
+ $SIZE_T =4;
+ $FRAME_MARKER =48;
+ $SAVED_RP =20;
+ $PUSH ="stw";
+ $PUSHMA ="stwm";
+ $POP ="ldw";
+ $POPMB ="ldwm";
+ $BN_SZ =$SIZE_T;
+ if (open CONF,"<${dir}../../opensslconf.h") {
+ while(<CONF>) {
+ if (m/#\s*define\s+SIXTY_FOUR_BIT/) {
+ $BN_SZ=8;
+ $LEVEL="2.0";
+ last;
+ }
+ }
+ close CONF;
+ }
+}
+
+$FRAME=8*$SIZE_T+$FRAME_MARKER; # 8 saved regs + frame marker
+ # [+ argument transfer]
+$LOCALS=$FRAME-$FRAME_MARKER;
+$FRAME+=32; # local variables
+
+$tp="%r31";
+$ti1="%r29";
+$ti0="%r28";
+
+$rp="%r26";
+$ap="%r25";
+$bp="%r24";
+$np="%r23";
+$n0="%r22"; # passed through stack in 32-bit
+$num="%r21"; # passed through stack in 32-bit
+$idx="%r20";
+$arrsz="%r19";
+
+$nm1="%r7";
+$nm0="%r6";
+$ab1="%r5";
+$ab0="%r4";
+
+$fp="%r3";
+$hi1="%r2";
+$hi0="%r1";
+
+$xfer=$n0;	# accommodates [-16..15] offset in fld[dw]s
+
+$fm0="%fr4"; $fti=$fm0;
+$fbi="%fr5L";
+$fn0="%fr5R";
+$fai="%fr6"; $fab0="%fr7"; $fab1="%fr8";
+$fni="%fr9"; $fnm0="%fr10"; $fnm1="%fr11";
+
+$code=<<___;
+ .LEVEL $LEVEL
+ .SPACE \$TEXT\$
+ .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
+
+ .EXPORT bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
+ .ALIGN 64
+bn_mul_mont
+ .PROC
+ .CALLINFO FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
+ .ENTRY
+ $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
+ $PUSHMA %r3,$FRAME(%sp)
+ $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
+ $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
+ $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
+ $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
+ $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
+ $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
+ $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
+ ldo -$FRAME(%sp),$fp
+___
+$code.=<<___ if ($SIZE_T==4);
+ ldw `-$FRAME_MARKER-4`($fp),$n0
+ ldw `-$FRAME_MARKER-8`($fp),$num
+ nop
+ nop ; alignment
+___
+$code.=<<___ if ($BN_SZ==4);
+ comiclr,<= 6,$num,%r0 ; are vectors long enough?
+ b L\$abort
+ ldi 0,%r28 ; signal "unhandled"
+ add,ev %r0,$num,$num ; is $num even?
+ b L\$abort
+ nop
+ or $ap,$np,$ti1
+ extru,= $ti1,31,3,%r0 ; are ap and np 64-bit aligned?
+ b L\$abort
+ nop
+ nop ; alignment
+ nop
+
+ fldws 0($n0),${fn0}
+ fldws,ma 4($bp),${fbi} ; bp[0]
+___
+$code.=<<___ if ($BN_SZ==8);
+ comib,> 3,$num,L\$abort ; are vectors long enough?
+ ldi 0,%r28 ; signal "unhandled"
+ addl $num,$num,$num ; I operate on 32-bit values
+
+ fldws 4($n0),${fn0} ; only low part of n0
+ fldws 4($bp),${fbi} ; bp[0] in flipped word order
+___
+$code.=<<___;
+ fldds 0($ap),${fai} ; ap[0,1]
+ fldds 0($np),${fni} ; np[0,1]
+
+ sh2addl $num,%r0,$arrsz
+ ldi 31,$hi0
+ ldo 36($arrsz),$hi1 ; space for tp[num+1]
+ andcm $hi1,$hi0,$hi1 ; align
+ addl $hi1,%sp,%sp
+ $PUSH $fp,-$SIZE_T(%sp)
+
+ ldo `$LOCALS+16`($fp),$xfer
+ ldo `$LOCALS+32+4`($fp),$tp
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[0]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[0]
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ addl $arrsz,$ap,$ap ; point at the end
+ addl $arrsz,$np,$np
+ subi 0,$arrsz,$idx ; j=0
+ ldo 8($idx),$idx ; j++++
+
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+ fstds ${fab1},0($xfer)
+ fstds ${fnm1},8($xfer)
+ flddx $idx($ap),${fai} ; ap[2,3]
+ flddx $idx($np),${fni} ; np[2,3]
+___
+$code.=<<___ if ($BN_SZ==4);
+ mtctl $hi0,%cr11 ; $hi0 still holds 31
+ extrd,u,*= $hi0,%sar,1,$hi0 ; executes on PA-RISC 1.0
+ b L\$parisc11
+ nop
+___
+$code.=<<___; # PA-RISC 2.0 code-path
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+
+ extrd,u $ab0,31,32,$hi0
+ extrd,u $ab0,63,32,$ab0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ ldo 8($idx),$idx ; j++++
+ addl $ab0,$nm0,$nm0 ; low part is discarded
+ extrd,u $nm0,31,32,$hi1
+
+L\$1st
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,63,32,$ab1
+ addl $hi1,$nm1,$nm1
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ flddx $idx($np),${fni} ; np[j,j+1]
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+ addl $hi0,$ab0,$ab0
+ extrd,u $ab0,31,32,$hi0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ stw $nm1,-4($tp) ; tp[j-1]
+ addl $ab0,$nm0,$nm0
+ stw,ma $nm0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$1st ; j++++
+ extrd,u $nm0,31,32,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,63,32,$ab1
+ addl $hi1,$nm1,$nm1
+ ldd -16($xfer),$ab0
+ addl $ab1,$nm1,$nm1
+ ldd -8($xfer),$nm0
+ extrd,u $nm1,31,32,$hi1
+
+ addl $hi0,$ab0,$ab0
+ extrd,u $ab0,31,32,$hi0
+ stw $nm1,-4($tp) ; tp[j-1]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ ldd 0($xfer),$ab1
+ addl $ab0,$nm0,$nm0
+ ldd,mb 8($xfer),$nm1
+ extrd,u $nm0,31,32,$hi1
+ stw,ma $nm0,8($tp) ; tp[j-1]
+
+ ldo -1($num),$num ; i--
+ subi 0,$arrsz,$idx ; j=0
+___
+$code.=<<___ if ($BN_SZ==4);
+ fldws,ma 4($bp),${fbi} ; bp[1]
+___
+$code.=<<___ if ($BN_SZ==8);
+ fldws 0($bp),${fbi} ; bp[1] in flipped word order
+___
+$code.=<<___;
+ flddx $idx($ap),${fai} ; ap[0,1]
+ flddx $idx($np),${fni} ; np[0,1]
+ fldws 8($xfer),${fti}R ; tp[0]
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ addl $hi1,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ xmpyu ${fn0},${fab0}R,${fm0}
+ ldo `$LOCALS+32+4`($fp),$tp
+L\$outer
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer) ; 33-bit value
+ fstds ${fnm0},-8($xfer)
+ flddx $idx($ap),${fai} ; ap[2]
+ flddx $idx($np),${fni} ; np[2]
+ ldo 8($idx),$idx ; j++++
+ ldd -16($xfer),$ab0 ; 33-bit value
+ ldd -8($xfer),$nm0
+ ldw 0($xfer),$hi0 ; high part
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ extrd,u $ab0,31,32,$ti0 ; carry bit
+ extrd,u $ab0,63,32,$ab0
+ fstds ${fab1},0($xfer)
+ addl $ti0,$hi0,$hi0 ; account carry bit
+ fstds ${fnm1},8($xfer)
+ addl $ab0,$nm0,$nm0 ; low part is discarded
+ ldw 0($tp),$ti1 ; tp[1]
+ extrd,u $nm0,31,32,$hi1
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+
+L\$inner
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ti1,$ti1
+ addl $ti1,$ab1,$ab1
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ flddx $idx($np),${fni} ; np[j,j+1]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ ldw 4($tp),$ti0 ; tp[j]
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+ addl $hi0,$ti0,$ti0
+ addl $ti0,$ab0,$ab0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ extrd,u $ab0,31,32,$hi0
+ extrd,u $nm1,31,32,$hi1
+ ldw 8($tp),$ti1 ; tp[j]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ addl $ab0,$nm0,$nm0
+ stw,ma $nm0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$inner ; j++++
+ extrd,u $nm0,31,32,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ti1,$ti1
+ addl $ti1,$ab1,$ab1
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ ldw 4($tp),$ti0 ; tp[j]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ ldd -16($xfer),$ab0
+ ldd -8($xfer),$nm0
+ extrd,u $nm1,31,32,$hi1
+
+ addl $hi0,$ab0,$ab0
+ addl $ti0,$ab0,$ab0
+ stw $nm1,-4($tp) ; tp[j-1]
+ extrd,u $ab0,31,32,$hi0
+ ldw 8($tp),$ti1 ; tp[j]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ ldd 0($xfer),$ab1
+ addl $ab0,$nm0,$nm0
+ ldd,mb 8($xfer),$nm1
+ extrd,u $nm0,31,32,$hi1
+ stw,ma $nm0,8($tp) ; tp[j-1]
+
+ addib,= -1,$num,L\$outerdone ; i--
+ subi 0,$arrsz,$idx ; j=0
+___
+$code.=<<___ if ($BN_SZ==4);
+ fldws,ma 4($bp),${fbi} ; bp[i]
+___
+$code.=<<___ if ($BN_SZ==8);
+ ldi 12,$ti0 ; bp[i] in flipped word order
+ addl,ev %r0,$num,$num
+ ldi -4,$ti0
+ addl $ti0,$bp,$bp
+ fldws 0($bp),${fbi}
+___
+$code.=<<___;
+ flddx $idx($ap),${fai} ; ap[0]
+ addl $hi0,$ab1,$ab1
+ flddx $idx($np),${fni} ; np[0]
+ fldws 8($xfer),${fti}R ; tp[0]
+ addl $ti1,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
+ ldw 4($tp),$ti0 ; tp[j]
+
+ addl $hi1,$nm1,$nm1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ addl $hi1,$hi0,$hi0
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ addl $ti0,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ b L\$outer
+ ldo `$LOCALS+32+4`($fp),$tp
+
+L\$outerdone
+ addl $hi0,$ab1,$ab1
+ addl $ti1,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+
+ ldw 4($tp),$ti0 ; tp[j]
+
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ addl $hi1,$hi0,$hi0
+ addl $ti0,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ ldo `$LOCALS+32`($fp),$tp
+ sub %r0,%r0,%r0 ; clear borrow
+___
+$code.=<<___ if ($BN_SZ==4);
+ ldws,ma 4($tp),$ti0
+ extru,= $rp,31,3,%r0 ; is rp 64-bit aligned?
+ b L\$sub_pa11
+ addl $tp,$arrsz,$tp
+L\$sub
+ ldwx $idx($np),$hi0
+ subb $ti0,$hi0,$hi1
+ ldwx $idx($tp),$ti0
+ addib,<> 4,$idx,L\$sub
+ stws,ma $hi1,4($rp)
+
+ subb $ti0,%r0,$hi1
+ ldo -4($tp),$tp
+___
+$code.=<<___ if ($BN_SZ==8);
+ ldd,ma 8($tp),$ti0
+L\$sub
+ ldd $idx($np),$hi0
+ shrpd $ti0,$ti0,32,$ti0 ; flip word order
+ std $ti0,-8($tp) ; save flipped value
+ sub,db $ti0,$hi0,$hi1
+ ldd,ma 8($tp),$ti0
+ addib,<> 8,$idx,L\$sub
+ std,ma $hi1,8($rp)
+
+ extrd,u $ti0,31,32,$ti0 ; carry in flipped word order
+ sub,db $ti0,%r0,$hi1
+ ldo -8($tp),$tp
+___
+$code.=<<___;
+	and		$tp,$hi1,$ap		; hi1 is a borrow mask, so this
+	andcm		$rp,$hi1,$bp		; is a branch-free select:
+	or		$ap,$bp,$np		; np = borrow ? tp : rp
+
+ sub $rp,$arrsz,$rp ; rewind rp
+ subi 0,$arrsz,$idx
+ ldo `$LOCALS+32`($fp),$tp
+L\$copy
+ ldd $idx($np),$hi0
+ std,ma %r0,8($tp)
+ addib,<> 8,$idx,.-8 ; L\$copy
+ std,ma $hi0,8($rp)
+___
+
+if ($BN_SZ==4) { # PA-RISC 1.1 code-path
+$ablo=$ab0;
+$abhi=$ab1;
+$nmlo0=$nm0;
+$nmhi0=$nm1;
+$nmlo1="%r9";
+$nmhi1="%r8";
+
+$code.=<<___;
+ b L\$done
+ nop
+
+ .ALIGN 8
+L\$parisc11
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -12($xfer),$ablo
+ ldw -16($xfer),$hi0
+ ldw -4($xfer),$nmlo0
+ ldw -8($xfer),$nmhi0
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+
+ ldo 8($idx),$idx ; j++++
+ add $ablo,$nmlo0,$nmlo0 ; discarded
+ addc %r0,$nmhi0,$hi1
+ ldw 4($xfer),$ablo
+ ldw 0($xfer),$abhi
+ nop
+
+L\$1st_pa11
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ flddx $idx($np),${fni} ; np[j,j+1]
+ add $hi0,$ablo,$ablo
+ ldw 12($xfer),$nmlo1
+ addc %r0,$abhi,$hi0
+ ldw 8($xfer),$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ fstds ${fab1},0($xfer)
+ addc %r0,$nmhi1,$nmhi1
+ fstds ${fnm1},8($xfer)
+ add $hi1,$nmlo1,$nmlo1
+ ldw -12($xfer),$ablo
+ addc %r0,$nmhi1,$hi1
+ ldw -16($xfer),$abhi
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ ldw -4($xfer),$nmlo0
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -8($xfer),$nmhi0
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$hi0
+ fstds ${fab0},-16($xfer)
+ add $ablo,$nmlo0,$nmlo0
+ fstds ${fnm0},-8($xfer)
+ addc %r0,$nmhi0,$nmhi0
+ ldw 0($xfer),$abhi
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$1st_pa11 ; j++++
+ addc %r0,$nmhi0,$hi1
+
+ ldw 8($xfer),$nmhi1
+ ldw 12($xfer),$nmlo1
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ add $hi0,$ablo,$ablo
+ fstds ${fab1},0($xfer)
+ addc %r0,$abhi,$hi0
+ fstds ${fnm1},8($xfer)
+ add $ablo,$nmlo1,$nmlo1
+ ldw -16($xfer),$abhi
+ addc %r0,$nmhi1,$nmhi1
+ ldw -12($xfer),$ablo
+ add $hi1,$nmlo1,$nmlo1
+ ldw -8($xfer),$nmhi0
+ addc %r0,$nmhi1,$hi1
+ ldw -4($xfer),$nmlo0
+
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$hi0
+ ldw 0($xfer),$abhi
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldws,mb 8($xfer),$nmhi1
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$nmlo1
+ addc %r0,$nmhi0,$hi1
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+
+ ldo -1($num),$num ; i--
+ subi 0,$arrsz,$idx ; j=0
+
+ fldws,ma 4($bp),${fbi} ; bp[1]
+ flddx $idx($ap),${fai} ; ap[0,1]
+ flddx $idx($np),${fni} ; np[0,1]
+ fldws 8($xfer),${fti}R ; tp[0]
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ xmpyu ${fn0},${fab0}R,${fm0}
+ ldo `$LOCALS+32+4`($fp),$tp
+L\$outer_pa11
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer) ; 33-bit value
+ fstds ${fnm0},-8($xfer)
+ flddx $idx($ap),${fai} ; ap[2,3]
+ flddx $idx($np),${fni} ; np[2,3]
+ ldw -16($xfer),$abhi ; carry bit actually
+ ldo 8($idx),$idx ; j++++
+ ldw -12($xfer),$ablo
+ ldw -8($xfer),$nmhi0
+ ldw -4($xfer),$nmlo0
+ ldw 0($xfer),$hi0 ; high part
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ fstds ${fab1},0($xfer)
+ addl $abhi,$hi0,$hi0 ; account carry bit
+ fstds ${fnm1},8($xfer)
+ add $ablo,$nmlo0,$nmlo0 ; discarded
+ ldw 0($tp),$ti1 ; tp[1]
+ addc %r0,$nmhi0,$hi1
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+ ldw 4($xfer),$ablo
+ ldw 0($xfer),$abhi
+
+L\$inner_pa11
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ flddx $idx($np),${fni} ; np[j,j+1]
+ add $hi0,$ablo,$ablo
+ ldw 4($tp),$ti0 ; tp[j]
+ addc %r0,$abhi,$abhi
+ ldw 12($xfer),$nmlo1
+ add $ti1,$ablo,$ablo
+ ldw 8($xfer),$nmhi1
+ addc %r0,$abhi,$hi0
+ fstds ${fab1},0($xfer)
+ add $ablo,$nmlo1,$nmlo1
+ fstds ${fnm1},8($xfer)
+ addc %r0,$nmhi1,$nmhi1
+ ldw -12($xfer),$ablo
+ add $hi1,$nmlo1,$nmlo1
+ ldw -16($xfer),$abhi
+ addc %r0,$nmhi1,$hi1
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ ldw 8($tp),$ti1 ; tp[j]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -4($xfer),$nmlo0
+ add $hi0,$ablo,$ablo
+ ldw -8($xfer),$nmhi0
+ addc %r0,$abhi,$abhi
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ add $ti0,$ablo,$ablo
+ fstds ${fab0},-16($xfer)
+ addc %r0,$abhi,$hi0
+ fstds ${fnm0},-8($xfer)
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldw 0($xfer),$abhi
+ add $hi1,$nmlo0,$nmlo0
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$inner_pa11 ; j++++
+ addc %r0,$nmhi0,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
+ ldw 12($xfer),$nmlo1
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldw 8($xfer),$nmhi1
+ add $hi0,$ablo,$ablo
+ ldw 4($tp),$ti0 ; tp[j]
+ addc %r0,$abhi,$abhi
+ fstds ${fab1},0($xfer)
+ add $ti1,$ablo,$ablo
+ fstds ${fnm1},8($xfer)
+ addc %r0,$abhi,$hi0
+ ldw -16($xfer),$abhi
+ add $ablo,$nmlo1,$nmlo1
+ ldw -12($xfer),$ablo
+ addc %r0,$nmhi1,$nmhi1
+ ldw -8($xfer),$nmhi0
+ add $hi1,$nmlo1,$nmlo1
+ ldw -4($xfer),$nmlo0
+ addc %r0,$nmhi1,$hi1
+
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$abhi
+ add $ti0,$ablo,$ablo
+ ldw 8($tp),$ti1 ; tp[j]
+ addc %r0,$abhi,$hi0
+ ldw 0($xfer),$abhi
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldws,mb 8($xfer),$nmhi1
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$nmlo1
+ addc %r0,$nmhi0,$hi1
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+
+ addib,= -1,$num,L\$outerdone_pa11; i--
+ subi 0,$arrsz,$idx ; j=0
+
+ fldws,ma 4($bp),${fbi} ; bp[i]
+ flddx $idx($ap),${fai} ; ap[0]
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$abhi
+ flddx $idx($np),${fni} ; np[0]
+ fldws 8($xfer),${fti}R ; tp[0]
+ add $ti1,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
+ ldw 4($tp),$ti0 ; tp[j]
+
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ add $ti0,$hi0,$hi0
+ addc %r0,$hi1,$hi1
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ b L\$outer_pa11
+ ldo `$LOCALS+32+4`($fp),$tp
+
+L\$outerdone_pa11
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$abhi
+ add $ti1,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+
+ ldw 4($tp),$ti0 ; tp[j]
+
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ add $ti0,$hi0,$hi0
+ addc %r0,$hi1,$hi1
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ ldo `$LOCALS+32+4`($fp),$tp
+ sub %r0,%r0,%r0 ; clear borrow
+ ldw -4($tp),$ti0
+ addl $tp,$arrsz,$tp
+L\$sub_pa11
+ ldwx $idx($np),$hi0
+ subb $ti0,$hi0,$hi1
+ ldwx $idx($tp),$ti0
+ addib,<> 4,$idx,L\$sub_pa11
+ stws,ma $hi1,4($rp)
+
+ subb $ti0,%r0,$hi1
+ ldo -4($tp),$tp
+ and $tp,$hi1,$ap
+ andcm $rp,$hi1,$bp
+ or $ap,$bp,$np
+
+ sub $rp,$arrsz,$rp ; rewind rp
+ subi 0,$arrsz,$idx
+ ldo `$LOCALS+32`($fp),$tp
+L\$copy_pa11
+ ldwx $idx($np),$hi0
+ stws,ma %r0,4($tp)
+ addib,<> 4,$idx,L\$copy_pa11
+ stws,ma $hi0,4($rp)
+
+ nop ; alignment
+L\$done
+___
+}
+
+$code.=<<___;
+ ldi 1,%r28 ; signal "handled"
+ ldo $FRAME($fp),%sp ; destroy tp[num+1]
+
+ $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
+ $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
+ $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
+ $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
+ $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
+ $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
+ $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
+ $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
+L\$abort
+ bv (%r2)
+ .EXIT
+ $POPMB -$FRAME(%sp),%r3
+ .PROCEND
+ .STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# Explicitly encode the PA-RISC 2.0 instructions used in this module, so
+# that it can be assembled with .LEVEL 1.0. It should be noted that I
+# wouldn't have to do this if the GNU assembler understood the .ALLOW 2.0
+# directive...
+
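+# As an arbitrary worked example (this instruction is not taken from the
+# code above): something like "ldd 0(%r5),%r6" matches the format-5 branch
+# of the $ldd encoder below, so its opcode is assembled as
+#
+#	(0x03<<26)|(5<<21)|(1<<12)|(3<<6)|6 == 0x0ca010c6
+#
+# and the line comes out as ".WORD 0x0ca010c6 ; ldd 0(%r5),%r6", which the
+# assembler accepts as raw data at any .LEVEL.
+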
+my $ldd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "ldd$mod\t$args";
+
+ if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 4
+ { my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3;
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 5
+ { my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3;
+ $opcode|=(($1&0xF)<<17)|(($1&0x10)<<12); # encode offset
+ $opcode|=(1<<5) if ($mod =~ /^,m/);
+ $opcode|=(1<<13) if ($mod =~ /^,mb/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $std = sub {
+ my ($mod,$args) = @_;
+ my $orig = "std$mod\t$args";
+
+ if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 6
+ { my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6);
+ $opcode|=(($2&0xF)<<1)|(($2&0x10)>>4); # encode offset
+ $opcode|=(1<<5) if ($mod =~ /^,m/);
+ $opcode|=(1<<13) if ($mod =~ /^,mb/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $extrd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "extrd$mod\t$args";
+
+	# Only the ",u" completer is used here; it's encoded implicitly...
+ if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15
+ { my $opcode=(0x36<<26)|($1<<21)|($4<<16);
+ my $len=32-$3;
+ $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos
+ $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12
+ { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9);
+ my $len=32-$2;
+ $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len
+ $opcode |= (1<<13) if ($mod =~ /,\**=/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $shrpd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "shrpd$mod\t$args";
+
+ if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14
+ { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4;
+ my $cpos=63-$3;
+ $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $sub = sub {
+ my ($mod,$args) = @_;
+ my $orig = "sub$mod\t$args";
+
+ if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) {
+ my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3;
+ $opcode|=(1<<10); # e1
+ $opcode|=(1<<8); # e2
+ $opcode|=(1<<5); # d
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig
+ }
+ else { "\t".$orig; }
+};
+
+sub assemble {
+ my ($mnemonic,$mod,$args)=@_;
+ my $opcode = eval("\$$mnemonic");
+
+ ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args";
+}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+ # flip word order in 64-bit mode...
+ s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8);
+ # assemble 2.0 instructions in 32-bit mode...
+ s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4);
+
+ print $_,"\n";
+}
+close STDOUT;
diff --git a/crypto/bn/asm/ppc-mont.pl b/crypto/bn/asm/ppc-mont.pl
new file mode 100755
index 000000000000..f9b6992ccc82
--- /dev/null
+++ b/crypto/bn/asm/ppc-mont.pl
@@ -0,0 +1,334 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# April 2006
+
+# "Teaser" Montgomery multiplication module for PowerPC. It's possible
+# to gain a bit more by modulo-scheduling the outer loop, a dedicated
+# squaring procedure should give a further 20%, and the code can be
+# adapted for a 32-bit application running on a 64-bit CPU. As for the
+# latter: it won't be able to achieve "native" 64-bit performance,
+# because in a 32-bit application context every addc instruction has to
+# be expanded into addc, two right shifts by 32 and finally adde, etc.
+# So far the RSA *sign* performance improvement over pre-bn_mul_mont
+# assembler for a 64-bit application running on PPC970/G5 is:
+#
+# 512-bit +65%
+# 1024-bit +35%
+# 2048-bit +18%
+# 4096-bit +4%
+
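+# Reference only (this sub is never called by the generator; its name and
+# Math::BigInt usage are ours, not OpenSSL's): bn_mul_mont_int below
+# computes the Montgomery product x*y*R^-1 mod n with R=2^(w*num), where w
+# is the word size in bits. The L1st/Louter/Linner loops evaluate the same
+# REDC step word by word, with n0 = -n^-1 mod 2^w, and Lsub/Lcopy perform
+# the final conditional subtraction. A minimal sketch of the whole
+# operation:
+
+use Math::BigInt;
+
+sub mont_mul_ref {
+	my ($x,$y,$n,$w,$num) = @_;	# Math::BigInt x,y,n; n odd; $w-bit words
+	my $R = Math::BigInt->new(2)->bpow($w*$num);
+	my $t = $x*$y;
+	my $m = ($t*($R - $n->copy->bmodinv($R))) % $R;	# m = t*(-n^-1) mod R
+	my $u = ($t + $m*$n)/$R;			# exact division (REDC)
+	return $u->bcmp($n) >= 0 ? $u-$n : $u;		# cf. Lsub/Lcopy
+}
+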
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $BITS= 32;
+ $BNSZ= $BITS/8;
+ $SIZE_T=4;
+ $RZONE= 224;
+
+ $LD= "lwz"; # load
+ $LDU= "lwzu"; # load and update
+ $LDX= "lwzx"; # load indexed
+ $ST= "stw"; # store
+ $STU= "stwu"; # store and update
+ $STX= "stwx"; # store indexed
+ $STUX= "stwux"; # store indexed and update
+ $UMULL= "mullw"; # unsigned multiply low
+ $UMULH= "mulhwu"; # unsigned multiply high
+ $UCMP= "cmplw"; # unsigned compare
+ $SHRI= "srwi"; # unsigned shift right by immediate
+ $PUSH= $ST;
+ $POP= $LD;
+} elsif ($flavour =~ /64/) {
+ $BITS= 64;
+ $BNSZ= $BITS/8;
+ $SIZE_T=8;
+ $RZONE= 288;
+
+ # same as above, but 64-bit mnemonics...
+ $LD= "ld"; # load
+ $LDU= "ldu"; # load and update
+ $LDX= "ldx"; # load indexed
+ $ST= "std"; # store
+ $STU= "stdu"; # store and update
+ $STX= "stdx"; # store indexed
+ $STUX= "stdux"; # store indexed and update
+ $UMULL= "mulld"; # unsigned multiply low
+ $UMULH= "mulhdu"; # unsigned multiply high
+ $UCMP= "cmpld"; # unsigned compare
+ $SHRI= "srdi"; # unsigned shift right by immediate
+ $PUSH= $ST;
+ $POP= $LD;
+} else { die "nonsense $flavour"; }
+
+$FRAME=8*$SIZE_T+$RZONE;
+$LOCALS=8*$SIZE_T;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$sp="r1";
+$toc="r2";
+$rp="r3"; $ovf="r3";
+$ap="r4";
+$bp="r5";
+$np="r6";
+$n0="r7";
+$num="r8";
+$rp="r9"; # $rp is reassigned
+$aj="r10";
+$nj="r11";
+$tj="r12";
+# non-volatile registers
+$i="r20";
+$j="r21";
+$tp="r22";
+$m0="r23";
+$m1="r24";
+$lo0="r25";
+$hi0="r26";
+$lo1="r27";
+$hi1="r28";
+$alo="r29";
+$ahi="r30";
+$nlo="r31";
+#
+$nhi="r0";
+
+$code=<<___;
+.machine "any"
+.text
+
+.globl .bn_mul_mont_int
+.align 4
+.bn_mul_mont_int:
+ cmpwi $num,4
+ mr $rp,r3 ; $rp is reassigned
+ li r3,0
+ bltlr
+___
+$code.=<<___ if ($BNSZ==4);
+ cmpwi $num,32 ; longer key performance is not better
+ bgelr
+___
+$code.=<<___;
+ slwi $num,$num,`log($BNSZ)/log(2)`
+ li $tj,-4096
+ addi $ovf,$num,$FRAME
+ subf $ovf,$ovf,$sp ; $sp-$ovf
+ and $ovf,$ovf,$tj ; minimize TLB usage
+ subf $ovf,$sp,$ovf ; $ovf-$sp
+ mr $tj,$sp
+ srwi $num,$num,`log($BNSZ)/log(2)`
+ $STUX $sp,$sp,$ovf
+
+ $PUSH r20,`-12*$SIZE_T`($tj)
+ $PUSH r21,`-11*$SIZE_T`($tj)
+ $PUSH r22,`-10*$SIZE_T`($tj)
+ $PUSH r23,`-9*$SIZE_T`($tj)
+ $PUSH r24,`-8*$SIZE_T`($tj)
+ $PUSH r25,`-7*$SIZE_T`($tj)
+ $PUSH r26,`-6*$SIZE_T`($tj)
+ $PUSH r27,`-5*$SIZE_T`($tj)
+ $PUSH r28,`-4*$SIZE_T`($tj)
+ $PUSH r29,`-3*$SIZE_T`($tj)
+ $PUSH r30,`-2*$SIZE_T`($tj)
+ $PUSH r31,`-1*$SIZE_T`($tj)
+
+ $LD $n0,0($n0) ; pull n0[0] value
+ addi $num,$num,-2 ; adjust $num for counter register
+
+ $LD $m0,0($bp) ; m0=bp[0]
+ $LD $aj,0($ap) ; ap[0]
+ addi $tp,$sp,$LOCALS
+ $UMULL $lo0,$aj,$m0 ; ap[0]*bp[0]
+ $UMULH $hi0,$aj,$m0
+
+ $LD $aj,$BNSZ($ap) ; ap[1]
+ $LD $nj,0($np) ; np[0]
+
+ $UMULL $m1,$lo0,$n0 ; "tp[0]"*n0
+
+ $UMULL $alo,$aj,$m0 ; ap[1]*bp[0]
+ $UMULH $ahi,$aj,$m0
+
+ $UMULL $lo1,$nj,$m1 ; np[0]*m1
+ $UMULH $hi1,$nj,$m1
+ $LD $nj,$BNSZ($np) ; np[1]
+ addc $lo1,$lo1,$lo0
+ addze $hi1,$hi1
+
+ $UMULL $nlo,$nj,$m1 ; np[1]*m1
+ $UMULH $nhi,$nj,$m1
+
+ mtctr $num
+ li $j,`2*$BNSZ`
+.align 4
+L1st:
+ $LDX $aj,$ap,$j ; ap[j]
+ addc $lo0,$alo,$hi0
+ $LDX $nj,$np,$j ; np[j]
+ addze $hi0,$ahi
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[0]
+ addc $lo1,$nlo,$hi1
+ $UMULH $ahi,$aj,$m0
+ addze $hi1,$nhi
+ $UMULL $nlo,$nj,$m1 ; np[j]*m1
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
+ $UMULH $nhi,$nj,$m1
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ addi $j,$j,$BNSZ ; j++
+ addi $tp,$tp,$BNSZ ; tp++
+ bdnz- L1st
+;L1st
+ addc $lo0,$alo,$hi0
+ addze $hi0,$ahi
+
+ addc $lo1,$nlo,$hi1
+ addze $hi1,$nhi
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ li $ovf,0
+ addc $hi1,$hi1,$hi0
+ addze $ovf,$ovf ; upmost overflow bit
+ $ST $hi1,$BNSZ($tp)
+
+ li $i,$BNSZ
+.align 4
+Louter:
+ $LDX $m0,$bp,$i ; m0=bp[i]
+ $LD $aj,0($ap) ; ap[0]
+ addi $tp,$sp,$LOCALS
+ $LD $tj,$LOCALS($sp); tp[0]
+ $UMULL $lo0,$aj,$m0 ; ap[0]*bp[i]
+ $UMULH $hi0,$aj,$m0
+ $LD $aj,$BNSZ($ap) ; ap[1]
+ $LD $nj,0($np) ; np[0]
+ addc $lo0,$lo0,$tj ; ap[0]*bp[i]+tp[0]
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
+ addze $hi0,$hi0
+ $UMULL $m1,$lo0,$n0 ; tp[0]*n0
+ $UMULH $ahi,$aj,$m0
+ $UMULL $lo1,$nj,$m1 ; np[0]*m1
+ $UMULH $hi1,$nj,$m1
+ $LD $nj,$BNSZ($np) ; np[1]
+ addc $lo1,$lo1,$lo0
+ $UMULL $nlo,$nj,$m1 ; np[1]*m1
+ addze $hi1,$hi1
+ $UMULH $nhi,$nj,$m1
+
+ mtctr $num
+ li $j,`2*$BNSZ`
+.align 4
+Linner:
+ $LDX $aj,$ap,$j ; ap[j]
+ addc $lo0,$alo,$hi0
+ $LD $tj,$BNSZ($tp) ; tp[j]
+ addze $hi0,$ahi
+ $LDX $nj,$np,$j ; np[j]
+ addc $lo1,$nlo,$hi1
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
+ addze $hi1,$nhi
+ $UMULH $ahi,$aj,$m0
+ addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
+ $UMULL $nlo,$nj,$m1 ; np[j]*m1
+ addze $hi0,$hi0
+ $UMULH $nhi,$nj,$m1
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
+ addi $j,$j,$BNSZ ; j++
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+ addi $tp,$tp,$BNSZ ; tp++
+ bdnz- Linner
+;Linner
+ $LD $tj,$BNSZ($tp) ; tp[j]
+ addc $lo0,$alo,$hi0
+ addze $hi0,$ahi
+ addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
+ addze $hi0,$hi0
+
+ addc $lo1,$nlo,$hi1
+ addze $hi1,$nhi
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ addic $ovf,$ovf,-1 ; move upmost overflow to XER[CA]
+ li $ovf,0
+ adde $hi1,$hi1,$hi0
+ addze $ovf,$ovf
+ $ST $hi1,$BNSZ($tp)
+;
+ slwi $tj,$num,`log($BNSZ)/log(2)`
+ $UCMP $i,$tj
+ addi $i,$i,$BNSZ
+ ble- Louter
+
+ addi $num,$num,2 ; restore $num
+ subfc $j,$j,$j ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,$LOCALS
+ mtctr $num
+
+.align 4
+Lsub: $LDX $tj,$tp,$j
+ $LDX $nj,$np,$j
+ subfe $aj,$nj,$tj ; tp[j]-np[j]
+ $STX $aj,$rp,$j
+ addi $j,$j,$BNSZ
+ bdnz- Lsub
+
+ li $j,0
+ mtctr $num
+ subfe $ovf,$j,$ovf ; handle upmost overflow bit
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ $LDX $tj,$ap,$j
+ $STX $tj,$rp,$j
+ $STX $j,$tp,$j ; zap at once
+ addi $j,$j,$BNSZ
+ bdnz- Lcopy
+
+ $POP $tj,0($sp)
+ li r3,1
+ $POP r20,`-12*$SIZE_T`($tj)
+ $POP r21,`-11*$SIZE_T`($tj)
+ $POP r22,`-10*$SIZE_T`($tj)
+ $POP r23,`-9*$SIZE_T`($tj)
+ $POP r24,`-8*$SIZE_T`($tj)
+ $POP r25,`-7*$SIZE_T`($tj)
+ $POP r26,`-6*$SIZE_T`($tj)
+ $POP r27,`-5*$SIZE_T`($tj)
+ $POP r28,`-4*$SIZE_T`($tj)
+ $POP r29,`-3*$SIZE_T`($tj)
+ $POP r30,`-2*$SIZE_T`($tj)
+ $POP r31,`-1*$SIZE_T`($tj)
+ mr $sp,$tj
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,12,6,0
+ .long 0
+
+.asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/crypto/bn/asm/ppc.pl b/crypto/bn/asm/ppc.pl
index 84448836e3dd..1249ce229988 100644
--- a/crypto/bn/asm/ppc.pl
+++ b/crypto/bn/asm/ppc.pl
@@ -100,9 +100,9 @@
# me a note at schari@us.ibm.com
#
-$opf = shift;
+$flavour = shift;
-if ($opf =~ /32\.s/) {
+if ($flavour =~ /32/) {
$BITS= 32;
$BNSZ= $BITS/8;
$ISA= "\"ppc\"";
@@ -125,7 +125,7 @@ if ($opf =~ /32\.s/) {
$INSR= "insrwi"; # insert right
$ROTL= "rotlwi"; # rotate left by immediate
$TR= "tw"; # conditional trap
-} elsif ($opf =~ /64\.s/) {
+} elsif ($flavour =~ /64/) {
$BITS= 64;
$BNSZ= $BITS/8;
$ISA= "\"ppc64\"";
@@ -149,93 +149,16 @@ if ($opf =~ /32\.s/) {
$INSR= "insrdi"; # insert right
$ROTL= "rotldi"; # rotate left by immediate
$TR= "td"; # conditional trap
-} else { die "nonsense $opf"; }
+} else { die "nonsense $flavour"; }
-( defined shift || open STDOUT,">$opf" ) || die "can't open $opf: $!";
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
-# function entry points from the AIX code
-#
-# There are other, more elegant, ways to handle this. We (IBM) chose
-# this approach as it plays well with scripts we run to 'namespace'
-# OpenSSL .i.e. we add a prefix to all the public symbols so we can
-# co-exist in the same process with other implementations of OpenSSL.
-# 'cleverer' ways of doing these substitutions tend to hide data we
-# need to be obvious.
-#
-my @items = ("bn_sqr_comba4",
- "bn_sqr_comba8",
- "bn_mul_comba4",
- "bn_mul_comba8",
- "bn_sub_words",
- "bn_add_words",
- "bn_div_words",
- "bn_sqr_words",
- "bn_mul_words",
- "bn_mul_add_words");
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
-if ($opf =~ /linux/) { do_linux(); }
-elsif ($opf =~ /aix/) { do_aix(); }
-elsif ($opf =~ /osx/) { do_osx(); }
-else { do_bsd(); }
-
-sub do_linux {
- $d=&data();
-
- if ($BITS==64) {
- foreach $t (@items) {
- $d =~ s/\.$t:/\
-\t.section\t".opd","aw"\
-\t.align\t3\
-\t.globl\t$t\
-$t:\
-\t.quad\t.$t,.TOC.\@tocbase,0\
-\t.size\t$t,24\
-\t.previous\n\
-\t.type\t.$t,\@function\
-\t.globl\t.$t\
-.$t:/g;
- }
- }
- else {
- foreach $t (@items) {
- $d=~s/\.$t/$t/g;
- }
- }
- # hide internal labels to avoid pollution of name table...
- $d=~s/Lppcasm_/.Lppcasm_/gm;
- print $d;
-}
-
-sub do_aix {
- # AIX assembler is smart enough to please the linker without
- # making us do something special...
- print &data();
-}
-
-# MacOSX 32 bit
-sub do_osx {
- $d=&data();
- # Change the bn symbol prefix from '.' to '_'
- foreach $t (@items) {
- $d=~s/\.$t/_$t/g;
- }
- # Change .machine to something OS X asm will accept
- $d=~s/\.machine.*/.text/g;
- $d=~s/\#/;/g; # change comment from '#' to ';'
- print $d;
-}
-
-# BSD (Untested)
-sub do_bsd {
- $d=&data();
- foreach $t (@items) {
- $d=~s/\.$t/_$t/g;
- }
- print $d;
-}
-
-sub data {
- local($data)=<<EOF;
+$data=<<EOF;
#--------------------------------------------------------------------
#
#
@@ -297,33 +220,20 @@ sub data {
#
# Defines to be used in the assembly code.
#
-.set r0,0 # we use it as storage for value of 0
-.set SP,1 # preserved
-.set RTOC,2 # preserved
-.set r3,3 # 1st argument/return value
-.set r4,4 # 2nd argument/volatile register
-.set r5,5 # 3rd argument/volatile register
-.set r6,6 # ...
-.set r7,7
-.set r8,8
-.set r9,9
-.set r10,10
-.set r11,11
-.set r12,12
-.set r13,13 # not used, nor any other "below" it...
-
-.set BO_IF_NOT,4
-.set BO_IF,12
-.set BO_dCTR_NZERO,16
-.set BO_dCTR_ZERO,18
-.set BO_ALWAYS,20
-.set CR0_LT,0;
-.set CR0_GT,1;
-.set CR0_EQ,2
-.set CR1_FX,4;
-.set CR1_FEX,5;
-.set CR1_VX,6
-.set LR,8
+#.set r0,0 # we use it as storage for value of 0
+#.set SP,1 # preserved
+#.set RTOC,2 # preserved
+#.set r3,3 # 1st argument/return value
+#.set r4,4 # 2nd argument/volatile register
+#.set r5,5 # 3rd argument/volatile register
+#.set r6,6 # ...
+#.set r7,7
+#.set r8,8
+#.set r9,9
+#.set r10,10
+#.set r11,11
+#.set r12,12
+#.set r13,13 # not used, nor any other "below" it...
# Declare function names to be global
# NOTE: For gcc these names MUST be changed to remove
@@ -344,7 +254,7 @@ sub data {
# .text section
- .machine $ISA
+ .machine "any"
#
# NOTE: The following label name should be changed to
@@ -478,8 +388,10 @@ sub data {
$ST r9,`6*$BNSZ`(r3) #r[6]=c1
$ST r10,`7*$BNSZ`(r3) #r[7]=c2
- bclr BO_ALWAYS,CR0_LT
- .long 0x00000000
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -903,9 +815,10 @@ sub data {
$ST r9, `15*$BNSZ`(r3) #r[15]=c1;
- bclr BO_ALWAYS,CR0_LT
-
- .long 0x00000000
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1055,8 +968,10 @@ sub data {
$ST r10,`6*$BNSZ`(r3) #r[6]=c1
$ST r11,`7*$BNSZ`(r3) #r[7]=c2
- bclr BO_ALWAYS,CR0_LT
- .long 0x00000000
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1591,8 +1506,10 @@ sub data {
adde r10,r10,r9
$ST r12,`14*$BNSZ`(r3) #r[14]=c3;
$ST r10,`15*$BNSZ`(r3) #r[15]=c1;
- bclr BO_ALWAYS,CR0_LT
- .long 0x00000000
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1623,7 +1540,7 @@ sub data {
subfc. r7,r0,r6 # If r6 is 0 then result is 0.
# if r6 > 0 then result !=0
# In either case carry bit is set.
- bc BO_IF,CR0_EQ,Lppcasm_sub_adios
+ beq Lppcasm_sub_adios
addi r4,r4,-$BNSZ
addi r3,r3,-$BNSZ
addi r5,r5,-$BNSZ
@@ -1635,13 +1552,14 @@ Lppcasm_sub_mainloop:
# if carry = 1 this is r7-r8. Else it
# is r7-r8 -1 as we need.
$STU r6,$BNSZ(r3)
- bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_sub_mainloop
+ bdnz- Lppcasm_sub_mainloop
Lppcasm_sub_adios:
subfze r3,r0 # if carry bit is set then r3 = 0 else -1
andi. r3,r3,1 # keep only last bit.
- bclr BO_ALWAYS,CR0_LT
- .long 0x00000000
-
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1670,7 +1588,7 @@ Lppcasm_sub_adios:
# check for r6 = 0. Is this needed?
#
addic. r6,r6,0 #test r6 and clear carry bit.
- bc BO_IF,CR0_EQ,Lppcasm_add_adios
+ beq Lppcasm_add_adios
addi r4,r4,-$BNSZ
addi r3,r3,-$BNSZ
addi r5,r5,-$BNSZ
@@ -1680,11 +1598,13 @@ Lppcasm_add_mainloop:
$LDU r8,$BNSZ(r5)
adde r8,r7,r8
$STU r8,$BNSZ(r3)
- bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_add_mainloop
+ bdnz- Lppcasm_add_mainloop
Lppcasm_add_adios:
addze r3,r0 #return carry bit.
- bclr BO_ALWAYS,CR0_LT
- .long 0x00000000
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1707,24 +1627,24 @@ Lppcasm_add_adios:
# r5 = d
$UCMPI 0,r5,0 # compare r5 and 0
- bc BO_IF_NOT,CR0_EQ,Lppcasm_div1 # proceed if d!=0
+ bne Lppcasm_div1 # proceed if d!=0
li r3,-1 # d=0 return -1
- bclr BO_ALWAYS,CR0_LT
+ blr
Lppcasm_div1:
xor r0,r0,r0 #r0=0
li r8,$BITS
$CNTLZ. r7,r5 #r7 = num leading 0s in d.
- bc BO_IF,CR0_EQ,Lppcasm_div2 #proceed if no leading zeros
+ beq Lppcasm_div2 #proceed if no leading zeros
subf r8,r7,r8 #r8 = BN_num_bits_word(d)
$SHR. r9,r3,r8 #are there any bits above r8'th?
$TR 16,r9,r0 #if there're, signal to dump core...
Lppcasm_div2:
$UCMP 0,r3,r5 #h>=d?
- bc BO_IF,CR0_LT,Lppcasm_div3 #goto Lppcasm_div3 if not
+ blt Lppcasm_div3 #goto Lppcasm_div3 if not
subf r3,r5,r3 #h-=d ;
Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
cmpi 0,0,r7,0 # is (i == 0)?
- bc BO_IF,CR0_EQ,Lppcasm_div4
+ beq Lppcasm_div4
$SHL r3,r3,r7 # h = (h<< i)
$SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
$SHL r5,r5,r7 # d<<=i
@@ -1741,7 +1661,7 @@ Lppcasm_divouterloop:
$SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
# compute here for innerloop.
$UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
- bc BO_IF_NOT,CR0_EQ,Lppcasm_div5 # goto Lppcasm_div5 if not
+ bne Lppcasm_div5 # goto Lppcasm_div5 if not
li r8,-1
$CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
@@ -1762,9 +1682,9 @@ Lppcasm_divinnerloop:
# the following 2 instructions do that
$SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
- $UCMP 1,r6,r7 # compare (tl <= r7)
- bc BO_IF_NOT,CR0_EQ,Lppcasm_divinnerexit
- bc BO_IF_NOT,CR1_FEX,Lppcasm_divinnerexit
+ $UCMP cr1,r6,r7 # compare (tl <= r7)
+ bne Lppcasm_divinnerexit
+ ble cr1,Lppcasm_divinnerexit
addi r8,r8,-1 #q--
subf r12,r9,r12 #th -=dh
$CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
@@ -1773,14 +1693,14 @@ Lppcasm_divinnerloop:
Lppcasm_divinnerexit:
$SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
$SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
- $UCMP 1,r4,r11 # compare l and tl
+ $UCMP cr1,r4,r11 # compare l and tl
add r12,r12,r10 # th+=t
- bc BO_IF_NOT,CR1_FX,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
+ bge cr1,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
addi r12,r12,1 # th++
Lppcasm_div7:
subf r11,r11,r4 #r11=l-tl
- $UCMP 1,r3,r12 #compare h and th
- bc BO_IF_NOT,CR1_FX,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
+ $UCMP cr1,r3,r12 #compare h and th
+ bge cr1,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
addi r8,r8,-1 # q--
add r3,r5,r3 # h+=d
Lppcasm_div8:
@@ -1791,13 +1711,15 @@ Lppcasm_div8:
# the following 2 instructions will do this.
$INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
$ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
- bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_div9#if (count==0) break ;
+ bdz Lppcasm_div9 #if (count==0) break ;
$SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
b Lppcasm_divouterloop
Lppcasm_div9:
or r3,r8,r0
- bclr BO_ALWAYS,CR0_LT
- .long 0x00000000
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1822,7 +1744,7 @@ Lppcasm_div9:
# No unrolling done here. Not performance critical.
addic. r5,r5,0 #test r5.
- bc BO_IF,CR0_EQ,Lppcasm_sqr_adios
+ beq Lppcasm_sqr_adios
addi r4,r4,-$BNSZ
addi r3,r3,-$BNSZ
mtctr r5
@@ -1833,11 +1755,12 @@ Lppcasm_sqr_mainloop:
$UMULH r8,r6,r6
$STU r7,$BNSZ(r3)
$STU r8,$BNSZ(r3)
- bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_sqr_mainloop
+ bdnz- Lppcasm_sqr_mainloop
Lppcasm_sqr_adios:
- bclr BO_ALWAYS,CR0_LT
- .long 0x00000000
-
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1858,7 +1781,7 @@ Lppcasm_sqr_adios:
xor r0,r0,r0
xor r12,r12,r12 # used for carry
rlwinm. r7,r5,30,2,31 # num >> 2
- bc BO_IF,CR0_EQ,Lppcasm_mw_REM
+ beq Lppcasm_mw_REM
mtctr r7
Lppcasm_mw_LOOP:
#mul(rp[0],ap[0],w,c1);
@@ -1896,11 +1819,11 @@ Lppcasm_mw_LOOP:
addi r3,r3,`4*$BNSZ`
addi r4,r4,`4*$BNSZ`
- bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_mw_LOOP
+ bdnz- Lppcasm_mw_LOOP
Lppcasm_mw_REM:
andi. r5,r5,0x3
- bc BO_IF,CR0_EQ,Lppcasm_mw_OVER
+ beq Lppcasm_mw_OVER
#mul(rp[0],ap[0],w,c1);
$LD r8,`0*$BNSZ`(r4)
$UMULL r9,r6,r8
@@ -1912,7 +1835,7 @@ Lppcasm_mw_REM:
addi r5,r5,-1
cmpli 0,0,r5,0
- bc BO_IF,CR0_EQ,Lppcasm_mw_OVER
+ beq Lppcasm_mw_OVER
#mul(rp[1],ap[1],w,c1);
@@ -1926,7 +1849,7 @@ Lppcasm_mw_REM:
addi r5,r5,-1
cmpli 0,0,r5,0
- bc BO_IF,CR0_EQ,Lppcasm_mw_OVER
+ beq Lppcasm_mw_OVER
#mul_add(rp[2],ap[2],w,c1);
$LD r8,`2*$BNSZ`(r4)
@@ -1939,8 +1862,10 @@ Lppcasm_mw_REM:
Lppcasm_mw_OVER:
addi r3,r12,0
- bclr BO_ALWAYS,CR0_LT
- .long 0x00000000
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
#
# NOTE: The following label name should be changed to
@@ -1964,7 +1889,7 @@ Lppcasm_mw_OVER:
xor r0,r0,r0 #r0 = 0
xor r12,r12,r12 #r12 = 0 . used for carry
rlwinm. r7,r5,30,2,31 # num >> 2
- bc BO_IF,CR0_EQ,Lppcasm_maw_leftover # if (num < 4) go LPPCASM_maw_leftover
+ beq Lppcasm_maw_leftover # if (num < 4) go LPPCASM_maw_leftover
mtctr r7
Lppcasm_maw_mainloop:
#mul_add(rp[0],ap[0],w,c1);
@@ -2017,11 +1942,11 @@ Lppcasm_maw_mainloop:
$ST r11,`3*$BNSZ`(r3)
addi r3,r3,`4*$BNSZ`
addi r4,r4,`4*$BNSZ`
- bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_maw_mainloop
+ bdnz- Lppcasm_maw_mainloop
Lppcasm_maw_leftover:
andi. r5,r5,0x3
- bc BO_IF,CR0_EQ,Lppcasm_maw_adios
+ beq Lppcasm_maw_adios
addi r3,r3,-$BNSZ
addi r4,r4,-$BNSZ
#mul_add(rp[0],ap[0],w,c1);
@@ -2036,7 +1961,7 @@ Lppcasm_maw_leftover:
addze r12,r10
$ST r9,0(r3)
- bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios
+ bdz Lppcasm_maw_adios
#mul_add(rp[1],ap[1],w,c1);
$LDU r8,$BNSZ(r4)
$UMULL r9,r6,r8
@@ -2048,7 +1973,7 @@ Lppcasm_maw_leftover:
addze r12,r10
$ST r9,0(r3)
- bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios
+ bdz Lppcasm_maw_adios
#mul_add(rp[2],ap[2],w,c1);
$LDU r8,$BNSZ(r4)
$UMULL r9,r6,r8
@@ -2062,19 +1987,12 @@ Lppcasm_maw_leftover:
Lppcasm_maw_adios:
addi r3,r12,0
- bclr BO_ALWAYS,CR0_LT
- .long 0x00000000
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
.align 4
EOF
- $data =~ s/\`([^\`]*)\`/eval $1/gem;
-
- # if some assembler chokes on some simplified mnemonic,
- # this is the spot to fix it up, e.g.:
- # GNU as doesn't seem to accept cmplw, 32-bit unsigned compare
- $data =~ s/^(\s*)cmplw(\s+)([^,]+),(.*)/$1cmpl$2$3,0,$4/gm;
- # assembler X doesn't accept li, load immediate value
- #$data =~ s/^(\s*)li(\s+)([^,]+),(.*)/$1addi$2$3,0,$4/gm;
- # assembler Y chokes on apostrophes in comments
- $data =~ s/'//gm;
- return($data);
-}
+$data =~ s/\`([^\`]*)\`/eval $1/gem;
+print $data;
+close STDOUT;
diff --git a/crypto/bn/asm/ppc64-mont.pl b/crypto/bn/asm/ppc64-mont.pl
new file mode 100755
index 000000000000..a14e769ad055
--- /dev/null
+++ b/crypto/bn/asm/ppc64-mont.pl
@@ -0,0 +1,1088 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# December 2007
+
+# The reason for the undertaken effort is basically the following. Even
+# though the Power 6 CPU operates at an incredible 4.7GHz clock frequency,
+# its PKI performance was observed to be less than impressive, essentially
+# as fast as a 1.8GHz PPC970, or 2.6 times(!) slower than one would hope.
+# Well, it's not surprising that IBM had to make some sacrifices to
+# boost the clock frequency that much, but no overall improvement?
+# Having observed how much difference switching to the FPU made on
+# UltraSPARC, playing the same stunt on Power 6 appeared appropriate...
+# Unfortunately the resulting performance improvement is not as
+# impressive, ~30%, and in absolute terms is still very far from what
+# one would expect from a 4.7GHz CPU. There is a chance that I'm doing
+# something wrong, but in the absence of assembler-level micro-profiling
+# data or at least a decent platform guide I can't tell... Or better
+# results might be achieved with VMX... Anyway, this module provides
+# *worse* performance on other PowerPC implementations, ~40-15% slower
+# on PPC970 depending on key length and ~40% slower on Power 5 for all
+# key lengths. As it's obviously inappropriate as the "best all-round"
+# alternative, it has to be complemented with run-time CPU family
+# detection. Oh! It should also be noted that, unlike on other PowerPC
+# implementations, the IALU ppc-mont.pl module performs *suboptimally* on
+# >=1024-bit key lengths on Power 6. It should also be noted that
+# *everything* said so far applies to 64-bit builds! As far as a 32-bit
+# application executed on a 64-bit CPU goes, this module is likely to
+# become the preferred choice, because it's easy to adapt for that
+# case and it *is* faster than 32-bit ppc-mont.pl on *all* processors.
+
+# February 2008
+
+# Micro-profiling-assisted optimization results in a ~15% improvement
+# over the original ppc64-mont.pl version, or an overall ~50% improvement
+# over the ppc.pl module on Power 6. Compared to ppc-mont.pl on the same
+# Power 6 CPU, this module is 5-150% faster depending on key length,
+# [hereafter] more for longer keys. But compared to ppc-mont.pl
+# on a 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive
+# in absolute terms, but that's apparently the way Power 6 is...
+
+# December 2009
+
+# Adapted for a 32-bit build this module delivers a 25-120% (yes, more
+# than *twice* for longer keys) performance improvement over 32-bit
+# ppc-mont.pl on a 1.8GHz PPC970. However! Even in a 32-bit build this
+# implementation utilizes 64-bit integer operations, and the trouble is
+# that most PPC operating systems don't preserve the upper halves of
+# general purpose registers upon 32-bit signal delivery. They do preserve
+# them upon context switch, but not upon signalling:-( This means that
+# asynchronous signals have to be blocked upon entry to this subroutine.
+# Signal masking (and of course the complementary unmasking) has quite an
+# impact on performance, naturally larger for shorter keys. It's so severe
+# that 512-bit key performance can be as low as 1/3 of the expected one.
+# This is why on these OSes this routine is engaged only for longer key
+# operations; see crypto/ppccap.c for further details. MacOS X is an
+# exception to this and doesn't require signal masking, and that's where
+# the above improvement coefficients were collected. For the others, the
+# alternative would be to break the dependence on the upper halves of
+# GPRs by sticking to 32-bit integer operations...
+
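+# Purely as an illustration of the kind of wrapper the paragraph above
+# implies (the real masking is done in C, in crypto/ppccap.c, not in this
+# file; the sketch below is ours and only shows the principle):
+#
+#	use POSIX qw(:signal_h);
+#	my $all = POSIX::SigSet->new;  $all->fillset;
+#	my $old = POSIX::SigSet->new;
+#	sigprocmask(SIG_BLOCK, $all, $old);	# block asynchronous signals
+#	# ... perform the long modular operation here ...
+#	sigprocmask(SIG_SETMASK, $old);		# restore the previous mask
+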
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $SIZE_T=4;
+ $RZONE= 224;
+ $fname= "bn_mul_mont_fpu64";
+
+ $STUX= "stwux"; # store indexed and update
+ $PUSH= "stw";
+ $POP= "lwz";
+} elsif ($flavour =~ /64/) {
+ $SIZE_T=8;
+ $RZONE= 288;
+ $fname= "bn_mul_mont_fpu64";
+
+ # same as above, but 64-bit mnemonics...
+ $STUX= "stdux"; # store indexed and update
+ $PUSH= "std";
+ $POP= "ld";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=64; # padded frame header
+$TRANSFER=16*8;
+
+$carry="r0";
+$sp="r1";
+$toc="r2";
+$rp="r3"; $ovf="r3";
+$ap="r4";
+$bp="r5";
+$np="r6";
+$n0="r7";
+$num="r8";
+$rp="r9"; # $rp is reassigned
+$tp="r10";
+$j="r11";
+$i="r12";
+# non-volatile registers
+$nap_d="r22"; # interleaved ap and np in double format
+$a0="r23"; # ap[0]
+$t0="r24"; # temporary registers
+$t1="r25";
+$t2="r26";
+$t3="r27";
+$t4="r28";
+$t5="r29";
+$t6="r30";
+$t7="r31";
+
+# PPC offers enough register bank capacity to unroll inner loops twice
+#
+# ..A3A2A1A0
+# dcba
+# -----------
+# A0a
+# A0b
+# A0c
+# A0d
+# A1a
+# A1b
+# A1c
+# A1d
+# A2a
+# A2b
+# A2c
+# A2d
+# A3a
+# A3b
+# A3c
+# A3d
+# ..a
+# ..b
+#
+$ba="f0"; $bb="f1"; $bc="f2"; $bd="f3";
+$na="f4"; $nb="f5"; $nc="f6"; $nd="f7";
+$dota="f8"; $dotb="f9";
+$A0="f10"; $A1="f11"; $A2="f12"; $A3="f13";
+$N0="f20"; $N1="f21"; $N2="f22"; $N3="f23";
+$T0a="f24"; $T0b="f25";
+$T1a="f26"; $T1b="f27";
+$T2a="f28"; $T2b="f29";
+$T3a="f30"; $T3b="f31";
+
+# sp----------->+-------------------------------+
+# | saved sp |
+# +-------------------------------+
+# . .
+# +64 +-------------------------------+
+# | 16 gpr<->fpr transfer zone |
+# . .
+# . .
+# +16*8 +-------------------------------+
+# | __int64 tmp[-1] |
+# +-------------------------------+
+# | __int64 tmp[num] |
+# . .
+# . .
+# . .
+# +(num+1)*8 +-------------------------------+
+# | padding to 64 byte boundary |
+# . .
+# +X +-------------------------------+
+# | double nap_d[4*num] |
+# . .
+# . .
+# . .
+# +-------------------------------+
+# . .
+# -12*size_t +-------------------------------+
+# | 10 saved gpr, r22-r31 |
+# . .
+# . .
+# -12*8 +-------------------------------+
+# | 12 saved fpr, f20-f31 |
+# . .
+# . .
+# +-------------------------------+
+
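+# The integer operands are handed to the FPU as 16-bit digits (see the
+# "transfer ... as 4x16-bit values" extrdi sequences and the fcfid
+# conversions below): every digit and every 16x16-bit partial product is
+# a small integer, so the fmul/fmadd chains can accumulate them exactly
+# within the 53-bit double-precision mantissa (e.g. 0xffff*0xffff =
+# 0xfffe0001 < 2^32, and a handful of such terms stays far below 2^53);
+# fctid then converts the sums back so carries can be propagated with
+# integer instructions in 16-bit steps.
+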
+$code=<<___;
+.machine "any"
+.text
+
+.globl .$fname
+.align 5
+.$fname:
+ cmpwi $num,`3*8/$SIZE_T`
+ mr $rp,r3 ; $rp is reassigned
+ li r3,0 ; possible "not handled" return code
+ bltlr-
+ andi. r0,$num,`16/$SIZE_T-1` ; $num has to be "even"
+ bnelr-
+
+ slwi $num,$num,`log($SIZE_T)/log(2)` ; num*=sizeof(BN_LONG)
+ li $i,-4096
+ slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num
+ add $tp,$tp,$num ; place for tp[num+1]
+ addi $tp,$tp,`$FRAME+$TRANSFER+8+64+$RZONE`
+ subf $tp,$tp,$sp ; $sp-$tp
+ and $tp,$tp,$i ; minimize TLB usage
+ subf $tp,$sp,$tp ; $tp-$sp
+ mr $i,$sp
+ $STUX $sp,$sp,$tp ; alloca
+
+ $PUSH r22,`-12*8-10*$SIZE_T`($i)
+ $PUSH r23,`-12*8-9*$SIZE_T`($i)
+ $PUSH r24,`-12*8-8*$SIZE_T`($i)
+ $PUSH r25,`-12*8-7*$SIZE_T`($i)
+ $PUSH r26,`-12*8-6*$SIZE_T`($i)
+ $PUSH r27,`-12*8-5*$SIZE_T`($i)
+ $PUSH r28,`-12*8-4*$SIZE_T`($i)
+ $PUSH r29,`-12*8-3*$SIZE_T`($i)
+ $PUSH r30,`-12*8-2*$SIZE_T`($i)
+ $PUSH r31,`-12*8-1*$SIZE_T`($i)
+ stfd f20,`-12*8`($i)
+ stfd f21,`-11*8`($i)
+ stfd f22,`-10*8`($i)
+ stfd f23,`-9*8`($i)
+ stfd f24,`-8*8`($i)
+ stfd f25,`-7*8`($i)
+ stfd f26,`-6*8`($i)
+ stfd f27,`-5*8`($i)
+ stfd f28,`-4*8`($i)
+ stfd f29,`-3*8`($i)
+ stfd f30,`-2*8`($i)
+ stfd f31,`-1*8`($i)
+___
+$code.=<<___ if ($SIZE_T==8);
+ ld $a0,0($ap) ; pull ap[0] value
+ ld $n0,0($n0) ; pull n0[0] value
+ ld $t3,0($bp) ; bp[0]
+___
+$code.=<<___ if ($SIZE_T==4);
+ mr $t1,$n0
+ lwz $a0,0($ap) ; pull ap[0,1] value
+ lwz $t0,4($ap)
+ lwz $n0,0($t1) ; pull n0[0,1] value
+ lwz $t1,4($t1)
+ lwz $t3,0($bp) ; bp[0,1]
+ lwz $t2,4($bp)
+ insrdi $a0,$t0,32,0
+ insrdi $n0,$t1,32,0
+ insrdi $t3,$t2,32,0
+___
+$code.=<<___;
+ addi $tp,$sp,`$FRAME+$TRANSFER+8+64`
+ li $i,-64
+ add $nap_d,$tp,$num
+ and $nap_d,$nap_d,$i ; align to 64 bytes
+
+ mulld $t7,$a0,$t3 ; ap[0]*bp[0]
+ ; nap_d is off by 1, because it's used with stfdu/lfdu
+ addi $nap_d,$nap_d,-8
+ srwi $j,$num,`3+1` ; counter register, num/2
+ mulld $t7,$t7,$n0 ; tp[0]*n0
+ addi $j,$j,-1
+ addi $tp,$sp,`$FRAME+$TRANSFER-8`
+ li $carry,0
+ mtctr $j
+
+ ; transfer bp[0] to FPU as 4x16-bit values
+ extrdi $t0,$t3,16,48
+ extrdi $t1,$t3,16,32
+ extrdi $t2,$t3,16,16
+ extrdi $t3,$t3,16,0
+ std $t0,`$FRAME+0`($sp)
+ std $t1,`$FRAME+8`($sp)
+ std $t2,`$FRAME+16`($sp)
+ std $t3,`$FRAME+24`($sp)
+ ; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values
+ extrdi $t4,$t7,16,48
+ extrdi $t5,$t7,16,32
+ extrdi $t6,$t7,16,16
+ extrdi $t7,$t7,16,0
+ std $t4,`$FRAME+32`($sp)
+ std $t5,`$FRAME+40`($sp)
+ std $t6,`$FRAME+48`($sp)
+ std $t7,`$FRAME+56`($sp)
+___
+$code.=<<___ if ($SIZE_T==8);
+ lwz $t0,4($ap) ; load a[j] as 32-bit word pair
+ lwz $t1,0($ap)
+ lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
+ lwz $t3,8($ap)
+ lwz $t4,4($np) ; load n[j] as 32-bit word pair
+ lwz $t5,0($np)
+ lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
+ lwz $t7,8($np)
+___
+$code.=<<___ if ($SIZE_T==4);
+ lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
+ lwz $t1,4($ap)
+ lwz $t2,8($ap)
+ lwz $t3,12($ap)
+ lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
+ lwz $t5,4($np)
+ lwz $t6,8($np)
+ lwz $t7,12($np)
+___
+$code.=<<___;
+ lfd $ba,`$FRAME+0`($sp)
+ lfd $bb,`$FRAME+8`($sp)
+ lfd $bc,`$FRAME+16`($sp)
+ lfd $bd,`$FRAME+24`($sp)
+ lfd $na,`$FRAME+32`($sp)
+ lfd $nb,`$FRAME+40`($sp)
+ lfd $nc,`$FRAME+48`($sp)
+ lfd $nd,`$FRAME+56`($sp)
+ std $t0,`$FRAME+64`($sp)
+ std $t1,`$FRAME+72`($sp)
+ std $t2,`$FRAME+80`($sp)
+ std $t3,`$FRAME+88`($sp)
+ std $t4,`$FRAME+96`($sp)
+ std $t5,`$FRAME+104`($sp)
+ std $t6,`$FRAME+112`($sp)
+ std $t7,`$FRAME+120`($sp)
+ fcfid $ba,$ba
+ fcfid $bb,$bb
+ fcfid $bc,$bc
+ fcfid $bd,$bd
+ fcfid $na,$na
+ fcfid $nb,$nb
+ fcfid $nc,$nc
+ fcfid $nd,$nd
+
+ lfd $A0,`$FRAME+64`($sp)
+ lfd $A1,`$FRAME+72`($sp)
+ lfd $A2,`$FRAME+80`($sp)
+ lfd $A3,`$FRAME+88`($sp)
+ lfd $N0,`$FRAME+96`($sp)
+ lfd $N1,`$FRAME+104`($sp)
+ lfd $N2,`$FRAME+112`($sp)
+ lfd $N3,`$FRAME+120`($sp)
+ fcfid $A0,$A0
+ fcfid $A1,$A1
+ fcfid $A2,$A2
+ fcfid $A3,$A3
+ fcfid $N0,$N0
+ fcfid $N1,$N1
+ fcfid $N2,$N2
+ fcfid $N3,$N3
+ addi $ap,$ap,16
+ addi $np,$np,16
+
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ stfd $A0,8($nap_d) ; save a[j] in double format
+ stfd $A1,16($nap_d)
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ stfd $A2,24($nap_d) ; save a[j+1] in double format
+ stfd $A3,32($nap_d)
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ stfd $N0,40($nap_d) ; save n[j] in double format
+ stfd $N1,48($nap_d)
+ fmul $T0a,$A0,$ba
+ fmul $T0b,$A0,$bb
+ stfd $N2,56($nap_d) ; save n[j+1] in double format
+ stfdu $N3,64($nap_d)
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+
+.align 5
+L1st:
+___
+$code.=<<___ if ($SIZE_T==8);
+ lwz $t0,4($ap) ; load a[j] as 32-bit word pair
+ lwz $t1,0($ap)
+ lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
+ lwz $t3,8($ap)
+ lwz $t4,4($np) ; load n[j] as 32-bit word pair
+ lwz $t5,0($np)
+ lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
+ lwz $t7,8($np)
+___
+$code.=<<___ if ($SIZE_T==4);
+ lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
+ lwz $t1,4($ap)
+ lwz $t2,8($ap)
+ lwz $t3,12($ap)
+ lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
+ lwz $t5,4($np)
+ lwz $t6,8($np)
+ lwz $t7,12($np)
+___
+$code.=<<___;
+ std $t0,`$FRAME+64`($sp)
+ std $t1,`$FRAME+72`($sp)
+ std $t2,`$FRAME+80`($sp)
+ std $t3,`$FRAME+88`($sp)
+ std $t4,`$FRAME+96`($sp)
+ std $t5,`$FRAME+104`($sp)
+ std $t6,`$FRAME+112`($sp)
+ std $t7,`$FRAME+120`($sp)
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+ lfd $A0,`$FRAME+64`($sp)
+ lfd $A1,`$FRAME+72`($sp)
+ lfd $A2,`$FRAME+80`($sp)
+ lfd $A3,`$FRAME+88`($sp)
+ lfd $N0,`$FRAME+96`($sp)
+ lfd $N1,`$FRAME+104`($sp)
+ lfd $N2,`$FRAME+112`($sp)
+ lfd $N3,`$FRAME+120`($sp)
+ fcfid $A0,$A0
+ fcfid $A1,$A1
+ fcfid $A2,$A2
+ fcfid $A3,$A3
+ fcfid $N0,$N0
+ fcfid $N1,$N1
+ fcfid $N2,$N2
+ fcfid $N3,$N3
+ addi $ap,$ap,16
+ addi $np,$np,16
+
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ stfd $A0,8($nap_d) ; save a[j] in double format
+ stfd $A1,16($nap_d)
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ fmadd $T0a,$A0,$ba,$dota
+ fmadd $T0b,$A0,$bb,$dotb
+ stfd $A2,24($nap_d) ; save a[j+1] in double format
+ stfd $A3,32($nap_d)
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ stfd $N0,40($nap_d) ; save n[j] in double format
+ stfd $N1,48($nap_d)
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ add $t0,$t0,$carry ; can not overflow
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+ stfd $N2,56($nap_d) ; save n[j+1] in double format
+ stfdu $N3,64($nap_d)
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ insrdi $t0,$t1,16,32
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ add $t2,$t2,$carry
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ srdi $carry,$t2,16
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+ insrdi $t0,$t2,16,16
+ add $t3,$t3,$carry
+ srdi $carry,$t3,16
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ add $t4,$t4,$carry
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ srdi $carry,$t4,16
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+ add $t5,$t5,$carry
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ add $t6,$t6,$carry
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ srdi $carry,$t6,16
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ insrdi $t4,$t6,16,16
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+ add $t7,$t7,$carry
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+ std $t0,8($tp) ; tp[j-1]
+ stdu $t4,16($tp) ; tp[j]
+ bdnz- L1st
+
+ fctid $dota,$dota
+ fctid $dotb,$dotb
+
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+ stfd $dota,`$FRAME+64`($sp)
+ stfd $dotb,`$FRAME+72`($sp)
+
+ add $t0,$t0,$carry ; can not overflow
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+ insrdi $t0,$t1,16,32
+ add $t2,$t2,$carry
+ srdi $carry,$t2,16
+ insrdi $t0,$t2,16,16
+ add $t3,$t3,$carry
+ srdi $carry,$t3,16
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ add $t4,$t4,$carry
+ srdi $carry,$t4,16
+ add $t5,$t5,$carry
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+ add $t6,$t6,$carry
+ srdi $carry,$t6,16
+ insrdi $t4,$t6,16,16
+ add $t7,$t7,$carry
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+ ld $t6,`$FRAME+64`($sp)
+ ld $t7,`$FRAME+72`($sp)
+
+ std $t0,8($tp) ; tp[j-1]
+ stdu $t4,16($tp) ; tp[j]
+
+ add $t6,$t6,$carry ; can not overflow
+ srdi $carry,$t6,16
+ add $t7,$t7,$carry
+ insrdi $t6,$t7,48,0
+ srdi $ovf,$t7,48
+ std $t6,8($tp) ; tp[num-1]
+
+ slwi $t7,$num,2
+ subf $nap_d,$t7,$nap_d ; rewind pointer
+
+ li $i,8 ; i=1
+.align 5
+Louter:
+___
+$code.=<<___ if ($SIZE_T==8);
+ ldx $t3,$bp,$i ; bp[i]
+___
+$code.=<<___ if ($SIZE_T==4);
+ add $t0,$bp,$i
+ lwz $t3,0($t0) ; bp[i,i+1]
+ lwz $t0,4($t0)
+ insrdi $t3,$t0,32,0
+___
+$code.=<<___;
+ ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0]
+ mulld $t7,$a0,$t3 ; ap[0]*bp[i]
+
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ add $t7,$t7,$t6 ; ap[0]*bp[i]+tp[0]
+ li $carry,0
+ mulld $t7,$t7,$n0 ; tp[0]*n0
+ mtctr $j
+
+ ; transfer bp[i] to FPU as 4x16-bit values
+ extrdi $t0,$t3,16,48
+ extrdi $t1,$t3,16,32
+ extrdi $t2,$t3,16,16
+ extrdi $t3,$t3,16,0
+ std $t0,`$FRAME+0`($sp)
+ std $t1,`$FRAME+8`($sp)
+ std $t2,`$FRAME+16`($sp)
+ std $t3,`$FRAME+24`($sp)
+ ; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values
+ extrdi $t4,$t7,16,48
+ extrdi $t5,$t7,16,32
+ extrdi $t6,$t7,16,16
+ extrdi $t7,$t7,16,0
+ std $t4,`$FRAME+32`($sp)
+ std $t5,`$FRAME+40`($sp)
+ std $t6,`$FRAME+48`($sp)
+ std $t7,`$FRAME+56`($sp)
+
+ lfd $A0,8($nap_d) ; load a[j] in double format
+ lfd $A1,16($nap_d)
+ lfd $A2,24($nap_d) ; load a[j+1] in double format
+ lfd $A3,32($nap_d)
+ lfd $N0,40($nap_d) ; load n[j] in double format
+ lfd $N1,48($nap_d)
+ lfd $N2,56($nap_d) ; load n[j+1] in double format
+ lfdu $N3,64($nap_d)
+
+ lfd $ba,`$FRAME+0`($sp)
+ lfd $bb,`$FRAME+8`($sp)
+ lfd $bc,`$FRAME+16`($sp)
+ lfd $bd,`$FRAME+24`($sp)
+ lfd $na,`$FRAME+32`($sp)
+ lfd $nb,`$FRAME+40`($sp)
+ lfd $nc,`$FRAME+48`($sp)
+ lfd $nd,`$FRAME+56`($sp)
+
+ fcfid $ba,$ba
+ fcfid $bb,$bb
+ fcfid $bc,$bc
+ fcfid $bd,$bd
+ fcfid $na,$na
+ fcfid $nb,$nb
+ fcfid $nc,$nc
+ fcfid $nd,$nd
+
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ fmul $T0a,$A0,$ba
+ fmul $T0b,$A0,$bb
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ lfd $A0,8($nap_d) ; load a[j] in double format
+ lfd $A1,16($nap_d)
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ lfd $A2,24($nap_d) ; load a[j+1] in double format
+ lfd $A3,32($nap_d)
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+
+.align 5
+Linner:
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ lfd $N0,40($nap_d) ; load n[j] in double format
+ lfd $N1,48($nap_d)
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ fmadd $T0a,$A0,$ba,$dota
+ fmadd $T0b,$A0,$bb,$dotb
+ lfd $N2,56($nap_d) ; load n[j+1] in double format
+ lfdu $N3,64($nap_d)
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ lfd $A0,8($nap_d) ; load a[j] in double format
+ lfd $A1,16($nap_d)
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+ lfd $A2,24($nap_d) ; load a[j+1] in double format
+ lfd $A3,32($nap_d)
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ add $t0,$t0,$carry ; can not overflow
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ insrdi $t0,$t1,16,32
+ ld $t1,8($tp) ; tp[j]
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ add $t2,$t2,$carry
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ srdi $carry,$t2,16
+ insrdi $t0,$t2,16,16
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+ add $t3,$t3,$carry
+ ldu $t2,16($tp) ; tp[j+1]
+ srdi $carry,$t3,16
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ add $t4,$t4,$carry
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ srdi $carry,$t4,16
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ add $t5,$t5,$carry
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+ add $t6,$t6,$carry
+ srdi $carry,$t6,16
+ insrdi $t4,$t6,16,16
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ add $t7,$t7,$carry
+ addc $t3,$t0,$t1
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t0,$t0,32,0
+ extrdi $t1,$t1,32,0
+ adde $t0,$t0,$t1
+___
+$code.=<<___;
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ adde $t5,$t4,$t2
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t4,$t4,32,0
+ extrdi $t2,$t2,32,0
+ adde $t4,$t4,$t2
+___
+$code.=<<___;
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+ addze $carry,$carry
+ std $t3,-16($tp) ; tp[j-1]
+ std $t5,-8($tp) ; tp[j]
+ bdnz- Linner
+
+ fctid $dota,$dota
+ fctid $dotb,$dotb
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+ stfd $dota,`$FRAME+64`($sp)
+ stfd $dotb,`$FRAME+72`($sp)
+
+ add $t0,$t0,$carry ; can not overflow
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+ insrdi $t0,$t1,16,32
+ add $t2,$t2,$carry
+ ld $t1,8($tp) ; tp[j]
+ srdi $carry,$t2,16
+ insrdi $t0,$t2,16,16
+ add $t3,$t3,$carry
+ ldu $t2,16($tp) ; tp[j+1]
+ srdi $carry,$t3,16
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ add $t4,$t4,$carry
+ srdi $carry,$t4,16
+ add $t5,$t5,$carry
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+ add $t6,$t6,$carry
+ srdi $carry,$t6,16
+ insrdi $t4,$t6,16,16
+ add $t7,$t7,$carry
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+ ld $t6,`$FRAME+64`($sp)
+ ld $t7,`$FRAME+72`($sp)
+
+ addc $t3,$t0,$t1
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t0,$t0,32,0
+ extrdi $t1,$t1,32,0
+ adde $t0,$t0,$t1
+___
+$code.=<<___;
+ adde $t5,$t4,$t2
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t4,$t4,32,0
+ extrdi $t2,$t2,32,0
+ adde $t4,$t4,$t2
+___
+$code.=<<___;
+ addze $carry,$carry
+
+ std $t3,-16($tp) ; tp[j-1]
+ std $t5,-8($tp) ; tp[j]
+
+	add	$carry,$carry,$ovf	; consume upmost overflow
+ add $t6,$t6,$carry ; can not overflow
+ srdi $carry,$t6,16
+ add $t7,$t7,$carry
+ insrdi $t6,$t7,48,0
+ srdi $ovf,$t7,48
+ std $t6,0($tp) ; tp[num-1]
+
+ slwi $t7,$num,2
+ addi $i,$i,8
+ subf $nap_d,$t7,$nap_d ; rewind pointer
+ cmpw $i,$num
+ blt- Louter
+___
+
+$code.=<<___ if ($SIZE_T==8);
+ subf $np,$num,$np ; rewind np
+ addi $j,$j,1 ; restore counter
+ subfc $i,$i,$i ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,`$FRAME+$TRANSFER+8`
+ addi $t4,$sp,`$FRAME+$TRANSFER+16`
+ addi $t5,$np,8
+ addi $t6,$rp,8
+ mtctr $j
+
+.align 4
+Lsub: ldx $t0,$tp,$i
+ ldx $t1,$np,$i
+ ldx $t2,$t4,$i
+ ldx $t3,$t5,$i
+ subfe $t0,$t1,$t0 ; tp[j]-np[j]
+ subfe $t2,$t3,$t2 ; tp[j+1]-np[j+1]
+ stdx $t0,$rp,$i
+ stdx $t2,$t6,$i
+ addi $i,$i,16
+ bdnz- Lsub
+
+ li $i,0
+ subfe $ovf,$i,$ovf ; handle upmost overflow bit
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+ addi $t7,$ap,8
+ mtctr $j
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ ldx $t0,$ap,$i
+ ldx $t1,$t7,$i
+ std $i,8($nap_d) ; zap nap_d
+ std $i,16($nap_d)
+ std $i,24($nap_d)
+ std $i,32($nap_d)
+ std $i,40($nap_d)
+ std $i,48($nap_d)
+ std $i,56($nap_d)
+ stdu $i,64($nap_d)
+ stdx $t0,$rp,$i
+ stdx $t1,$t6,$i
+ stdx $i,$tp,$i ; zap tp at once
+ stdx $i,$t4,$i
+ addi $i,$i,16
+ bdnz- Lcopy
+___
+$code.=<<___ if ($SIZE_T==4);
+ subf $np,$num,$np ; rewind np
+ addi $j,$j,1 ; restore counter
+ subfc $i,$i,$i ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ addi $np,$np,-4
+ addi $rp,$rp,-4
+ addi $ap,$sp,`$FRAME+$TRANSFER+4`
+ mtctr $j
+
+.align 4
+Lsub: ld $t0,8($tp) ; load tp[j..j+3] in 64-bit word order
+ ldu $t2,16($tp)
+ lwz $t4,4($np) ; load np[j..j+3] in 32-bit word order
+ lwz $t5,8($np)
+ lwz $t6,12($np)
+ lwzu $t7,16($np)
+ extrdi $t1,$t0,32,0
+ extrdi $t3,$t2,32,0
+ subfe $t4,$t4,$t0 ; tp[j]-np[j]
+ stw $t0,4($ap) ; save tp[j..j+3] in 32-bit word order
+ subfe $t5,$t5,$t1 ; tp[j+1]-np[j+1]
+ stw $t1,8($ap)
+ subfe $t6,$t6,$t2 ; tp[j+2]-np[j+2]
+ stw $t2,12($ap)
+ subfe $t7,$t7,$t3 ; tp[j+3]-np[j+3]
+ stwu $t3,16($ap)
+ stw $t4,4($rp)
+ stw $t5,8($rp)
+ stw $t6,12($rp)
+ stwu $t7,16($rp)
+ bdnz- Lsub
+
+ li $i,0
+ subfe $ovf,$i,$ovf ; handle upmost overflow bit
+ addi $tp,$sp,`$FRAME+$TRANSFER+4`
+ subf $rp,$num,$rp ; rewind rp
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ mtctr $j
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ lwz $t0,4($ap)
+ lwz $t1,8($ap)
+ lwz $t2,12($ap)
+ lwzu $t3,16($ap)
+ std $i,8($nap_d) ; zap nap_d
+ std $i,16($nap_d)
+ std $i,24($nap_d)
+ std $i,32($nap_d)
+ std $i,40($nap_d)
+ std $i,48($nap_d)
+ std $i,56($nap_d)
+ stdu $i,64($nap_d)
+ stw $t0,4($rp)
+ stw $t1,8($rp)
+ stw $t2,12($rp)
+ stwu $t3,16($rp)
+ std $i,8($tp) ; zap tp at once
+ stdu $i,16($tp)
+ bdnz- Lcopy
+___
+
+$code.=<<___;
+ $POP $i,0($sp)
+ li r3,1 ; signal "handled"
+ $POP r22,`-12*8-10*$SIZE_T`($i)
+ $POP r23,`-12*8-9*$SIZE_T`($i)
+ $POP r24,`-12*8-8*$SIZE_T`($i)
+ $POP r25,`-12*8-7*$SIZE_T`($i)
+ $POP r26,`-12*8-6*$SIZE_T`($i)
+ $POP r27,`-12*8-5*$SIZE_T`($i)
+ $POP r28,`-12*8-4*$SIZE_T`($i)
+ $POP r29,`-12*8-3*$SIZE_T`($i)
+ $POP r30,`-12*8-2*$SIZE_T`($i)
+ $POP r31,`-12*8-1*$SIZE_T`($i)
+ lfd f20,`-12*8`($i)
+ lfd f21,`-11*8`($i)
+ lfd f22,`-10*8`($i)
+ lfd f23,`-9*8`($i)
+ lfd f24,`-8*8`($i)
+ lfd f25,`-7*8`($i)
+ lfd f26,`-6*8`($i)
+ lfd f27,`-5*8`($i)
+ lfd f28,`-4*8`($i)
+ lfd f29,`-3*8`($i)
+ lfd f30,`-2*8`($i)
+ lfd f31,`-1*8`($i)
+ mr $sp,$i
+ blr
+ .long 0
+ .byte 0,12,4,0,0x8c,10,6,0
+ .long 0
+
+.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/crypto/bn/asm/s390x-gf2m.pl b/crypto/bn/asm/s390x-gf2m.pl
new file mode 100755
index 000000000000..cd9f13eca292
--- /dev/null
+++ b/crypto/bn/asm/s390x-gf2m.pl
@@ -0,0 +1,221 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's a kind of low-hanging mechanical port from C for
+# the time being... gcc 4.3 appeared to generate poor code, therefore
+# the effort. And indeed, the module delivers 55%-90%(*) improvement
+# on the heaviest ECDSA verify and ECDH benchmarks for 163- and
+# 571-bit key lengths on z990, 30%-55%(*) - on z10, and 70%-110%(*) -
+# on z196. This is for the 64-bit build. In the 32-bit "highgprs" case
+# the improvement is even higher, for example on z990 it was measured
+# at 80%-150%. ECDSA sign is a modest 9%-12% faster. Keep in mind that
+# these coefficients are not the ones for bn_GF2m_mul_2x2 itself, as
+# not all CPU time is burnt in it...
+#
+# (*) gcc 4.1 was observed to deliver better results than gcc 4.3,
+# so that improvement coefficients can vary from one specific
+# setup to another.
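+#
+# For orientation, bn_GF2m_mul_2x2 below reduces the 128x128-bit
+# carry-less product to three 64x64-bit products combined
+# Karatsuba-style; a rough C-like sketch of the combination step
+# (names are illustrative only, this is not bn_gf2m.c code):
+#
+#	/* a0*b0 = h0:l0, a1*b1 = h1:l1, (a0^a1)*(b0^b1) = mh:ml */
+#	r[0] = l0;
+#	r[1] = h0 ^ l0 ^ l1 ^ ml;
+#	r[2] = l1 ^ h0 ^ h1 ^ mh;
+#	r[3] = h1;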
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$stdframe=16*$SIZE_T+4*8;
+
+$rp="%r2";
+$a1="%r3";
+$a0="%r4";
+$b1="%r5";
+$b0="%r6";
+
+$ra="%r14";
+$sp="%r15";
+
+@T=("%r0","%r1");
+@i=("%r12","%r13");
+
+($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(6..11));
+($lo,$hi,$b)=map("%r$_",(3..5)); $a=$lo; $mask=$a8;
+
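+# _mul_1x1 computes the 128-bit carry-less product $hi:$lo of $a and $b
+# with a 4-bit window: the 16 GF(2)-linear combinations of a, a<<1,
+# a<<2 and a<<3 are tabulated on the stack, b is then scanned one
+# nibble at a time and the correspondingly shifted table entries are
+# XOR-ed into the accumulator. The top three bits of a are masked out
+# of the table inputs (nihh) and their contribution is folded in
+# separately via the srag/ngr broadcast-and-mask sequence.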
+$code.=<<___;
+.text
+
+.type _mul_1x1,\@function
+.align 16
+_mul_1x1:
+ lgr $a1,$a
+ sllg $a2,$a,1
+ sllg $a4,$a,2
+ sllg $a8,$a,3
+
+ srag $lo,$a1,63 # broadcast 63rd bit
+ nihh $a1,0x1fff
+ srag @i[0],$a2,63 # broadcast 62nd bit
+ nihh $a2,0x3fff
+ srag @i[1],$a4,63 # broadcast 61st bit
+ nihh $a4,0x7fff
+ ngr $lo,$b
+ ngr @i[0],$b
+ ngr @i[1],$b
+
+ lghi @T[0],0
+ lgr $a12,$a1
+ stg @T[0],`$stdframe+0*8`($sp) # tab[0]=0
+ xgr $a12,$a2
+ stg $a1,`$stdframe+1*8`($sp) # tab[1]=a1
+ lgr $a48,$a4
+ stg $a2,`$stdframe+2*8`($sp) # tab[2]=a2
+ xgr $a48,$a8
+ stg $a12,`$stdframe+3*8`($sp) # tab[3]=a1^a2
+ xgr $a1,$a4
+
+ stg $a4,`$stdframe+4*8`($sp) # tab[4]=a4
+ xgr $a2,$a4
+ stg $a1,`$stdframe+5*8`($sp) # tab[5]=a1^a4
+ xgr $a12,$a4
+ stg $a2,`$stdframe+6*8`($sp) # tab[6]=a2^a4
+ xgr $a1,$a48
+ stg $a12,`$stdframe+7*8`($sp) # tab[7]=a1^a2^a4
+ xgr $a2,$a48
+
+ stg $a8,`$stdframe+8*8`($sp) # tab[8]=a8
+ xgr $a12,$a48
+ stg $a1,`$stdframe+9*8`($sp) # tab[9]=a1^a8
+ xgr $a1,$a4
+ stg $a2,`$stdframe+10*8`($sp) # tab[10]=a2^a8
+ xgr $a2,$a4
+ stg $a12,`$stdframe+11*8`($sp) # tab[11]=a1^a2^a8
+
+ xgr $a12,$a4
+ stg $a48,`$stdframe+12*8`($sp) # tab[12]=a4^a8
+ srlg $hi,$lo,1
+ stg $a1,`$stdframe+13*8`($sp) # tab[13]=a1^a4^a8
+ sllg $lo,$lo,63
+ stg $a2,`$stdframe+14*8`($sp) # tab[14]=a2^a4^a8
+ srlg @T[0],@i[0],2
+ stg $a12,`$stdframe+15*8`($sp) # tab[15]=a1^a2^a4^a8
+
+ lghi $mask,`0xf<<3`
+ sllg $a1,@i[0],62
+ sllg @i[0],$b,3
+ srlg @T[1],@i[1],3
+ ngr @i[0],$mask
+ sllg $a2,@i[1],61
+ srlg @i[1],$b,4-3
+ xgr $hi,@T[0]
+ ngr @i[1],$mask
+ xgr $lo,$a1
+ xgr $hi,@T[1]
+ xgr $lo,$a2
+
+ xg $lo,$stdframe(@i[0],$sp)
+ srlg @i[0],$b,8-3
+ ngr @i[0],$mask
+___
+for($n=1;$n<14;$n++) {
+$code.=<<___;
+ lg @T[1],$stdframe(@i[1],$sp)
+ srlg @i[1],$b,`($n+2)*4`-3
+ sllg @T[0],@T[1],`$n*4`
+ ngr @i[1],$mask
+ srlg @T[1],@T[1],`64-$n*4`
+ xgr $lo,@T[0]
+ xgr $hi,@T[1]
+___
+ push(@i,shift(@i)); push(@T,shift(@T));
+}
+$code.=<<___;
+ lg @T[1],$stdframe(@i[1],$sp)
+ sllg @T[0],@T[1],`$n*4`
+ srlg @T[1],@T[1],`64-$n*4`
+ xgr $lo,@T[0]
+ xgr $hi,@T[1]
+
+ lg @T[0],$stdframe(@i[0],$sp)
+ sllg @T[1],@T[0],`($n+1)*4`
+ srlg @T[0],@T[0],`64-($n+1)*4`
+ xgr $lo,@T[1]
+ xgr $hi,@T[0]
+
+ br $ra
+.size _mul_1x1,.-_mul_1x1
+
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,\@function
+.align 16
+bn_GF2m_mul_2x2:
+ stm${g} %r3,%r15,3*$SIZE_T($sp)
+
+ lghi %r1,-$stdframe-128
+ la %r0,0($sp)
+ la $sp,0(%r1,$sp) # alloca
+ st${g} %r0,0($sp) # back chain
+___
+if ($SIZE_T==8) {
+my @r=map("%r$_",(6..9));
+$code.=<<___;
+ bras $ra,_mul_1x1 # a1·b1
+ stmg $lo,$hi,16($rp)
+
+ lg $a,`$stdframe+128+4*$SIZE_T`($sp)
+ lg $b,`$stdframe+128+6*$SIZE_T`($sp)
+ bras $ra,_mul_1x1 # a0·b0
+ stmg $lo,$hi,0($rp)
+
+ lg $a,`$stdframe+128+3*$SIZE_T`($sp)
+ lg $b,`$stdframe+128+5*$SIZE_T`($sp)
+ xg $a,`$stdframe+128+4*$SIZE_T`($sp)
+ xg $b,`$stdframe+128+6*$SIZE_T`($sp)
+ bras $ra,_mul_1x1 # (a0+a1)·(b0+b1)
+ lmg @r[0],@r[3],0($rp)
+
+ xgr $lo,$hi
+ xgr $hi,@r[1]
+ xgr $lo,@r[0]
+ xgr $hi,@r[2]
+ xgr $lo,@r[3]
+ xgr $hi,@r[3]
+ xgr $lo,$hi
+ stg $hi,16($rp)
+ stg $lo,8($rp)
+___
+} else {
+$code.=<<___;
+ sllg %r3,%r3,32
+ sllg %r5,%r5,32
+ or %r3,%r4
+ or %r5,%r6
+ bras $ra,_mul_1x1
+ rllg $lo,$lo,32
+ rllg $hi,$hi,32
+ stmg $lo,$hi,0($rp)
+___
+}
+$code.=<<___;
+ lm${g} %r6,%r15,`$stdframe+128+6*$SIZE_T`($sp)
+ br $ra
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.string "GF(2^m) Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/crypto/bn/asm/s390x-mont.pl b/crypto/bn/asm/s390x-mont.pl
new file mode 100755
index 000000000000..9fd64e81eef3
--- /dev/null
+++ b/crypto/bn/asm/s390x-mont.pl
@@ -0,0 +1,277 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# April 2007.
+#
+# Performance improvement over vanilla C code varies from 85% to 45%
+# depending on key length and benchmark. Unfortunately in this context
+# these are not very impressive results [for code that utilizes "wide"
+# 64x64=128-bit multiplication, which is not commonly available to C
+# programmers]; at the very least a hand-coded bn_asm.c replacement is
+# known to provide 30-40% better results for the longest keys. On
+# second thought it's not very surprising, because z-CPUs are
+# single-issue with _strictly_ in-order execution, while bn_mul_mont is
+# more or less dependent on the CPU's ability to pipe-line instructions
+# and keep several of them "in-flight" at the same time. While other
+# methods, for example Karatsuba, aim to minimize the number of
+# multiplications at the cost of an increase in other operations,
+# bn_mul_mont aims to neatly "overlap" multiplications with the other
+# operations [and on most platforms even minimize the amount of the
+# other operations, in particular references to memory]. But it's
+# possible to improve this module's performance by implementing a
+# dedicated squaring code-path and possibly by unrolling loops...
+
+# January 2009.
+#
+# Reschedule to minimize/avoid Address Generation Interlock hazard,
+# make inner loops counter-based.
+
+# November 2010.
+#
+# Adapt for -m31 build. If the kernel supports what's called the
+# "highgprs" feature on Linux [see /proc/cpuinfo], it's possible to use
+# 64-bit instructions and achieve "64-bit" performance even in a 31-bit
+# legacy application context. The feature is not specific to any
+# particular processor, as long as it's a "z-CPU". The latter implies
+# that the code remains z/Architecture specific. Compatibility with
+# 32-bit BN_ULONG is achieved by swapping words after 64-bit loads;
+# follow the _dswap-s.
+# On z990 it was measured to perform 2.6-2.2 times better than
+# compiler-generated code, less for longer keys...
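+#
+# For illustration (values are made up): with 32-bit BN_ULONGs
+# a[0]=0x11111111 and a[1]=0x22222222 stored big-endian, "lg" fetches
+# 0x1111111122222222; the _dswap marker expands to "rllg reg,reg,32"
+# [see the substitution at the bottom of this file], yielding
+# 0x2222222211111111, i.e. the 64-bit value a[1]:a[0] that the 64-bit
+# arithmetic below expects.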
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$stdframe=16*$SIZE_T+4*8;
+
+$mn0="%r0";
+$num="%r1";
+
+# int bn_mul_mont(
+$rp="%r2"; # BN_ULONG *rp,
+$ap="%r3"; # const BN_ULONG *ap,
+$bp="%r4"; # const BN_ULONG *bp,
+$np="%r5"; # const BN_ULONG *np,
+$n0="%r6"; # const BN_ULONG *n0,
+#$num="160(%r15)" # int num);
+
+$bi="%r2"; # zaps rp
+$j="%r7";
+
+$ahi="%r8";
+$alo="%r9";
+$nhi="%r10";
+$nlo="%r11";
+$AHI="%r12";
+$NHI="%r13";
+$count="%r14";
+$sp="%r15";
+
+$code.=<<___;
+.text
+.globl bn_mul_mont
+.type bn_mul_mont,\@function
+bn_mul_mont:
+ lgf $num,`$stdframe+$SIZE_T-4`($sp) # pull $num
+ sla $num,`log($SIZE_T)/log(2)` # $num to enumerate bytes
+ la $bp,0($num,$bp)
+
+ st${g} %r2,2*$SIZE_T($sp)
+
+ cghi $num,16 #
+ lghi %r2,0 #
+ blr %r14 # if($num<16) return 0;
+___
+$code.=<<___ if ($flavour =~ /3[12]/);
+ tmll $num,4
+ bnzr %r14 # if ($num&1) return 0;
+___
+$code.=<<___ if ($flavour !~ /3[12]/);
+ cghi $num,96 #
+ bhr %r14 # if($num>96) return 0;
+___
+$code.=<<___;
+ stm${g} %r3,%r15,3*$SIZE_T($sp)
+
+ lghi $rp,-$stdframe-8 # leave room for carry bit
+ lcgr $j,$num # -$num
+ lgr %r0,$sp
+ la $rp,0($rp,$sp)
+ la $sp,0($j,$rp) # alloca
+ st${g} %r0,0($sp) # back chain
+
+ sra $num,3 # restore $num
+ la $bp,0($j,$bp) # restore $bp
+ ahi $num,-1 # adjust $num for inner loop
+ lg $n0,0($n0) # pull n0
+ _dswap $n0
+
+ lg $bi,0($bp)
+ _dswap $bi
+ lg $alo,0($ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[0]*bp[0]
+ lgr $AHI,$ahi
+
+ lgr $mn0,$alo # "tp[0]"*n0
+ msgr $mn0,$n0
+
+ lg $nlo,0($np) #
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[0]*m1
+ algr $nlo,$alo # +="tp[0]"
+ lghi $NHI,0
+ alcgr $NHI,$nhi
+
+ la $j,8(%r0) # j=1
+ lr $count,$num
+
+.align 16
+.L1st:
+ lg $alo,0($j,$ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[j]*bp[0]
+ algr $alo,$AHI
+ lghi $AHI,0
+ alcgr $AHI,$ahi
+
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[j]*m1
+ algr $nlo,$NHI
+ lghi $NHI,0
+ alcgr $nhi,$NHI # +="tp[j]"
+ algr $nlo,$alo
+ alcgr $NHI,$nhi
+
+ stg $nlo,$stdframe-8($j,$sp) # tp[j-1]=
+ la $j,8($j) # j++
+ brct $count,.L1st
+
+ algr $NHI,$AHI
+ lghi $AHI,0
+ alcgr $AHI,$AHI # upmost overflow bit
+ stg $NHI,$stdframe-8($j,$sp)
+ stg $AHI,$stdframe($j,$sp)
+ la $bp,8($bp) # bp++
+
+.Louter:
+ lg $bi,0($bp) # bp[i]
+ _dswap $bi
+ lg $alo,0($ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[0]*bp[i]
+ alg $alo,$stdframe($sp) # +=tp[0]
+ lghi $AHI,0
+ alcgr $AHI,$ahi
+
+ lgr $mn0,$alo
+ msgr $mn0,$n0 # tp[0]*n0
+
+ lg $nlo,0($np) # np[0]
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[0]*m1
+ algr $nlo,$alo # +="tp[0]"
+ lghi $NHI,0
+ alcgr $NHI,$nhi
+
+ la $j,8(%r0) # j=1
+ lr $count,$num
+
+.align 16
+.Linner:
+ lg $alo,0($j,$ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[j]*bp[i]
+ algr $alo,$AHI
+ lghi $AHI,0
+ alcgr $ahi,$AHI
+ alg $alo,$stdframe($j,$sp)# +=tp[j]
+ alcgr $AHI,$ahi
+
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[j]*m1
+ algr $nlo,$NHI
+ lghi $NHI,0
+ alcgr $nhi,$NHI
+ algr $nlo,$alo # +="tp[j]"
+ alcgr $NHI,$nhi
+
+ stg $nlo,$stdframe-8($j,$sp) # tp[j-1]=
+ la $j,8($j) # j++
+ brct $count,.Linner
+
+ algr $NHI,$AHI
+ lghi $AHI,0
+ alcgr $AHI,$AHI
+ alg $NHI,$stdframe($j,$sp)# accumulate previous upmost overflow bit
+ lghi $ahi,0
+ alcgr $AHI,$ahi # new upmost overflow bit
+ stg $NHI,$stdframe-8($j,$sp)
+ stg $AHI,$stdframe($j,$sp)
+
+ la $bp,8($bp) # bp++
+ cl${g} $bp,`$stdframe+8+4*$SIZE_T`($j,$sp) # compare to &bp[num]
+ jne .Louter
+
+ l${g} $rp,`$stdframe+8+2*$SIZE_T`($j,$sp) # reincarnate rp
+ la $ap,$stdframe($sp)
+ ahi $num,1 # restore $num, incidentally clears "borrow"
+
+ la $j,0(%r0)
+ lr $count,$num
+.Lsub: lg $alo,0($j,$ap)
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ slbgr $alo,$nlo
+ stg $alo,0($j,$rp)
+ la $j,8($j)
+ brct $count,.Lsub
+ lghi $ahi,0
+ slbgr $AHI,$ahi # handle upmost carry
+
+ ngr $ap,$AHI
+ lghi $np,-1
+ xgr $np,$AHI
+ ngr $np,$rp
+ ogr $ap,$np # ap=borrow?tp:rp
+
+ la $j,0(%r0)
+ lgr $count,$num
+.Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh
+ _dswap $alo
+ stg $j,$stdframe($j,$sp) # zap tp
+ stg $alo,0($j,$rp)
+ la $j,8($j)
+ brct $count,.Lcopy
+
+ la %r1,`$stdframe+8+6*$SIZE_T`($j,$sp)
+ lm${g} %r6,%r15,0(%r1)
+ lghi %r2,1 # signal "processed"
+ br %r14
+.size bn_mul_mont,.-bn_mul_mont
+.string "Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+ s/_dswap\s+(%r[0-9]+)/sprintf("rllg\t%s,%s,32",$1,$1) if($SIZE_T==4)/e;
+ print $_,"\n";
+}
+close STDOUT;
diff --git a/crypto/bn/asm/s390x.S b/crypto/bn/asm/s390x.S
new file mode 100755
index 000000000000..43fcb79bc011
--- /dev/null
+++ b/crypto/bn/asm/s390x.S
@@ -0,0 +1,678 @@
+.ident "s390x.S, version 1.1"
+// ====================================================================
+// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+// project.
+//
+// Rights for redistribution and usage in source and binary forms are
+// granted according to the OpenSSL license. Warranty of any kind is
+// disclaimed.
+// ====================================================================
+
+.text
+
+#define zero %r0
+
+// BN_ULONG bn_mul_add_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
+.globl bn_mul_add_words
+.type bn_mul_add_words,@function
+.align 4
+bn_mul_add_words:
+ lghi zero,0 // zero = 0
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0;
+ ltgfr %r4,%r4
+ bler %r14 // if (len<=0) return 0;
+
+ stmg %r6,%r10,48(%r15)
+ lghi %r10,3
+ lghi %r8,0 // carry = 0
+ nr %r10,%r4 // len%4
+ sra %r4,2 // cnt=len/4
+ jz .Loop1_madd // carry is incidentally cleared if branch taken
+ algr zero,zero // clear carry
+
+.Loop4_madd:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ alcgr %r6,zero
+ alg %r7,0(%r2,%r1) // +=rp[i]
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lg %r9,8(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ alcgr %r8,zero
+ alg %r9,8(%r2,%r1)
+ stg %r9,8(%r2,%r1)
+
+ lg %r7,16(%r2,%r3)
+ mlgr %r6,%r5
+ alcgr %r7,%r8
+ alcgr %r6,zero
+ alg %r7,16(%r2,%r1)
+ stg %r7,16(%r2,%r1)
+
+ lg %r9,24(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ alcgr %r8,zero
+ alg %r9,24(%r2,%r1)
+ stg %r9,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r4,.Loop4_madd
+
+ la %r10,1(%r10) // see if len%4 is zero ...
+ brct %r10,.Loop1_madd // without touching condition code:-)
+
+.Lend_madd:
+ alcgr %r8,zero // collect carry bit
+ lgr %r2,%r8
+ lmg %r6,%r10,48(%r15)
+ br %r14
+
+.Loop1_madd:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ alcgr %r6,zero
+ alg %r7,0(%r2,%r1) // +=rp[i]
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lgr %r8,%r6
+ la %r2,8(%r2) // i++
+ brct %r10,.Loop1_madd
+
+ j .Lend_madd
+.size bn_mul_add_words,.-bn_mul_add_words
+
+// BN_ULONG bn_mul_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
+.globl bn_mul_words
+.type bn_mul_words,@function
+.align 4
+bn_mul_words:
+ lghi zero,0 // zero = 0
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0;
+ ltgfr %r4,%r4
+ bler %r14 // if (len<=0) return 0;
+
+ stmg %r6,%r10,48(%r15)
+ lghi %r10,3
+ lghi %r8,0 // carry = 0
+ nr %r10,%r4 // len%4
+ sra %r4,2 // cnt=len/4
+ jz .Loop1_mul // carry is incidentally cleared if branch taken
+ algr zero,zero // clear carry
+
+.Loop4_mul:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lg %r9,8(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ stg %r9,8(%r2,%r1)
+
+ lg %r7,16(%r2,%r3)
+ mlgr %r6,%r5
+ alcgr %r7,%r8
+ stg %r7,16(%r2,%r1)
+
+ lg %r9,24(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ stg %r9,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r4,.Loop4_mul
+
+ la %r10,1(%r10) // see if len%4 is zero ...
+ brct %r10,.Loop1_mul // without touching condition code:-)
+
+.Lend_mul:
+ alcgr %r8,zero // collect carry bit
+ lgr %r2,%r8
+ lmg %r6,%r10,48(%r15)
+ br %r14
+
+.Loop1_mul:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lgr %r8,%r6
+ la %r2,8(%r2) // i++
+ brct %r10,.Loop1_mul
+
+ j .Lend_mul
+.size bn_mul_words,.-bn_mul_words
+
+// void bn_sqr_words(BN_ULONG *r2,BN_ULONG *r2,int r4)
+.globl bn_sqr_words
+.type bn_sqr_words,@function
+.align 4
+bn_sqr_words:
+ ltgfr %r4,%r4
+ bler %r14
+
+ stmg %r6,%r7,48(%r15)
+ srag %r1,%r4,2 // cnt=len/4
+ jz .Loop1_sqr
+
+.Loop4_sqr:
+ lg %r7,0(%r3)
+ mlgr %r6,%r7
+ stg %r7,0(%r2)
+ stg %r6,8(%r2)
+
+ lg %r7,8(%r3)
+ mlgr %r6,%r7
+ stg %r7,16(%r2)
+ stg %r6,24(%r2)
+
+ lg %r7,16(%r3)
+ mlgr %r6,%r7
+ stg %r7,32(%r2)
+ stg %r6,40(%r2)
+
+ lg %r7,24(%r3)
+ mlgr %r6,%r7
+ stg %r7,48(%r2)
+ stg %r6,56(%r2)
+
+ la %r3,32(%r3)
+ la %r2,64(%r2)
+ brct %r1,.Loop4_sqr
+
+ lghi %r1,3
+ nr %r4,%r1 // cnt=len%4
+ jz .Lend_sqr
+
+.Loop1_sqr:
+ lg %r7,0(%r3)
+ mlgr %r6,%r7
+ stg %r7,0(%r2)
+ stg %r6,8(%r2)
+
+ la %r3,8(%r3)
+ la %r2,16(%r2)
+ brct %r4,.Loop1_sqr
+
+.Lend_sqr:
+ lmg %r6,%r7,48(%r15)
+ br %r14
+.size bn_sqr_words,.-bn_sqr_words
+
+// BN_ULONG bn_div_words(BN_ULONG h,BN_ULONG l,BN_ULONG d);
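+// dlgr divides the 128-bit dividend %r2:%r3 (h:l) by %r4, leaving the
+// remainder in %r2 and the quotient in %r3, which is then moved to %r2
+// for return; dlgr requires the quotient to fit in 64 bits, i.e. h<d.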
+.globl bn_div_words
+.type bn_div_words,@function
+.align 4
+bn_div_words:
+ dlgr %r2,%r4
+ lgr %r2,%r3
+ br %r14
+.size bn_div_words,.-bn_div_words
+
+// BN_ULONG bn_add_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
+.globl bn_add_words
+.type bn_add_words,@function
+.align 4
+bn_add_words:
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0
+ ltgfr %r5,%r5
+ bler %r14 // if (len<=0) return 0;
+
+ stg %r6,48(%r15)
+ lghi %r6,3
+ nr %r6,%r5 // len%4
+ sra %r5,2 // len/4, use sra because it sets condition code
+ jz .Loop1_add // carry is incidentally cleared if branch taken
+ algr %r2,%r2 // clear carry
+
+.Loop4_add:
+ lg %r0,0(%r2,%r3)
+ alcg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+ lg %r0,8(%r2,%r3)
+ alcg %r0,8(%r2,%r4)
+ stg %r0,8(%r2,%r1)
+ lg %r0,16(%r2,%r3)
+ alcg %r0,16(%r2,%r4)
+ stg %r0,16(%r2,%r1)
+ lg %r0,24(%r2,%r3)
+ alcg %r0,24(%r2,%r4)
+ stg %r0,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r5,.Loop4_add
+
+ la %r6,1(%r6) // see if len%4 is zero ...
+ brct %r6,.Loop1_add // without touching condition code:-)
+
+.Lexit_add:
+ lghi %r2,0
+ alcgr %r2,%r2
+ lg %r6,48(%r15)
+ br %r14
+
+.Loop1_add:
+ lg %r0,0(%r2,%r3)
+ alcg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+
+ la %r2,8(%r2) // i++
+ brct %r6,.Loop1_add
+
+ j .Lexit_add
+.size bn_add_words,.-bn_add_words
+
+// BN_ULONG bn_sub_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
+.globl bn_sub_words
+.type bn_sub_words,@function
+.align 4
+bn_sub_words:
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0
+ ltgfr %r5,%r5
+ bler %r14 // if (len<=0) return 0;
+
+ stg %r6,48(%r15)
+ lghi %r6,3
+ nr %r6,%r5 // len%4
+ sra %r5,2 // len/4, use sra because it sets condition code
+ jnz .Loop4_sub // borrow is incidentally cleared if branch taken
+ slgr %r2,%r2 // clear borrow
+
+.Loop1_sub:
+ lg %r0,0(%r2,%r3)
+ slbg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+
+ la %r2,8(%r2) // i++
+ brct %r6,.Loop1_sub
+ j .Lexit_sub
+
+.Loop4_sub:
+ lg %r0,0(%r2,%r3)
+ slbg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+ lg %r0,8(%r2,%r3)
+ slbg %r0,8(%r2,%r4)
+ stg %r0,8(%r2,%r1)
+ lg %r0,16(%r2,%r3)
+ slbg %r0,16(%r2,%r4)
+ stg %r0,16(%r2,%r1)
+ lg %r0,24(%r2,%r3)
+ slbg %r0,24(%r2,%r4)
+ stg %r0,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r5,.Loop4_sub
+
+ la %r6,1(%r6) // see if len%4 is zero ...
+ brct %r6,.Loop1_sub // without touching condition code:-)
+
+.Lexit_sub:
+ lghi %r2,0
+ slbgr %r2,%r2
+ lcgr %r2,%r2
+ lg %r6,48(%r15)
+ br %r14
+.size bn_sub_words,.-bn_sub_words
+
+#define c1 %r1
+#define c2 %r5
+#define c3 %r8
+
+#define mul_add_c(ai,bi,c1,c2,c3) \
+ lg %r7,ai*8(%r3); \
+ mlg %r6,bi*8(%r4); \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero
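+// i.e. (c3:c2:c1) += a[ai]*b[bi]: the 128-bit product lands in %r6:%r7,
+// its low half is added into c1, the high half into c2 with carry, and
+// the final alcgr ripples the carry into c3.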
+
+// void bn_mul_comba8(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
+.globl bn_mul_comba8
+.type bn_mul_comba8,@function
+.align 4
+bn_mul_comba8:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ mul_add_c(0,0,c1,c2,c3);
+ stg c1,0*8(%r2)
+ lghi c1,0
+
+ mul_add_c(0,1,c2,c3,c1);
+ mul_add_c(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ mul_add_c(2,0,c3,c1,c2);
+ mul_add_c(1,1,c3,c1,c2);
+ mul_add_c(0,2,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ mul_add_c(0,3,c1,c2,c3);
+ mul_add_c(1,2,c1,c2,c3);
+ mul_add_c(2,1,c1,c2,c3);
+ mul_add_c(3,0,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ mul_add_c(4,0,c2,c3,c1);
+ mul_add_c(3,1,c2,c3,c1);
+ mul_add_c(2,2,c2,c3,c1);
+ mul_add_c(1,3,c2,c3,c1);
+ mul_add_c(0,4,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ mul_add_c(0,5,c3,c1,c2);
+ mul_add_c(1,4,c3,c1,c2);
+ mul_add_c(2,3,c3,c1,c2);
+ mul_add_c(3,2,c3,c1,c2);
+ mul_add_c(4,1,c3,c1,c2);
+ mul_add_c(5,0,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ mul_add_c(6,0,c1,c2,c3);
+ mul_add_c(5,1,c1,c2,c3);
+ mul_add_c(4,2,c1,c2,c3);
+ mul_add_c(3,3,c1,c2,c3);
+ mul_add_c(2,4,c1,c2,c3);
+ mul_add_c(1,5,c1,c2,c3);
+ mul_add_c(0,6,c1,c2,c3);
+ stg c1,6*8(%r2)
+ lghi c1,0
+
+ mul_add_c(0,7,c2,c3,c1);
+ mul_add_c(1,6,c2,c3,c1);
+ mul_add_c(2,5,c2,c3,c1);
+ mul_add_c(3,4,c2,c3,c1);
+ mul_add_c(4,3,c2,c3,c1);
+ mul_add_c(5,2,c2,c3,c1);
+ mul_add_c(6,1,c2,c3,c1);
+ mul_add_c(7,0,c2,c3,c1);
+ stg c2,7*8(%r2)
+ lghi c2,0
+
+ mul_add_c(7,1,c3,c1,c2);
+ mul_add_c(6,2,c3,c1,c2);
+ mul_add_c(5,3,c3,c1,c2);
+ mul_add_c(4,4,c3,c1,c2);
+ mul_add_c(3,5,c3,c1,c2);
+ mul_add_c(2,6,c3,c1,c2);
+ mul_add_c(1,7,c3,c1,c2);
+ stg c3,8*8(%r2)
+ lghi c3,0
+
+ mul_add_c(2,7,c1,c2,c3);
+ mul_add_c(3,6,c1,c2,c3);
+ mul_add_c(4,5,c1,c2,c3);
+ mul_add_c(5,4,c1,c2,c3);
+ mul_add_c(6,3,c1,c2,c3);
+ mul_add_c(7,2,c1,c2,c3);
+ stg c1,9*8(%r2)
+ lghi c1,0
+
+ mul_add_c(7,3,c2,c3,c1);
+ mul_add_c(6,4,c2,c3,c1);
+ mul_add_c(5,5,c2,c3,c1);
+ mul_add_c(4,6,c2,c3,c1);
+ mul_add_c(3,7,c2,c3,c1);
+ stg c2,10*8(%r2)
+ lghi c2,0
+
+ mul_add_c(4,7,c3,c1,c2);
+ mul_add_c(5,6,c3,c1,c2);
+ mul_add_c(6,5,c3,c1,c2);
+ mul_add_c(7,4,c3,c1,c2);
+ stg c3,11*8(%r2)
+ lghi c3,0
+
+ mul_add_c(7,5,c1,c2,c3);
+ mul_add_c(6,6,c1,c2,c3);
+ mul_add_c(5,7,c1,c2,c3);
+ stg c1,12*8(%r2)
+ lghi c1,0
+
+
+ mul_add_c(6,7,c2,c3,c1);
+ mul_add_c(7,6,c2,c3,c1);
+ stg c2,13*8(%r2)
+ lghi c2,0
+
+ mul_add_c(7,7,c3,c1,c2);
+ stg c3,14*8(%r2)
+ stg c1,15*8(%r2)
+
+ lmg %r6,%r8,48(%r15)
+ br %r14
+.size bn_mul_comba8,.-bn_mul_comba8
+
+// void bn_mul_comba4(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
+.globl bn_mul_comba4
+.type bn_mul_comba4,@function
+.align 4
+bn_mul_comba4:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ mul_add_c(0,0,c1,c2,c3);
+	stg	c1,0*8(%r2)
+ lghi c1,0
+
+ mul_add_c(0,1,c2,c3,c1);
+ mul_add_c(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ mul_add_c(2,0,c3,c1,c2);
+ mul_add_c(1,1,c3,c1,c2);
+ mul_add_c(0,2,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ mul_add_c(0,3,c1,c2,c3);
+ mul_add_c(1,2,c1,c2,c3);
+ mul_add_c(2,1,c1,c2,c3);
+ mul_add_c(3,0,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ mul_add_c(3,1,c2,c3,c1);
+ mul_add_c(2,2,c2,c3,c1);
+ mul_add_c(1,3,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ mul_add_c(2,3,c3,c1,c2);
+ mul_add_c(3,2,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ mul_add_c(3,3,c1,c2,c3);
+ stg c1,6*8(%r2)
+ stg c2,7*8(%r2)
+
+	lmg	%r6,%r8,48(%r15)
+ br %r14
+.size bn_mul_comba4,.-bn_mul_comba4
+
+#define sqr_add_c(ai,c1,c2,c3) \
+ lg %r7,ai*8(%r3); \
+ mlgr %r6,%r7; \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero
+
+#define sqr_add_c2(ai,aj,c1,c2,c3) \
+ lg %r7,ai*8(%r3); \
+ mlg %r6,aj*8(%r3); \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero; \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero
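+// sqr_add_c:  (c3:c2:c1) += a[ai]^2
+// sqr_add_c2: (c3:c2:c1) += 2*a[ai]*a[aj], i.e. the product is
+//             accumulated twice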
+
+// void bn_sqr_comba8(BN_ULONG *r2,BN_ULONG *r3);
+.globl bn_sqr_comba8
+.type bn_sqr_comba8,@function
+.align 4
+bn_sqr_comba8:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ sqr_add_c(0,c1,c2,c3);
+ stg c1,0*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(1,c3,c1,c2);
+ sqr_add_c2(2,0,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ sqr_add_c2(3,0,c1,c2,c3);
+ sqr_add_c2(2,1,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ sqr_add_c(2,c2,c3,c1);
+ sqr_add_c2(3,1,c2,c3,c1);
+ sqr_add_c2(4,0,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ sqr_add_c2(5,0,c3,c1,c2);
+ sqr_add_c2(4,1,c3,c1,c2);
+ sqr_add_c2(3,2,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ sqr_add_c(3,c1,c2,c3);
+ sqr_add_c2(4,2,c1,c2,c3);
+ sqr_add_c2(5,1,c1,c2,c3);
+ sqr_add_c2(6,0,c1,c2,c3);
+ stg c1,6*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(7,0,c2,c3,c1);
+ sqr_add_c2(6,1,c2,c3,c1);
+ sqr_add_c2(5,2,c2,c3,c1);
+ sqr_add_c2(4,3,c2,c3,c1);
+ stg c2,7*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(4,c3,c1,c2);
+ sqr_add_c2(5,3,c3,c1,c2);
+ sqr_add_c2(6,2,c3,c1,c2);
+ sqr_add_c2(7,1,c3,c1,c2);
+ stg c3,8*8(%r2)
+ lghi c3,0
+
+ sqr_add_c2(7,2,c1,c2,c3);
+ sqr_add_c2(6,3,c1,c2,c3);
+ sqr_add_c2(5,4,c1,c2,c3);
+ stg c1,9*8(%r2)
+ lghi c1,0
+
+ sqr_add_c(5,c2,c3,c1);
+ sqr_add_c2(6,4,c2,c3,c1);
+ sqr_add_c2(7,3,c2,c3,c1);
+ stg c2,10*8(%r2)
+ lghi c2,0
+
+ sqr_add_c2(7,4,c3,c1,c2);
+ sqr_add_c2(6,5,c3,c1,c2);
+ stg c3,11*8(%r2)
+ lghi c3,0
+
+ sqr_add_c(6,c1,c2,c3);
+ sqr_add_c2(7,5,c1,c2,c3);
+ stg c1,12*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(7,6,c2,c3,c1);
+ stg c2,13*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(7,c3,c1,c2);
+ stg c3,14*8(%r2)
+ stg c1,15*8(%r2)
+
+ lmg %r6,%r8,48(%r15)
+ br %r14
+.size bn_sqr_comba8,.-bn_sqr_comba8
+
+// void bn_sqr_comba4(BN_ULONG *r2,BN_ULONG *r3);
+.globl bn_sqr_comba4
+.type bn_sqr_comba4,@function
+.align 4
+bn_sqr_comba4:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ sqr_add_c(0,c1,c2,c3);
+ stg c1,0*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(1,c3,c1,c2);
+ sqr_add_c2(2,0,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ sqr_add_c2(3,0,c1,c2,c3);
+ sqr_add_c2(2,1,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ sqr_add_c(2,c2,c3,c1);
+ sqr_add_c2(3,1,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ sqr_add_c2(3,2,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ sqr_add_c(3,c1,c2,c3);
+ stg c1,6*8(%r2)
+ stg c2,7*8(%r2)
+
+ lmg %r6,%r8,48(%r15)
+ br %r14
+.size bn_sqr_comba4,.-bn_sqr_comba4
diff --git a/crypto/bn/asm/sparcv8plus.S b/crypto/bn/asm/sparcv8plus.S
index 8c56e2e7e7cb..63de1860f285 100644
--- a/crypto/bn/asm/sparcv8plus.S
+++ b/crypto/bn/asm/sparcv8plus.S
@@ -144,6 +144,19 @@
* }
*/
+#if defined(__SUNPRO_C) && defined(__sparcv9)
+ /* They've said -xarch=v9 at command line */
+ .register %g2,#scratch
+ .register %g3,#scratch
+# define FRAME_SIZE -192
+#elif defined(__GNUC__) && defined(__arch64__)
+ /* They've said -m64 at command line */
+ .register %g2,#scratch
+ .register %g3,#scratch
+# define FRAME_SIZE -192
+#else
+# define FRAME_SIZE -96
+#endif
/*
* GNU assembler can't stand stuw:-(
*/
@@ -619,8 +632,6 @@ bn_sub_words:
* Andy.
*/
-#define FRAME_SIZE -96
-
/*
* Here is register usage map for *all* routines below.
*/
diff --git a/crypto/bn/asm/sparcv9-mont.pl b/crypto/bn/asm/sparcv9-mont.pl
new file mode 100755
index 000000000000..b8fb1e8a25dc
--- /dev/null
+++ b/crypto/bn/asm/sparcv9-mont.pl
@@ -0,0 +1,606 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# December 2005
+#
+# Pure SPARCv9/8+ and IALU-only bn_mul_mont implementation. The reasons
+# for the undertaken effort are multiple. First of all, UltraSPARC is
+# not the whole SPARCv9 universe and other VIS-free implementations
+# deserve optimized code just as much. Secondly, the newly introduced
+# UltraSPARC T1, a.k.a. Niagara, has a shared FPU, and concurrent
+# FPU-intensive paths, such as sparcv9a-mont, will simply sink it. Yes,
+# T1 is equipped with several integrated RSA/DSA accelerator circuits
+# accessible through a kernel driver [only(*)], but having a decent
+# user-land software implementation is important too. Finally, there
+# was the desire to experiment with a dedicated squaring procedure.
+# Yes, this module implements one, because it was easiest to draft it
+# in SPARCv9 instructions...
+
+# (*)	An engine accessing the driver in question is on my TODO list.
+#	For reference, the accelerator is estimated to give a 6 to 10
+#	times improvement on single-threaded RSA sign. It should be
+#	noted that a 6-10x improvement coefficient does not actually
+#	mean something extraordinary in terms of absolute
+#	[single-threaded] performance, as the SPARCv9 instruction set is
+#	by all means the least suitable for high performance crypto
+#	among other 64-bit platforms. The 6-10x factor simply places T1
+#	in the same performance domain as, say, AMD64 and IA-64.
+#	Improvement of RSA verify doesn't appear impressive at all, but
+#	it's the sign operation which is far more critical/interesting.
+
+# You might notice that the inner loops are modulo-scheduled:-) This
+# has an essentially negligible impact on UltraSPARC performance; it's
+# Fujitsu SPARC64 V users who should notice and hopefully appreciate
+# the advantage... Currently this module surpasses sparcv9a-mont.pl
+# by ~20% on UltraSPARC-III and later cores, but recall that the
+# sparcv9a module still has hidden potential [see the TODO list there],
+# which is estimated to be larger than 20%...
+
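+# For orientation, each outer iteration below loosely computes
+#
+#	m  = (tp[0] + ap[0]*bp[i]) * n0		mod 2^32
+#	tp = (tp + ap*bp[i] + np*m) >> 32
+#
+# where n0 = -np[0]^-1 mod 2^32, so the low word of the sum vanishes
+# and the shift is exact; after num iterations np is conditionally
+# subtracted once (.Lsub/.Lcopy below).
+#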
+# int bn_mul_mont(
+$rp="%i0"; # BN_ULONG *rp,
+$ap="%i1"; # const BN_ULONG *ap,
+$bp="%i2"; # const BN_ULONG *bp,
+$np="%i3"; # const BN_ULONG *np,
+$n0="%i4"; # const BN_ULONG *n0,
+$num="%i5"; # int num);
+
+$bits=32;
+for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
+if ($bits==64) { $bias=2047; $frame=192; }
+else { $bias=0; $frame=128; }
+
+$car0="%o0";
+$car1="%o1";
+$car2="%o2"; # 1 bit
+$acc0="%o3";
+$acc1="%o4";
+$mask="%g1"; # 32 bits, what a waste...
+$tmp0="%g4";
+$tmp1="%g5";
+
+$i="%l0";
+$j="%l1";
+$mul0="%l2";
+$mul1="%l3";
+$tp="%l4";
+$apj="%l5";
+$npj="%l6";
+$tpj="%l7";
+
+$fname="bn_mul_mont_int";
+
+$code=<<___;
+.section ".text",#alloc,#execinstr
+
+.global $fname
+.align 32
+$fname:
+ cmp %o5,4 ! 128 bits minimum
+ bge,pt %icc,.Lenter
+ sethi %hi(0xffffffff),$mask
+ retl
+ clr %o0
+.align 32
+.Lenter:
+ save %sp,-$frame,%sp
+ sll $num,2,$num ! num*=4
+ or $mask,%lo(0xffffffff),$mask
+ ld [$n0],$n0
+ cmp $ap,$bp
+ and $num,$mask,$num
+ ld [$bp],$mul0 ! bp[0]
+ nop
+
+ add %sp,$bias,%o7 ! real top of stack
+ ld [$ap],$car0 ! ap[0] ! redundant in squaring context
+ sub %o7,$num,%o7
+ ld [$ap+4],$apj ! ap[1]
+ and %o7,-1024,%o7
+ ld [$np],$car1 ! np[0]
+ sub %o7,$bias,%sp ! alloca
+ ld [$np+4],$npj ! np[1]
+ be,pt `$bits==32?"%icc":"%xcc"`,.Lbn_sqr_mont
+ mov 12,$j
+
+ mulx $car0,$mul0,$car0 ! ap[0]*bp[0]
+ mulx $apj,$mul0,$tmp0 !prologue! ap[1]*bp[0]
+ and $car0,$mask,$acc0
+ add %sp,$bias+$frame,$tp
+ ld [$ap+8],$apj !prologue!
+
+ mulx $n0,$acc0,$mul1 ! "t[0]"*n0
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
+ mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ ld [$np+8],$npj !prologue!
+ srlx $car1,32,$car1
+ mov $tmp0,$acc0 !prologue!
+
+.L1st:
+ mulx $apj,$mul0,$tmp0
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ add $j,4,$j ! j++
+ mov $tmp0,$acc0
+ st $car1,[$tp]
+ cmp $j,$num
+ mov $tmp1,$acc1
+ srlx $car1,32,$car1
+ bl %icc,.L1st
+ add $tp,4,$tp ! tp++
+!.L1st
+
+ mulx $apj,$mul0,$tmp0 !epilogue!
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0
+ and $car0,$mask,$acc0
+ add $acc1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $tmp0,$car0,$car0
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car1
+
+ add $car0,$car1,$car1
+ st $car1,[$tp+8]
+ srlx $car1,32,$car2
+
+ mov 4,$i ! i++
+ ld [$bp+4],$mul0 ! bp[1]
+.Louter:
+ add %sp,$bias+$frame,$tp
+ ld [$ap],$car0 ! ap[0]
+ ld [$ap+4],$apj ! ap[1]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ ld [$tp],$tmp1 ! tp[0]
+ ld [$tp+4],$tpj ! tp[1]
+ mov 12,$j
+
+ mulx $car0,$mul0,$car0
+ mulx $apj,$mul0,$tmp0 !prologue!
+ add $tmp1,$car0,$car0
+ ld [$ap+8],$apj !prologue!
+ and $car0,$mask,$acc0
+
+ mulx $n0,$acc0,$mul1
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1
+ mulx $npj,$mul1,$acc1 !prologue!
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ ld [$np+8],$npj !prologue!
+ srlx $car1,32,$car1
+ mov $tmp0,$acc0 !prologue!
+
+.Linner:
+ mulx $apj,$mul0,$tmp0
+ mulx $npj,$mul1,$tmp1
+ add $tpj,$car0,$car0
+ ld [$ap+$j],$apj ! ap[j]
+ add $acc0,$car0,$car0
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj ! np[j]
+ and $car0,$mask,$acc0
+ ld [$tp+8],$tpj ! tp[j]
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ add $j,4,$j ! j++
+ mov $tmp0,$acc0
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+ mov $tmp1,$acc1
+ cmp $j,$num
+ bl %icc,.Linner
+ add $tp,4,$tp ! tp++
+!.Linner
+
+ mulx $apj,$mul0,$tmp0 !epilogue!
+ mulx $npj,$mul1,$tmp1
+ add $tpj,$car0,$car0
+ add $acc0,$car0,$car0
+ ld [$tp+8],$tpj ! tp[j]
+ and $car0,$mask,$acc0
+ add $acc1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+
+ add $tpj,$car0,$car0
+ add $tmp0,$car0,$car0
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ add $acc0,$car1,$car1
+ st $car1,[$tp+4] ! tp[j-1]
+ srlx $car0,32,$car0
+ add $i,4,$i ! i++
+ srlx $car1,32,$car1
+
+ add $car0,$car1,$car1
+ cmp $i,$num
+ add $car2,$car1,$car1
+ st $car1,[$tp+8]
+
+ srlx $car1,32,$car2
+ bl,a %icc,.Louter
+ ld [$bp+$i],$mul0 ! bp[i]
+!.Louter
+
+ add $tp,12,$tp
+
+.Ltail:
+ add $np,$num,$np
+ add $rp,$num,$rp
+ mov $tp,$ap
+ sub %g0,$num,%o7 ! k=-num
+ ba .Lsub
+ subcc %g0,%g0,%g0 ! clear %icc.c
+.align 16
+.Lsub:
+ ld [$tp+%o7],%o0
+ ld [$np+%o7],%o1
+ subccc %o0,%o1,%o1 ! tp[j]-np[j]
+ add $rp,%o7,$i
+ add %o7,4,%o7
+ brnz %o7,.Lsub
+ st %o1,[$i]
+ subc $car2,0,$car2 ! handle upmost overflow bit
+ and $tp,$car2,$ap
+ andn $rp,$car2,$np
+ or $ap,$np,$ap
+ sub %g0,$num,%o7
+
+.Lcopy:
+ ld [$ap+%o7],%o0 ! copy or in-place refresh
+ st %g0,[$tp+%o7] ! zap tp
+ st %o0,[$rp+%o7]
+ add %o7,4,%o7
+ brnz %o7,.Lcopy
+ nop
+ mov 1,%i0
+ ret
+ restore
+___
+
+########
+######## .Lbn_sqr_mont gives up to 20% *overall* improvement over
+######## code without the dedicated squaring procedure below.
+########
+$sbit="%i2"; # re-use $bp!
+
+$code.=<<___;
+.align 32
+.Lbn_sqr_mont:
+ mulx $mul0,$mul0,$car0 ! ap[0]*ap[0]
+ mulx $apj,$mul0,$tmp0 !prologue!
+ and $car0,$mask,$acc0
+ add %sp,$bias+$frame,$tp
+ ld [$ap+8],$apj !prologue!
+
+ mulx $n0,$acc0,$mul1 ! "t[0]"*n0
+ srlx $car0,32,$car0
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
+ mulx $npj,$mul1,$acc1 !prologue!
+ and $car0,1,$sbit
+ ld [$np+8],$npj !prologue!
+ srlx $car0,1,$car0
+ add $acc0,$car1,$car1
+ srlx $car1,32,$car1
+ mov $tmp0,$acc0 !prologue!
+
+.Lsqr_1st:
+ mulx $apj,$mul0,$tmp0
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0 ! ap[j]*a0+c0
+ add $acc1,$car1,$car1
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ mov $tmp1,$acc1
+ srlx $acc0,32,$sbit
+ add $j,4,$j ! j++
+ and $acc0,$mask,$acc0
+ cmp $j,$num
+ add $acc0,$car1,$car1
+ st $car1,[$tp]
+ mov $tmp0,$acc0
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_1st
+ add $tp,4,$tp ! tp++
+!.Lsqr_1st
+
+ mulx $apj,$mul0,$tmp0 ! epilogue
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0 ! ap[j]*a0+c0
+ add $acc1,$car1,$car1
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $tmp0,$car0,$car0 ! ap[j]*a0+c0
+ add $tmp1,$car1,$car1
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0
+ or $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ st $car1,[$tp+8]
+ srlx $car1,32,$car2
+
+ ld [%sp+$bias+$frame],$tmp0 ! tp[0]
+ ld [%sp+$bias+$frame+4],$tmp1 ! tp[1]
+ ld [%sp+$bias+$frame+8],$tpj ! tp[2]
+ ld [$ap+4],$mul0 ! ap[1]
+ ld [$ap+8],$apj ! ap[2]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ mulx $n0,$tmp0,$mul1
+
+ mulx $mul0,$mul0,$car0
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1
+ mulx $npj,$mul1,$acc1
+ add $tmp0,$car1,$car1
+ and $car0,$mask,$acc0
+ ld [$np+8],$npj ! np[2]
+ srlx $car1,32,$car1
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ and $car0,1,$sbit
+ add $acc1,$car1,$car1
+ srlx $car0,1,$car0
+ mov 12,$j
+ st $car1,[%sp+$bias+$frame] ! tp[0]=
+ srlx $car1,32,$car1
+ add %sp,$bias+$frame+4,$tp
+
+.Lsqr_2nd:
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $acc0,$car0,$car0
+ add $tpj,$car1,$car1
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc1,$car1,$car1
+ ld [$tp+8],$tpj ! tp[j]
+ add $acc0,$acc0,$acc0
+ add $j,4,$j ! j++
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ cmp $j,$num
+ add $acc0,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_2nd
+ add $tp,4,$tp ! tp++
+!.Lsqr_2nd
+
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $acc0,$car0,$car0
+ add $tpj,$car1,$car1
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc1,$car1,$car1
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0
+ or $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ add $car2,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car2
+
+ ld [%sp+$bias+$frame],$tmp1 ! tp[0]
+ ld [%sp+$bias+$frame+4],$tpj ! tp[1]
+ ld [$ap+8],$mul0 ! ap[2]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ mulx $n0,$tmp1,$mul1
+ and $mul1,$mask,$mul1
+ mov 8,$i
+
+ mulx $mul0,$mul0,$car0
+ mulx $car1,$mul1,$car1
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add %sp,$bias+$frame,$tp
+ srlx $car1,32,$car1
+ and $car0,1,$sbit
+ srlx $car0,1,$car0
+ mov 4,$j
+
+.Lsqr_outer:
+.Lsqr_inner1:
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $j,4,$j
+ ld [$tp+8],$tpj
+ cmp $j,$i
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_inner1
+ add $tp,4,$tp
+!.Lsqr_inner1
+
+ add $j,4,$j
+ ld [$ap+$j],$apj ! ap[j]
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ ld [$np+$j],$npj ! np[j]
+ add $acc0,$car1,$car1
+ ld [$tp+8],$tpj ! tp[j]
+ add $acc1,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $j,4,$j
+ cmp $j,$num
+ be,pn %icc,.Lsqr_no_inner2
+ add $tp,4,$tp
+
+.Lsqr_inner2:
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $acc0,$car0,$car0
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ ld [$tp+8],$tpj ! tp[j]
+ or $sbit,$acc0,$acc0
+ add $j,4,$j ! j++
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ cmp $j,$num
+ add $acc0,$car1,$car1
+ add $acc1,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_inner2
+ add $tp,4,$tp ! tp++
+
+.Lsqr_no_inner2:
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $acc0,$car0,$car0
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ add $acc1,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0
+ or $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ add $car2,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car2
+
+ add $i,4,$i ! i++
+ ld [%sp+$bias+$frame],$tmp1 ! tp[0]
+ ld [%sp+$bias+$frame+4],$tpj ! tp[1]
+ ld [$ap+$i],$mul0 ! ap[j]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ mulx $n0,$tmp1,$mul1
+ and $mul1,$mask,$mul1
+ add $i,4,$tmp0
+
+ mulx $mul0,$mul0,$car0
+ mulx $car1,$mul1,$car1
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add %sp,$bias+$frame,$tp
+ srlx $car1,32,$car1
+ and $car0,1,$sbit
+ srlx $car0,1,$car0
+
+ cmp $tmp0,$num ! i<num-1
+ bl %icc,.Lsqr_outer
+ mov 4,$j
+
+.Lsqr_last:
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $j,4,$j
+ ld [$tp+8],$tpj
+ cmp $j,$i
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_last
+ add $tp,4,$tp
+!.Lsqr_last
+
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $acc0,$car1,$car1
+ add $acc1,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0 ! recover $car0
+ or $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ add $car2,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car2
+
+ ba .Ltail
+ add $tp,8,$tp
+.type $fname,#function
+.size $fname,(.-$fname)
+.asciz	"Montgomery Multiplication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
+.align 32
+___
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/crypto/bn/asm/sparcv9a-mont.pl b/crypto/bn/asm/sparcv9a-mont.pl
new file mode 100755
index 000000000000..a14205f2f006
--- /dev/null
+++ b/crypto/bn/asm/sparcv9a-mont.pl
@@ -0,0 +1,882 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# October 2005
+#
+# "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
+# Because unlike integer multiplier, which simply stalls whole CPU,
+# FPU is fully pipelined and can effectively emit 48 bit partial
+# product every cycle. Why not blended SPARC v9? One can argue that
+# making this module dependent on UltraSPARC VIS extension limits its
+# binary compatibility. Well yes, it does exclude SPARC64 prior-V(!)
+# implementations from compatibility matrix. But the rest, whole Sun
+# UltraSPARC family and brand new Fujitsu's SPARC64 V, all support
+# VIS extension instructions used in this module. This is considered
+# good enough to not care about HAL SPARC64 users [if any] who have
+# integer-only pure SPARCv9 module to "fall down" to.
+
+# USI&II cores currently exhibit a uniform 2x improvement [over the
+# pre-bn_mul_mont codebase] for all key lengths and benchmarks. On
+# USIII performance improves by a few percent for shorter keys and
+# worsens by a few percent for longer keys. This is because the USIII
+# integer multiplier is >3x faster than the USI&II one, which is harder
+# to match [but see the TODO list below]. It should also be noted that
+# SPARC64 V features out-of-order execution, which *might* mean that
+# the integer multiplier is pipelined, which in turn *might* be
+# impossible to match... On an additional note, SPARC64 V implements an
+# FP Multiply-Add instruction, which is perfectly usable in this
+# context... In other words, as far as Fujitsu SPARC64 V goes, talk to
+# the author:-)
+
+# The implementation implies the following "non-natural" limitations on
+# input arguments:
+# - num may not be less than 4;
+# - num has to be even;
+# Failure to meet either condition has no fatal effects; it simply
+# doesn't give any performance gain.
+
+# TODO:
+# - modulo-schedule the inner loop for better performance (on an
+#   in-order execution core such as UltraSPARC this shall result in a
+#   further noticeable(!) improvement);
+# - dedicated squaring procedure[?];
+
+######################################################################
+# November 2006
+#
+# Modulo-scheduled inner loops allow floating point and integer
+# instructions to be interleaved and minimize Read-After-Write
+# penalties. This results in a *further* 20-50% performance improvement
+# [depending on key length, more for longer keys] on USI&II cores and
+# 30-80% - on USIII&IV.
+
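+# The arithmetic trick, roughly: b[i] is split into 16-bit limbs and
+# a[j]/n[j] into 32-bit halves, all converted to double (fxtod), so
+# every fmuld is a 32x16-bit product of at most 48 bits and therefore
+# exact in a 53-bit mantissa; sums of a few such partial products still
+# fit exactly, are converted back with fdtox, and the 16-bit-staggered
+# results are recombined with the integer srlx/sllx/add sequences
+# below, loosely
+#
+#	r = p0 + (p1<<16) + (p2<<32) + (p3<<48)
+#
+# with anything above bit 63 carried into the next word via %g1.
+#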
+$fname="bn_mul_mont_fpu";
+$bits=32;
+for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
+
+if ($bits==64) {
+ $bias=2047;
+ $frame=192;
+} else {
+ $bias=0;
+ $frame=128; # 96 rounded up to largest known cache-line
+}
+$locals=64;
+
+# In order to provide for 32-/64-bit ABI duality, I keep integers wider
+# than 32 bit in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used
+# exclusively for pointers, indexes and other small values...
+# int bn_mul_mont(
+$rp="%i0"; # BN_ULONG *rp,
+$ap="%i1"; # const BN_ULONG *ap,
+$bp="%i2"; # const BN_ULONG *bp,
+$np="%i3"; # const BN_ULONG *np,
+$n0="%i4"; # const BN_ULONG *n0,
+$num="%i5"; # int num);
+
+$tp="%l0"; # t[num]
+$ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved
+$ap_h="%l2"; # to these four vectors as double-precision FP values.
+$np_l="%l3"; # This way a bunch of fxtods are eliminated in second
+$np_h="%l4"; # loop and L1-cache aliasing is minimized...
+$i="%l5";
+$j="%l6";
+$mask="%l7"; # 16-bit mask, 0xffff
+
+$n0="%g4"; # reassigned(!) to "64-bit" register
+$carry="%i4"; # %i4 reused(!) for a carry bit
+
+# FP register naming chart
+#
+# ..HILO
+# dcba
+# --------
+# LOa
+# LOb
+# LOc
+# LOd
+# HIa
+# HIb
+# HIc
+# HId
+# ..a
+# ..b
+$ba="%f0"; $bb="%f2"; $bc="%f4"; $bd="%f6";
+$na="%f8"; $nb="%f10"; $nc="%f12"; $nd="%f14";
+$alo="%f16"; $alo_="%f17"; $ahi="%f18"; $ahi_="%f19";
+$nlo="%f20"; $nlo_="%f21"; $nhi="%f22"; $nhi_="%f23";
+
+$dota="%f24"; $dotb="%f26";
+
+$aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38";
+$ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46";
+$nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54";
+$nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62";
+
+$ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load
+
+$code=<<___;
+.section ".text",#alloc,#execinstr
+
+.global $fname
+.align 32
+$fname:
+ save %sp,-$frame-$locals,%sp
+
+ cmp $num,4
+ bl,a,pn %icc,.Lret
+ clr %i0
+ andcc $num,1,%g0 ! $num has to be even...
+ bnz,a,pn %icc,.Lret
+ clr %i0 ! signal "unsupported input value"
+
+ srl $num,1,$num
+ sethi %hi(0xffff),$mask
+ ld [%i4+0],$n0 ! $n0 reassigned, remember?
+ or $mask,%lo(0xffff),$mask
+ ld [%i4+4],%o0
+ sllx %o0,32,%o0
+ or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
+
+ sll $num,3,$num ! num*=8
+
+ add %sp,$bias,%o0 ! real top of stack
+ sll $num,2,%o1
+ add %o1,$num,%o1 ! %o1=num*5
+ sub %o0,%o1,%o0
+ and %o0,-2048,%o0 ! optimize TLB utilization
+ sub %o0,$bias,%sp ! alloca(5*num*8)
+
+ rd %asi,%o7 ! save %asi
+ add %sp,$bias+$frame+$locals,$tp
+ add $tp,$num,$ap_l
+ add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
+ add $ap_l,$num,$ap_h
+ add $ap_h,$num,$np_l
+ add $np_l,$num,$np_h
+
+ wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads
+
+ add $rp,$num,$rp ! readjust input pointers to point
+ add $ap,$num,$ap ! at the ends too...
+ add $bp,$num,$bp
+ add $np,$num,$np
+
+ stx %o7,[%sp+$bias+$frame+48] ! save %asi
+
+ sub %g0,$num,$i ! i=-num
+ sub %g0,$num,$j ! j=-num
+
+ add $ap,$j,%o3
+ add $bp,$i,%o4
+
+ ld [%o3+4],%g1 ! bp[0]
+ ld [%o3+0],%o0
+ ld [%o4+4],%g5 ! ap[0]
+ sllx %g1,32,%g1
+ ld [%o4+0],%o1
+ sllx %g5,32,%g5
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
+
+ add $np,$j,%o5
+
+ mulx %o1,%o0,%o0 ! ap[0]*bp[0]
+ mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
+ stx %o0,[%sp+$bias+$frame+0]
+
+ ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o3+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ ! transfer b[i] to FPU as 4x16-bit values
+ ldda [%o4+2]%asi,$ba
+ fxtod $alo,$alo
+ ldda [%o4+0]%asi,$bb
+ fxtod $ahi,$ahi
+ ldda [%o4+6]%asi,$bc
+ fxtod $nlo,$nlo
+ ldda [%o4+4]%asi,$bd
+ fxtod $nhi,$nhi
+
+ ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
+ ldda [%sp+$bias+$frame+6]%asi,$na
+ fxtod $ba,$ba
+ ldda [%sp+$bias+$frame+4]%asi,$nb
+ fxtod $bb,$bb
+ ldda [%sp+$bias+$frame+2]%asi,$nc
+ fxtod $bc,$bc
+ ldda [%sp+$bias+$frame+0]%asi,$nd
+ fxtod $bd,$bd
+
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fxtod $na,$na
+ std $ahi,[$ap_h+$j]
+ fxtod $nb,$nb
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fxtod $nc,$nc
+ std $nhi,[$np_h+$j]
+ fxtod $nd,$nd
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ add $j,8,$j
+ std $nlob,[%sp+$bias+$frame+8]
+ add $ap,$j,%o4
+ std $nloc,[%sp+$bias+$frame+16]
+ add $np,$j,%o5
+ std $nlod,[%sp+$bias+$frame+24]
+
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o4+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ fxtod $alo,$alo
+ fxtod $ahi,$ahi
+ fxtod $nlo,$nlo
+ fxtod $nhi,$nhi
+
+ ldx [%sp+$bias+$frame+0],%o0
+ fmuld $alo,$ba,$aloa
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $nlo,$na,$nloa
+ ldx [%sp+$bias+$frame+16],%o2
+ fmuld $alo,$bb,$alob
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $nlo,$nb,$nlob
+
+ srlx %o0,16,%o7
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fmuld $alo,$bc,$aloc
+ add %o7,%o1,%o1
+ std $ahi,[$ap_h+$j]
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ srlx %o1,16,%o7
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fmuld $alo,$bd,$alod
+ add %o7,%o2,%o2
+ std $nhi,[$np_h+$j]
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ srlx %o2,16,%o7
+ fmuld $ahi,$ba,$ahia
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ !and %o0,$mask,%o0
+ !and %o1,$mask,%o1
+ !and %o2,$mask,%o2
+ !sllx %o1,16,%o1
+ !sllx %o2,32,%o2
+ !sllx %o3,48,%o7
+ !or %o1,%o0,%o0
+ !or %o2,%o0,%o0
+ !or %o7,%o0,%o0 ! 64-bit result
+ srlx %o3,16,%g1 ! 34-bit carry
+ fmuld $ahi,$bb,$ahib
+
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $dota,$nloa,$nloa
+ faddd $dotb,$nlob,$nlob
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ addcc $j,8,$j
+ std $nloc,[%sp+$bias+$frame+16]
+ bz,pn %icc,.L1stskip
+ std $nlod,[%sp+$bias+$frame+24]
+
+.align 32 ! incidentally already aligned !
+.L1st:
+ add $ap,$j,%o4
+ add $np,$j,%o5
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o4+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ fxtod $alo,$alo
+ fxtod $ahi,$ahi
+ fxtod $nlo,$nlo
+ fxtod $nhi,$nhi
+
+ ldx [%sp+$bias+$frame+0],%o0
+ fmuld $alo,$ba,$aloa
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $nlo,$na,$nloa
+ ldx [%sp+$bias+$frame+16],%o2
+ fmuld $alo,$bb,$alob
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $nlo,$nb,$nlob
+
+ srlx %o0,16,%o7
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fmuld $alo,$bc,$aloc
+ add %o7,%o1,%o1
+ std $ahi,[$ap_h+$j]
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ srlx %o1,16,%o7
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fmuld $alo,$bd,$alod
+ add %o7,%o2,%o2
+ std $nhi,[$np_h+$j]
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ srlx %o2,16,%o7
+ fmuld $ahi,$ba,$ahia
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ fmuld $ahi,$bb,$ahib
+ sllx %o1,16,%o1
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ sllx %o2,32,%o2
+ fmuld $ahi,$bc,$ahic
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ or %o2,%o0,%o0
+ fmuld $ahi,$bd,$ahid
+ or %o7,%o0,%o0 ! 64-bit result
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+ addcc %g1,%o0,%o0
+ faddd $dota,$nloa,$nloa
+ srlx %o3,16,%g1 ! 34-bit carry
+ faddd $dotb,$nlob,$nlob
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]=
+
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ std $nlod,[%sp+$bias+$frame+24]
+
+ addcc $j,8,$j
+ bnz,pt %icc,.L1st
+ add $tp,8,$tp
+
+.L1stskip:
+ fdtox $dota,$dota
+ fdtox $dotb,$dotb
+
+ ldx [%sp+$bias+$frame+0],%o0
+ ldx [%sp+$bias+$frame+8],%o1
+ ldx [%sp+$bias+$frame+16],%o2
+ ldx [%sp+$bias+$frame+24],%o3
+
+ srlx %o0,16,%o7
+ std $dota,[%sp+$bias+$frame+32]
+ add %o7,%o1,%o1
+ std $dotb,[%sp+$bias+$frame+40]
+ srlx %o1,16,%o7
+ add %o7,%o2,%o2
+ srlx %o2,16,%o7
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ sllx %o1,16,%o1
+ sllx %o2,32,%o2
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ or %o2,%o0,%o0
+ or %o7,%o0,%o0 ! 64-bit result
+ ldx [%sp+$bias+$frame+32],%o4
+ addcc %g1,%o0,%o0
+ ldx [%sp+$bias+$frame+40],%o5
+ srlx %o3,16,%g1 ! 34-bit carry
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]=
+ add $tp,8,$tp
+
+ srlx %o4,16,%o7
+ add %o7,%o5,%o5
+ and %o4,$mask,%o4
+ sllx %o5,16,%o7
+ or %o7,%o4,%o4
+ addcc %g1,%o4,%o4
+ srlx %o5,48,%g1
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ mov %g1,$carry
+ stx %o4,[$tp] ! tp[num-1]=
+
+ ba .Louter
+ add $i,8,$i
+.align 32
+.Louter:
+ sub %g0,$num,$j ! j=-num
+ add %sp,$bias+$frame+$locals,$tp
+
+ add $ap,$j,%o3
+ add $bp,$i,%o4
+
+	ld	[%o3+4],%g1		! ap[0]
+	ld	[%o3+0],%o0
+	ld	[%o4+4],%g5		! bp[i]
+ sllx %g1,32,%g1
+ ld [%o4+0],%o1
+ sllx %g5,32,%g5
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
+
+ ldx [$tp],%o2 ! tp[0]
+ mulx %o1,%o0,%o0
+ addcc %o2,%o0,%o0
+ mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
+ stx %o0,[%sp+$bias+$frame+0]
+
+ ! transfer b[i] to FPU as 4x16-bit values
+ ldda [%o4+2]%asi,$ba
+ ldda [%o4+0]%asi,$bb
+ ldda [%o4+6]%asi,$bc
+ ldda [%o4+4]%asi,$bd
+
+ ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
+ ldda [%sp+$bias+$frame+6]%asi,$na
+ fxtod $ba,$ba
+ ldda [%sp+$bias+$frame+4]%asi,$nb
+ fxtod $bb,$bb
+ ldda [%sp+$bias+$frame+2]%asi,$nc
+ fxtod $bc,$bc
+ ldda [%sp+$bias+$frame+0]%asi,$nd
+ fxtod $bd,$bd
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ fxtod $na,$na
+ ldd [$ap_h+$j],$ahi
+ fxtod $nb,$nb
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ fxtod $nc,$nc
+ ldd [$np_h+$j],$nhi
+ fxtod $nd,$nd
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ add $j,8,$j
+ std $nlod,[%sp+$bias+$frame+24]
+
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ ldd [$ap_h+$j],$ahi
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ ldd [$np_h+$j],$nhi
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ ldx [%sp+$bias+$frame+0],%o0
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $alo,$bd,$alod
+ ldx [%sp+$bias+$frame+16],%o2
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $ahi,$ba,$ahia
+
+ srlx %o0,16,%o7
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ add %o7,%o1,%o1
+ fmuld $ahi,$bb,$ahib
+ srlx %o1,16,%o7
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ add %o7,%o2,%o2
+ fmuld $ahi,$bc,$ahic
+ srlx %o2,16,%o7
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ ! why?
+ and %o0,$mask,%o0
+ fmuld $ahi,$bd,$ahid
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+ sllx %o1,16,%o1
+ faddd $dota,$nloa,$nloa
+ sllx %o2,32,%o2
+ faddd $dotb,$nlob,$nlob
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahic,$nhic,$dota ! $nhic
+ or %o2,%o0,%o0
+ faddd $ahid,$nhid,$dotb ! $nhid
+ or %o7,%o0,%o0 ! 64-bit result
+ ldx [$tp],%o7
+ faddd $nloc,$nhia,$nloc
+ addcc %o7,%o0,%o0
+ ! end-of-why?
+ faddd $nlod,$nhib,$nlod
+ srlx %o3,16,%g1 ! 34-bit carry
+ fdtox $nloa,$nloa
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ addcc $j,8,$j
+ std $nloc,[%sp+$bias+$frame+16]
+ bz,pn %icc,.Linnerskip
+ std $nlod,[%sp+$bias+$frame+24]
+
+ ba .Linner
+ nop
+.align 32
+.Linner:
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ ldd [$ap_h+$j],$ahi
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ ldd [$np_h+$j],$nhi
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ ldx [%sp+$bias+$frame+0],%o0
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $alo,$bd,$alod
+ ldx [%sp+$bias+$frame+16],%o2
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $ahi,$ba,$ahia
+
+ srlx %o0,16,%o7
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ add %o7,%o1,%o1
+ fmuld $ahi,$bb,$ahib
+ srlx %o1,16,%o7
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ add %o7,%o2,%o2
+ fmuld $ahi,$bc,$ahic
+ srlx %o2,16,%o7
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ fmuld $ahi,$bd,$ahid
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+ sllx %o1,16,%o1
+ faddd $dota,$nloa,$nloa
+ sllx %o2,32,%o2
+ faddd $dotb,$nlob,$nlob
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahic,$nhic,$dota ! $nhic
+ or %o2,%o0,%o0
+ faddd $ahid,$nhid,$dotb ! $nhid
+ or %o7,%o0,%o0 ! 64-bit result
+ faddd $nloc,$nhia,$nloc
+ addcc %g1,%o0,%o0
+ ldx [$tp+8],%o7 ! tp[j]
+ faddd $nlod,$nhib,$nlod
+ srlx %o3,16,%g1 ! 34-bit carry
+ fdtox $nloa,$nloa
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+ fdtox $nlob,$nlob
+ addcc %o7,%o0,%o0
+ fdtox $nloc,$nloc
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ addcc $j,8,$j
+ std $nlod,[%sp+$bias+$frame+24]
+ bnz,pt %icc,.Linner
+ add $tp,8,$tp
+
+.Linnerskip:
+ fdtox $dota,$dota
+ fdtox $dotb,$dotb
+
+ ldx [%sp+$bias+$frame+0],%o0
+ ldx [%sp+$bias+$frame+8],%o1
+ ldx [%sp+$bias+$frame+16],%o2
+ ldx [%sp+$bias+$frame+24],%o3
+
+ srlx %o0,16,%o7
+ std $dota,[%sp+$bias+$frame+32]
+ add %o7,%o1,%o1
+ std $dotb,[%sp+$bias+$frame+40]
+ srlx %o1,16,%o7
+ add %o7,%o2,%o2
+ srlx %o2,16,%o7
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ sllx %o1,16,%o1
+ sllx %o2,32,%o2
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ or %o2,%o0,%o0
+ ldx [%sp+$bias+$frame+32],%o4
+ or %o7,%o0,%o0 ! 64-bit result
+ ldx [%sp+$bias+$frame+40],%o5
+ addcc %g1,%o0,%o0
+ ldx [$tp+8],%o7 ! tp[j]
+ srlx %o3,16,%g1 ! 34-bit carry
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ addcc %o7,%o0,%o0
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]
+ add $tp,8,$tp
+
+ srlx %o4,16,%o7
+ add %o7,%o5,%o5
+ and %o4,$mask,%o4
+ sllx %o5,16,%o7
+ or %o7,%o4,%o4
+ addcc %g1,%o4,%o4
+ srlx %o5,48,%g1
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ addcc $carry,%o4,%o4
+ stx %o4,[$tp] ! tp[num-1]
+ mov %g1,$carry
+ bcs,a %xcc,.+8
+ add $carry,1,$carry
+
+ addcc $i,8,$i
+ bnz %icc,.Louter
+ nop
+
+ add $tp,8,$tp ! adjust tp to point at the end
+ orn %g0,%g0,%g4
+ sub %g0,$num,%o7 ! n=-num
+ ba .Lsub
+ subcc %g0,%g0,%g0 ! clear %icc.c
+
+.align 32
+.Lsub:
+ ldx [$tp+%o7],%o0
+ add $np,%o7,%g1
+ ld [%g1+0],%o2
+ ld [%g1+4],%o3
+ srlx %o0,32,%o1
+ subccc %o0,%o2,%o2
+ add $rp,%o7,%g1
+ subccc %o1,%o3,%o3
+ st %o2,[%g1+0]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lsub
+ st %o3,[%g1+4]
+ subc $carry,0,%g4
+ sub %g0,$num,%o7 ! n=-num
+ ba .Lcopy
+ nop
+
+.align 32
+.Lcopy:
+ ldx [$tp+%o7],%o0
+ add $rp,%o7,%g1
+ ld [%g1+0],%o2
+ ld [%g1+4],%o3
+ stx %g0,[$tp+%o7]
+ and %o0,%g4,%o0
+ srlx %o0,32,%o1
+ andn %o2,%g4,%o2
+ andn %o3,%g4,%o3
+ or %o2,%o0,%o0
+ or %o3,%o1,%o1
+ st %o0,[%g1+0]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lcopy
+ st %o1,[%g1+4]
+ sub %g0,$num,%o7 ! n=-num
+
+.Lzap:
+ stx %g0,[$ap_l+%o7]
+ stx %g0,[$ap_h+%o7]
+ stx %g0,[$np_l+%o7]
+ stx %g0,[$np_h+%o7]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lzap
+ nop
+
+ ldx [%sp+$bias+$frame+48],%o7
+ wr %g0,%o7,%asi ! restore %asi
+
+ mov 1,%i0
+.Lret:
+ ret
+ restore
+.type $fname,#function
+.size $fname,(.-$fname)
+.asciz "Montgomery Multipltication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
+.align 32
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+# The substitution below makes it possible to compile without demanding
+# VIS extensions on the command line, e.g. -xarch=v9 vs. -xarch=v9a. I
+# dare to do this, because VIS capability is detected at run-time now
+# and this routine is not called on CPUs not capable of executing it. Do
+# note that fzeros is not the only VIS dependency! Another dependency
+# is implicit and is just _a_ numerical value loaded into the %asi
+# register, which the assembler can't recognize as VIS-specific...
+$code =~ s/fzeros\s+%f([0-9]+)/
+ sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)
+ /gem;
+
+print $code;
+# flush
+close STDOUT;
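
For readers unfamiliar with that last substitution: it rewrites every "fzeros %fN"
into a raw .word whose destination-register field is N placed in bits 29-25 of the
0x81b00c20 base opcode used above. A minimal stand-alone Perl sketch of the same
encoding (illustration only, not part of the patch):

	# emit the .word form of "fzeros %f2", mirroring the substitution above
	my $reg = 2;					# destination register number
	printf ".word\t0x%x\t! fzeros %%f%d\n",
	       0x81b00c20 | ($reg << 25), $reg;		# prints .word 0x85b00c20 ! fzeros %f2
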
diff --git a/crypto/bn/asm/via-mont.pl b/crypto/bn/asm/via-mont.pl
new file mode 100755
index 000000000000..c046a514c873
--- /dev/null
+++ b/crypto/bn/asm/via-mont.pl
@@ -0,0 +1,242 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Wrapper around 'rep montmul', the VIA-specific instruction accessing
+# the PadLock Montgomery Multiplier. The wrapper is designed as a
+# drop-in replacement for OpenSSL bn_mul_mont [first implemented in
+# 0.9.9].
+#
+# Below are interleaved outputs from 'openssl speed rsa dsa' for 4
+# different software configurations on a 1.5GHz VIA Esther processor.
+# Lines marked with "software integer" denote the performance of the
+# hand-coded integer-only assembler found in OpenSSL 0.9.7. "Software
+# SSE2" refers to the hand-coded SSE2 Montgomery multiplication
+# procedure found in OpenSSL 0.9.9. "Hardware VIA SDK" refers to the
+# padlock_pmm routine from Padlock SDK 2.0.1, available for download
+# from VIA, which naturally utilizes the magic 'repz montmul'
+# instruction. And finally "hardware this" refers to *this*
+# implementation, which also uses 'repz montmul'.
+#
+# sign verify sign/s verify/s
+# rsa 512 bits 0.001720s 0.000140s 581.4 7149.7 software integer
+# rsa 512 bits 0.000690s 0.000086s 1450.3 11606.0 software SSE2
+# rsa 512 bits 0.006136s 0.000201s 163.0 4974.5 hardware VIA SDK
+# rsa 512 bits 0.000712s 0.000050s 1404.9 19858.5 hardware this
+#
+# rsa 1024 bits 0.008518s 0.000413s 117.4 2420.8 software integer
+# rsa 1024 bits 0.004275s 0.000277s 233.9 3609.7 software SSE2
+# rsa 1024 bits 0.012136s 0.000260s 82.4 3844.5 hardware VIA SDK
+# rsa 1024 bits 0.002522s 0.000116s 396.5 8650.9 hardware this
+#
+# rsa 2048 bits 0.050101s 0.001371s 20.0 729.6 software integer
+# rsa 2048 bits 0.030273s 0.001008s 33.0 991.9 software SSE2
+# rsa 2048 bits 0.030833s 0.000976s 32.4 1025.1 hardware VIA SDK
+# rsa 2048 bits 0.011879s 0.000342s 84.2 2921.7 hardware this
+#
+# rsa 4096 bits 0.327097s 0.004859s 3.1 205.8 software integer
+# rsa 4096 bits 0.229318s 0.003859s 4.4 259.2 software SSE2
+# rsa 4096 bits 0.233953s 0.003274s 4.3 305.4 hardware VIA SDK
+# rsa 4096 bits 0.070493s 0.001166s 14.2 857.6 hardware this
+#
+# dsa 512 bits 0.001342s 0.001651s 745.2 605.7 software integer
+# dsa 512 bits 0.000844s 0.000987s 1185.3 1013.1 software SSE2
+# dsa 512 bits 0.001902s 0.002247s 525.6 444.9 hardware VIA SDK
+# dsa 512 bits 0.000458s 0.000524s 2182.2 1909.1 hardware this
+#
+# dsa 1024 bits 0.003964s 0.004926s 252.3 203.0 software integer
+# dsa 1024 bits 0.002686s 0.003166s 372.3 315.8 software SSE2
+# dsa 1024 bits 0.002397s 0.002823s 417.1 354.3 hardware VIA SDK
+# dsa 1024 bits 0.000978s 0.001170s 1022.2 855.0 hardware this
+#
+# dsa 2048 bits 0.013280s 0.016518s 75.3 60.5 software integer
+# dsa 2048 bits 0.009911s 0.011522s 100.9 86.8 software SSE2
+# dsa 2048 bits 0.009542s 0.011763s 104.8 85.0 hardware VIA SDK
+# dsa 2048 bits 0.002884s 0.003352s 346.8 298.3 hardware this
+#
+# To give you some other reference point, here is the output for a
+# 2.4GHz P4 running the hand-coded SSE2 bn_mul_mont found in 0.9.9,
+# i.e. "software SSE2" in the above terms.
+#
+# rsa 512 bits 0.000407s 0.000047s 2454.2 21137.0
+# rsa 1024 bits 0.002426s 0.000141s 412.1 7100.0
+# rsa 2048 bits 0.015046s 0.000491s 66.5 2034.9
+# rsa 4096 bits 0.109770s 0.002379s 9.1 420.3
+# dsa 512 bits 0.000438s 0.000525s 2281.1 1904.1
+# dsa 1024 bits 0.001346s 0.001595s 742.7 627.0
+# dsa 2048 bits 0.004745s 0.005582s 210.7 179.1
+#
+# Conclusions:
+# - VIA SDK leaves a *lot* of room for improvement (which this
+# implementation successfully fills:-);
+# - 'rep montmul' gives up to >3x performance improvement depending on
+# key length;
+# - in terms of absolute performance it delivers approximately as much
+# as modern out-of-order 32-bit cores [again, for longer keys].
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"via-mont.pl");
+
+# int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num);
+$func="bn_mul_mont_padlock";
+
+$pad=16*1; # amount of reserved bytes on top of every vector
+
+# stack layout
+$mZeroPrime=&DWP(0,"esp"); # these are specified by VIA
+$A=&DWP(4,"esp");
+$B=&DWP(8,"esp");
+$T=&DWP(12,"esp");
+$M=&DWP(16,"esp");
+$scratch=&DWP(20,"esp");
+$rp=&DWP(24,"esp"); # these are mine
+$sp=&DWP(28,"esp");
+# &DWP(32,"esp") # 32 byte scratch area
+# &DWP(64+(4*$num+$pad)*0,"esp") # padded tp[num]
+# &DWP(64+(4*$num+$pad)*1,"esp") # padded copy of ap[num]
+# &DWP(64+(4*$num+$pad)*2,"esp") # padded copy of bp[num]
+# &DWP(64+(4*$num+$pad)*3,"esp") # padded copy of np[num]
+# Note that the SDK suggests unconditionally allocating 2K per vector. This
+# has quite an impact on performance. It naturally depends on key length,
+# but to give an example, 1024-bit private RSA key operations suffer a >30%
+# penalty. I allocate only as much as actually required...
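+# For a given num the lea's below therefore reserve 64+4*(4*num+$pad)
+# bytes before cache-line alignment: a 64-byte header/scratch area plus
+# padded copies of tp, ap, bp and np (e.g. 640 bytes for num=32, i.e. a
+# 1024-bit operand).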
+
+&function_begin($func);
+ &xor ("eax","eax");
+ &mov ("ecx",&wparam(5)); # num
+	# meet VIA's limitations for num [note that the specification
+	# expresses them in bits, while we work with the number of 32-bit words]
+ &test ("ecx",3);
+ &jnz (&label("leave")); # num % 4 != 0
+ &cmp ("ecx",8);
+ &jb (&label("leave")); # num < 8
+ &cmp ("ecx",1024);
+ &ja (&label("leave")); # num > 1024
+
+ &pushf ();
+ &cld ();
+
+ &mov ("edi",&wparam(0)); # rp
+ &mov ("eax",&wparam(1)); # ap
+ &mov ("ebx",&wparam(2)); # bp
+ &mov ("edx",&wparam(3)); # np
+ &mov ("esi",&wparam(4)); # n0
+ &mov ("esi",&DWP(0,"esi")); # *n0
+
+ &lea ("ecx",&DWP($pad,"","ecx",4)); # ecx becomes vector size in bytes
+ &lea ("ebp",&DWP(64,"","ecx",4)); # allocate 4 vectors + 64 bytes
+ &neg ("ebp");
+ &add ("ebp","esp");
+ &and ("ebp",-64); # align to cache-line
+ &xchg ("ebp","esp"); # alloca
+
+ &mov ($rp,"edi"); # save rp
+ &mov ($sp,"ebp"); # save esp
+
+ &mov ($mZeroPrime,"esi");
+ &lea ("esi",&DWP(64,"esp")); # tp
+ &mov ($T,"esi");
+ &lea ("edi",&DWP(32,"esp")); # scratch area
+ &mov ($scratch,"edi");
+ &mov ("esi","eax");
+
+ &lea ("ebp",&DWP(-$pad,"ecx"));
+ &shr ("ebp",2); # restore original num value in ebp
+
+ &xor ("eax","eax");
+
+ &mov ("ecx","ebp");
+ &lea ("ecx",&DWP((32+$pad)/4,"ecx"));# padded tp + scratch
+ &data_byte(0xf3,0xab); # rep stosl, bzero
+
+ &mov ("ecx","ebp");
+ &lea ("edi",&DWP(64+$pad,"esp","ecx",4));# pointer to ap copy
+ &mov ($A,"edi");
+ &data_byte(0xf3,0xa5); # rep movsl, memcpy
+ &mov ("ecx",$pad/4);
+ &data_byte(0xf3,0xab); # rep stosl, bzero pad
+ # edi points at the end of padded ap copy...
+
+ &mov ("ecx","ebp");
+ &mov ("esi","ebx");
+ &mov ($B,"edi");
+ &data_byte(0xf3,0xa5); # rep movsl, memcpy
+ &mov ("ecx",$pad/4);
+ &data_byte(0xf3,0xab); # rep stosl, bzero pad
+ # edi points at the end of padded bp copy...
+
+ &mov ("ecx","ebp");
+ &mov ("esi","edx");
+ &mov ($M,"edi");
+ &data_byte(0xf3,0xa5); # rep movsl, memcpy
+ &mov ("ecx",$pad/4);
+ &data_byte(0xf3,0xab); # rep stosl, bzero pad
+ # edi points at the end of padded np copy...
+
+ # let magic happen...
+ &mov ("ecx","ebp");
+ &mov ("esi","esp");
+ &shl ("ecx",5); # convert word counter to bit counter
+ &align (4);
+ &data_byte(0xf3,0x0f,0xa6,0xc0);# rep montmul
+
+ &mov ("ecx","ebp");
+ &lea ("esi",&DWP(64,"esp")); # tp
+ # edi still points at the end of padded np copy...
+ &neg ("ebp");
+ &lea ("ebp",&DWP(-$pad,"edi","ebp",4)); # so just "rewind"
+ &mov ("edi",$rp); # restore rp
+ &xor ("edx","edx"); # i=0 and clear CF
+
+&set_label("sub",8);
+ &mov ("eax",&DWP(0,"esi","edx",4));
+ &sbb ("eax",&DWP(0,"ebp","edx",4));
+ &mov (&DWP(0,"edi","edx",4),"eax"); # rp[i]=tp[i]-np[i]
+ &lea ("edx",&DWP(1,"edx")); # i++
+ &loop (&label("sub")); # doesn't affect CF!
+
+ &mov ("eax",&DWP(0,"esi","edx",4)); # upmost overflow bit
+ &sbb ("eax",0);
+ &and ("esi","eax");
+ &not ("eax");
+ &mov ("ebp","edi");
+ &and ("ebp","eax");
+ &or ("esi","ebp"); # tp=carry?tp:rp
+
+ &mov ("ecx","edx"); # num
+ &xor ("edx","edx"); # i=0
+
+&set_label("copy",8);
+ &mov ("eax",&DWP(0,"esi","edx",4));
+ &mov (&DWP(64,"esp","edx",4),"ecx"); # zap tp
+ &mov (&DWP(0,"edi","edx",4),"eax");
+ &lea ("edx",&DWP(1,"edx")); # i++
+ &loop (&label("copy"));
+
+ &mov ("ebp",$sp);
+ &xor ("eax","eax");
+
+ &mov ("ecx",64/4);
+ &mov ("edi","esp"); # zap frame including scratch area
+ &data_byte(0xf3,0xab); # rep stosl, bzero
+
+ # zap copies of ap, bp and np
+ &lea ("edi",&DWP(64+$pad,"esp","edx",4));# pointer to ap
+ &lea ("ecx",&DWP(3*$pad/4,"edx","edx",2));
+ &data_byte(0xf3,0xab); # rep stosl, bzero
+
+ &mov ("esp","ebp");
+ &inc ("eax"); # signal "done"
+ &popf ();
+&set_label("leave");
+&function_end($func);
+
+&asciz("Padlock Montgomery Multiplication, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/crypto/bn/asm/x86-gf2m.pl b/crypto/bn/asm/x86-gf2m.pl
new file mode 100755
index 000000000000..808a1e59691d
--- /dev/null
+++ b/crypto/bn/asm/x86-gf2m.pl
@@ -0,0 +1,313 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's kind of a low-hanging mechanical port from C for
+# the time being... Except that it has three code paths: pure integer
+# code suitable for any x86 CPU, MMX code suitable for PIII and later,
+# and PCLMULQDQ code suitable for Westmere and later. Improvement varies
+# from one benchmark and µ-arch to another. Below are interval values
+# for 163- and 571-bit ECDH benchmarks relative to compiler-generated
+# code:
+#
+# PIII 16%-30%
+# P4 12%-12%
+# Opteron 18%-40%
+# Core2 19%-44%
+# Atom 38%-64%
+# Westmere 53%-121%(PCLMULQDQ)/20%-32%(MMX)
+# Sandy Bridge 72%-127%(PCLMULQDQ)/27%-23%(MMX)
+#
+# Note that the improvement coefficients above are not coefficients for
+# bn_GF2m_mul_2x2 itself. For example, a 120% ECDH improvement is the
+# result of bn_GF2m_mul_2x2 being >4x faster. As it gets faster, the
+# benchmark is more and more dominated by other subroutines, most
+# notably by BN_GF2m_mod[_mul]_arr...
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0,$x86only = $ARGV[$#ARGV] eq "386");
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+$a="eax";
+$b="ebx";
+($a1,$a2,$a4)=("ecx","edx","ebp");
+
+$R="mm0";
+@T=("mm1","mm2");
+($A,$B,$B30,$B31)=("mm2","mm3","mm4","mm5");
+@i=("esi","edi");
+
+ if (!$x86only) {
+&function_begin_B("_mul_1x1_mmx");
+ &sub ("esp",32+4);
+ &mov ($a1,$a);
+ &lea ($a2,&DWP(0,$a,$a));
+ &and ($a1,0x3fffffff);
+ &lea ($a4,&DWP(0,$a2,$a2));
+ &mov (&DWP(0*4,"esp"),0);
+ &and ($a2,0x7fffffff);
+ &movd ($A,$a);
+ &movd ($B,$b);
+ &mov (&DWP(1*4,"esp"),$a1); # a1
+ &xor ($a1,$a2); # a1^a2
+ &pxor ($B31,$B31);
+ &pxor ($B30,$B30);
+ &mov (&DWP(2*4,"esp"),$a2); # a2
+ &xor ($a2,$a4); # a2^a4
+ &mov (&DWP(3*4,"esp"),$a1); # a1^a2
+ &pcmpgtd($B31,$A); # broadcast 31st bit
+ &paddd ($A,$A); # $A<<=1
+ &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4
+ &mov (&DWP(4*4,"esp"),$a4); # a4
+ &xor ($a4,$a2); # a2=a4^a2^a4
+ &pand ($B31,$B);
+ &pcmpgtd($B30,$A); # broadcast 30th bit
+ &mov (&DWP(5*4,"esp"),$a1); # a1^a4
+ &xor ($a4,$a1); # a1^a2^a4
+ &psllq ($B31,31);
+ &pand ($B30,$B);
+ &mov (&DWP(6*4,"esp"),$a2); # a2^a4
+ &mov (@i[0],0x7);
+ &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4
+ &mov ($a4,@i[0]);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ &mov (@i[1],$a4);
+ &psllq ($B30,30);
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &movd ($R,&DWP(0,"esp",@i[0],4));
+ &mov (@i[0],$a4);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ for($n=1;$n<9;$n++) {
+ &movd (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@i[1],$a4);
+ &psllq (@T[1],3*$n);
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &pxor ($R,@T[1]);
+
+ push(@i,shift(@i)); push(@T,shift(@T));
+ }
+ &movd (@T[1],&DWP(0,"esp",@i[1],4));
+ &pxor ($R,$B30);
+ &psllq (@T[1],3*$n++);
+ &pxor ($R,@T[1]);
+
+ &movd (@T[0],&DWP(0,"esp",@i[0],4));
+ &pxor ($R,$B31);
+ &psllq (@T[0],3*$n);
+ &add ("esp",32+4);
+ &pxor ($R,@T[0]);
+ &ret ();
+&function_end_B("_mul_1x1_mmx");
+ }
+
+($lo,$hi)=("eax","edx");
+@T=("ecx","ebp");
+
+&function_begin_B("_mul_1x1_ialu");
+ &sub ("esp",32+4);
+ &mov ($a1,$a);
+ &lea ($a2,&DWP(0,$a,$a));
+ &lea ($a4,&DWP(0,"",$a,4));
+ &and ($a1,0x3fffffff);
+ &lea (@i[1],&DWP(0,$lo,$lo));
+ &sar ($lo,31); # broadcast 31st bit
+ &mov (&DWP(0*4,"esp"),0);
+ &and ($a2,0x7fffffff);
+ &mov (&DWP(1*4,"esp"),$a1); # a1
+ &xor ($a1,$a2); # a1^a2
+ &mov (&DWP(2*4,"esp"),$a2); # a2
+ &xor ($a2,$a4); # a2^a4
+ &mov (&DWP(3*4,"esp"),$a1); # a1^a2
+ &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4
+ &mov (&DWP(4*4,"esp"),$a4); # a4
+ &xor ($a4,$a2); # a2=a4^a2^a4
+ &mov (&DWP(5*4,"esp"),$a1); # a1^a4
+ &xor ($a4,$a1); # a1^a2^a4
+	&sar	(@i[1],31);		# broadcast 30th bit
+ &and ($lo,$b);
+ &mov (&DWP(6*4,"esp"),$a2); # a2^a4
+ &and (@i[1],$b);
+ &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4
+ &mov ($hi,$lo);
+ &shl ($lo,31);
+ &mov (@T[0],@i[1]);
+ &shr ($hi,1);
+
+ &mov (@i[0],0x7);
+ &shl (@i[1],30);
+ &and (@i[0],$b);
+ &shr (@T[0],2);
+ &xor ($lo,@i[1]);
+
+ &shr ($b,3);
+ &mov (@i[1],0x7); # 5-byte instruction!?
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &xor ($hi,@T[0]);
+ &xor ($lo,&DWP(0,"esp",@i[0],4));
+ &mov (@i[0],0x7);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ for($n=1;$n<9;$n++) {
+ &mov (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@i[1],0x7);
+ &mov (@T[0],@T[1]);
+ &shl (@T[1],3*$n);
+ &and (@i[1],$b);
+ &shr (@T[0],32-3*$n);
+ &xor ($lo,@T[1]);
+ &shr ($b,3);
+ &xor ($hi,@T[0]);
+
+ push(@i,shift(@i)); push(@T,shift(@T));
+ }
+ &mov (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@T[0],@T[1]);
+ &shl (@T[1],3*$n);
+ &mov (@i[1],&DWP(0,"esp",@i[0],4));
+ &shr (@T[0],32-3*$n); $n++;
+ &mov (@i[0],@i[1]);
+ &xor ($lo,@T[1]);
+ &shl (@i[1],3*$n);
+ &xor ($hi,@T[0]);
+ &shr (@i[0],32-3*$n);
+ &xor ($lo,@i[1]);
+ &xor ($hi,@i[0]);
+
+ &add ("esp",32+4);
+ &ret ();
+&function_end_B("_mul_1x1_ialu");
+
+# void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0);
+&function_begin_B("bn_GF2m_mul_2x2");
+if (!$x86only) {
+ &picmeup("edx","OPENSSL_ia32cap_P");
+ &mov ("eax",&DWP(0,"edx"));
+ &mov ("edx",&DWP(4,"edx"));
+ &test ("eax",1<<23); # check MMX bit
+ &jz (&label("ialu"));
+if ($sse2) {
+ &test ("eax",1<<24); # check FXSR bit
+ &jz (&label("mmx"));
+ &test ("edx",1<<1); # check PCLMULQDQ bit
+ &jz (&label("mmx"));
+
+ &movups ("xmm0",&QWP(8,"esp"));
+ &shufps ("xmm0","xmm0",0b10110001);
+ &pclmulqdq ("xmm0","xmm0",1);
+ &mov ("eax",&DWP(4,"esp"));
+ &movups (&QWP(0,"eax"),"xmm0");
+ &ret ();
+
+&set_label("mmx",16);
+}
+ &push ("ebp");
+ &push ("ebx");
+ &push ("esi");
+ &push ("edi");
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &call ("_mul_1x1_mmx"); # a1·b1
+ &movq ("mm7",$R);
+
+ &mov ($a,&wparam(2));
+ &mov ($b,&wparam(4));
+ &call ("_mul_1x1_mmx"); # a0·b0
+ &movq ("mm6",$R);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &xor ($a,&wparam(2));
+ &xor ($b,&wparam(4));
+ &call ("_mul_1x1_mmx"); # (a0+a1)·(b0+b1)
+ &pxor ($R,"mm7");
+ &mov ($a,&wparam(0));
+ &pxor ($R,"mm6"); # (a0+a1)·(b0+b1)-a1·b1-a0·b0
+
+ &movq ($A,$R);
+ &psllq ($R,32);
+ &pop ("edi");
+ &psrlq ($A,32);
+ &pop ("esi");
+ &pxor ($R,"mm6");
+ &pop ("ebx");
+ &pxor ($A,"mm7");
+ &movq (&QWP(0,$a),$R);
+ &pop ("ebp");
+ &movq (&QWP(8,$a),$A);
+ &emms ();
+ &ret ();
+&set_label("ialu",16);
+}
+ &push ("ebp");
+ &push ("ebx");
+ &push ("esi");
+ &push ("edi");
+ &stack_push(4+1);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &call ("_mul_1x1_ialu"); # a1·b1
+ &mov (&DWP(8,"esp"),$lo);
+ &mov (&DWP(12,"esp"),$hi);
+
+ &mov ($a,&wparam(2));
+ &mov ($b,&wparam(4));
+ &call ("_mul_1x1_ialu"); # a0·b0
+ &mov (&DWP(0,"esp"),$lo);
+ &mov (&DWP(4,"esp"),$hi);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &xor ($a,&wparam(2));
+ &xor ($b,&wparam(4));
+ &call ("_mul_1x1_ialu"); # (a0+a1)·(b0+b1)
+
+ &mov ("ebp",&wparam(0));
+ @r=("ebx","ecx","edi","esi");
+ &mov (@r[0],&DWP(0,"esp"));
+ &mov (@r[1],&DWP(4,"esp"));
+ &mov (@r[2],&DWP(8,"esp"));
+ &mov (@r[3],&DWP(12,"esp"));
+
+ &xor ($lo,$hi);
+ &xor ($hi,@r[1]);
+ &xor ($lo,@r[0]);
+ &mov (&DWP(0,"ebp"),@r[0]);
+ &xor ($hi,@r[2]);
+ &mov (&DWP(12,"ebp"),@r[3]);
+ &xor ($lo,@r[3]);
+ &stack_pop(4+1);
+ &xor ($hi,@r[3]);
+ &pop ("edi");
+ &xor ($lo,$hi);
+ &pop ("esi");
+ &mov (&DWP(8,"ebp"),$hi);
+ &pop ("ebx");
+ &mov (&DWP(4,"ebp"),$lo);
+ &pop ("ebp");
+ &ret ();
+&function_end_B("bn_GF2m_mul_2x2");
+
+&asciz ("GF(2^m) Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/crypto/bn/asm/mo-586.pl b/crypto/bn/asm/x86-mont.pl
index 061127e0b11d..e8f6b050842e 100755
--- a/crypto/bn/asm/mo-586.pl
+++ b/crypto/bn/asm/x86-mont.pl
@@ -1,18 +1,5 @@
#!/usr/bin/env perl
-# This is crypto/bn/asm/x86-mont.pl (with asciz from crypto/perlasm/x86asm.pl)
-# from OpenSSL 0.9.9-dev
-
-sub ::asciz
-{ my @str=unpack("C*",shift);
- push @str,0;
- while ($#str>15) {
- &data_byte(@str[0..15]);
- foreach (0..15) { shift @str; }
- }
- &data_byte(@str) if (@str);
-}
-
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
@@ -39,7 +26,8 @@ sub ::asciz
# Integer-only code [being equipped with dedicated squaring procedure]
# gives ~40% on rsa512 sign benchmark...
-push(@INC,"perlasm","../../perlasm");
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";
&asm_init($ARGV[0],$0);
diff --git a/crypto/bn/asm/x86_64-gcc.c b/crypto/bn/asm/x86_64-gcc.c
index b1b8a1109bf7..acb0b401181e 100644
--- a/crypto/bn/asm/x86_64-gcc.c
+++ b/crypto/bn/asm/x86_64-gcc.c
@@ -1,5 +1,5 @@
#include "../bn_lcl.h"
-#ifdef __SUNPRO_C
+#if !(defined(__GNUC__) && __GNUC__>=2)
# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
#else
/*
@@ -55,7 +55,11 @@
* machine.
*/
+#ifdef _WIN64
+#define BN_ULONG unsigned long long
+#else
#define BN_ULONG unsigned long
+#endif
#undef mul
#undef mul_add
@@ -187,7 +191,7 @@ BN_ULONG bn_add_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int
asm (
" subq %2,%2 \n"
- ".align 16 \n"
+ ".p2align 4 \n"
"1: movq (%4,%2,8),%0 \n"
" adcq (%5,%2,8),%0 \n"
" movq %0,(%3,%2,8) \n"
@@ -210,7 +214,7 @@ BN_ULONG bn_sub_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int
asm (
" subq %2,%2 \n"
- ".align 16 \n"
+ ".p2align 4 \n"
"1: movq (%4,%2,8),%0 \n"
" sbbq (%5,%2,8),%0 \n"
" movq %0,(%3,%2,8) \n"
diff --git a/crypto/bn/asm/x86_64-gf2m.pl b/crypto/bn/asm/x86_64-gf2m.pl
new file mode 100755
index 000000000000..1658acbbddd6
--- /dev/null
+++ b/crypto/bn/asm/x86_64-gf2m.pl
@@ -0,0 +1,389 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's kind of a low-hanging mechanical port from C for
+# the time being... Except that it has two code paths: code suitable
+# for any x86_64 CPU and a PCLMULQDQ one suitable for Westmere and
+# later. Improvement varies from one benchmark and µ-arch to another.
+# The vanilla code path is at most 20% faster than compiler-generated
+# code [not very impressive], while the PCLMULQDQ one is a whole
+# 85%-160% better on 163- and 571-bit ECDH benchmarks on Intel CPUs.
+# Keep in mind that these coefficients are not the ones for
+# bn_GF2m_mul_2x2 itself, as not all CPU time is burnt in it...
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+($lo,$hi)=("%rax","%rdx"); $a=$lo;
+($i0,$i1)=("%rsi","%rdi");
+($t0,$t1)=("%rbx","%rcx");
+($b,$mask)=("%rbp","%r8");
+($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(9..15));
+($R,$Tx)=("%xmm0","%xmm1");
+
+$code.=<<___;
+.text
+
+.type _mul_1x1,\@abi-omnipotent
+.align 16
+_mul_1x1:
+ sub \$128+8,%rsp
+ mov \$-1,$a1
+ lea ($a,$a),$i0
+ shr \$3,$a1
+ lea (,$a,4),$i1
+ and $a,$a1 # a1=a&0x1fffffffffffffff
+ lea (,$a,8),$a8
+ sar \$63,$a # broadcast 63rd bit
+ lea ($a1,$a1),$a2
+ sar \$63,$i0 # broadcast 62nd bit
+ lea (,$a1,4),$a4
+ and $b,$a
+	sar	\$63,$i1		# broadcast 61st bit
+ mov $a,$hi # $a is $lo
+ shl \$63,$lo
+ and $b,$i0
+ shr \$1,$hi
+ mov $i0,$t1
+ shl \$62,$i0
+ and $b,$i1
+ shr \$2,$t1
+ xor $i0,$lo
+ mov $i1,$t0
+ shl \$61,$i1
+ xor $t1,$hi
+ shr \$3,$t0
+ xor $i1,$lo
+ xor $t0,$hi
+
+ mov $a1,$a12
+ movq \$0,0(%rsp) # tab[0]=0
+ xor $a2,$a12 # a1^a2
+ mov $a1,8(%rsp) # tab[1]=a1
+ mov $a4,$a48
+ mov $a2,16(%rsp) # tab[2]=a2
+ xor $a8,$a48 # a4^a8
+ mov $a12,24(%rsp) # tab[3]=a1^a2
+
+ xor $a4,$a1
+ mov $a4,32(%rsp) # tab[4]=a4
+ xor $a4,$a2
+ mov $a1,40(%rsp) # tab[5]=a1^a4
+ xor $a4,$a12
+ mov $a2,48(%rsp) # tab[6]=a2^a4
+ xor $a48,$a1 # a1^a4^a4^a8=a1^a8
+ mov $a12,56(%rsp) # tab[7]=a1^a2^a4
+	xor	$a48,$a2		# a2^a4^a4^a8=a2^a8
+
+ mov $a8,64(%rsp) # tab[8]=a8
+ xor $a48,$a12 # a1^a2^a4^a4^a8=a1^a2^a8
+ mov $a1,72(%rsp) # tab[9]=a1^a8
+ xor $a4,$a1 # a1^a8^a4
+ mov $a2,80(%rsp) # tab[10]=a2^a8
+ xor $a4,$a2 # a2^a8^a4
+ mov $a12,88(%rsp) # tab[11]=a1^a2^a8
+
+ xor $a4,$a12 # a1^a2^a8^a4
+ mov $a48,96(%rsp) # tab[12]=a4^a8
+ mov $mask,$i0
+ mov $a1,104(%rsp) # tab[13]=a1^a4^a8
+ and $b,$i0
+ mov $a2,112(%rsp) # tab[14]=a2^a4^a8
+ shr \$4,$b
+ mov $a12,120(%rsp) # tab[15]=a1^a2^a4^a8
+ mov $mask,$i1
+ and $b,$i1
+ shr \$4,$b
+
+ movq (%rsp,$i0,8),$R # half of calculations is done in SSE2
+ mov $mask,$i0
+ and $b,$i0
+ shr \$4,$b
+___
+ for ($n=1;$n<8;$n++) {
+ $code.=<<___;
+ mov (%rsp,$i1,8),$t1
+ mov $mask,$i1
+ mov $t1,$t0
+ shl \$`8*$n-4`,$t1
+ and $b,$i1
+ movq (%rsp,$i0,8),$Tx
+ shr \$`64-(8*$n-4)`,$t0
+ xor $t1,$lo
+ pslldq \$$n,$Tx
+ mov $mask,$i0
+ shr \$4,$b
+ xor $t0,$hi
+ and $b,$i0
+ shr \$4,$b
+ pxor $Tx,$R
+___
+ }
+$code.=<<___;
+ mov (%rsp,$i1,8),$t1
+ mov $t1,$t0
+ shl \$`8*$n-4`,$t1
+ movq $R,$i0
+ shr \$`64-(8*$n-4)`,$t0
+ xor $t1,$lo
+ psrldq \$8,$R
+ xor $t0,$hi
+ movq $R,$i1
+ xor $i0,$lo
+ xor $i1,$hi
+
+ add \$128+8,%rsp
+ ret
+.Lend_mul_1x1:
+.size _mul_1x1,.-_mul_1x1
+___
+
+($rp,$a1,$a0,$b1,$b0) = $win64? ("%rcx","%rdx","%r8", "%r9","%r10") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx","%r8"); # Unix order
+
+$code.=<<___;
+.extern OPENSSL_ia32cap_P
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,\@abi-omnipotent
+.align 16
+bn_GF2m_mul_2x2:
+ mov OPENSSL_ia32cap_P(%rip),%rax
+ bt \$33,%rax
+ jnc .Lvanilla_mul_2x2
+
+ movq $a1,%xmm0
+ movq $b1,%xmm1
+ movq $a0,%xmm2
+___
+$code.=<<___ if ($win64);
+ movq 40(%rsp),%xmm3
+___
+$code.=<<___ if (!$win64);
+ movq $b0,%xmm3
+___
+$code.=<<___;
+ movdqa %xmm0,%xmm4
+ movdqa %xmm1,%xmm5
+ pclmulqdq \$0,%xmm1,%xmm0 # a1·b1
+ pxor %xmm2,%xmm4
+ pxor %xmm3,%xmm5
+ pclmulqdq \$0,%xmm3,%xmm2 # a0·b0
+ pclmulqdq \$0,%xmm5,%xmm4 # (a0+a1)·(b0+b1)
+ xorps %xmm0,%xmm4
+ xorps %xmm2,%xmm4 # (a0+a1)·(b0+b1)-a0·b0-a1·b1
+ movdqa %xmm4,%xmm5
+ pslldq \$8,%xmm4
+ psrldq \$8,%xmm5
+ pxor %xmm4,%xmm2
+ pxor %xmm5,%xmm0
+ movdqu %xmm2,0($rp)
+ movdqu %xmm0,16($rp)
+ ret
+
+.align 16
+.Lvanilla_mul_2x2:
+ lea -8*17(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+ mov `8*17+40`(%rsp),$b0
+ mov %rdi,8*15(%rsp)
+ mov %rsi,8*16(%rsp)
+___
+$code.=<<___;
+ mov %r14,8*10(%rsp)
+ mov %r13,8*11(%rsp)
+ mov %r12,8*12(%rsp)
+ mov %rbp,8*13(%rsp)
+ mov %rbx,8*14(%rsp)
+.Lbody_mul_2x2:
+ mov $rp,32(%rsp) # save the arguments
+ mov $a1,40(%rsp)
+ mov $a0,48(%rsp)
+ mov $b1,56(%rsp)
+ mov $b0,64(%rsp)
+
+ mov \$0xf,$mask
+ mov $a1,$a
+ mov $b1,$b
+ call _mul_1x1 # a1·b1
+ mov $lo,16(%rsp)
+ mov $hi,24(%rsp)
+
+ mov 48(%rsp),$a
+ mov 64(%rsp),$b
+ call _mul_1x1 # a0·b0
+ mov $lo,0(%rsp)
+ mov $hi,8(%rsp)
+
+ mov 40(%rsp),$a
+ mov 56(%rsp),$b
+ xor 48(%rsp),$a
+ xor 64(%rsp),$b
+ call _mul_1x1 # (a0+a1)·(b0+b1)
+___
+ @r=("%rbx","%rcx","%rdi","%rsi");
+$code.=<<___;
+ mov 0(%rsp),@r[0]
+ mov 8(%rsp),@r[1]
+ mov 16(%rsp),@r[2]
+ mov 24(%rsp),@r[3]
+ mov 32(%rsp),%rbp
+
+ xor $hi,$lo
+ xor @r[1],$hi
+ xor @r[0],$lo
+ mov @r[0],0(%rbp)
+ xor @r[2],$hi
+ mov @r[3],24(%rbp)
+ xor @r[3],$lo
+ xor @r[3],$hi
+ xor $hi,$lo
+ mov $hi,16(%rbp)
+ mov $lo,8(%rbp)
+
+ mov 8*10(%rsp),%r14
+ mov 8*11(%rsp),%r13
+ mov 8*12(%rsp),%r12
+ mov 8*13(%rsp),%rbp
+ mov 8*14(%rsp),%rbx
+___
+$code.=<<___ if ($win64);
+ mov 8*15(%rsp),%rdi
+ mov 8*16(%rsp),%rsi
+___
+$code.=<<___;
+ lea 8*17(%rsp),%rsp
+ ret
+.Lend_mul_2x2:
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.asciz "GF(2^m) Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 152($context),%rax # pull context->Rsp
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lbody_mul_2x2(%rip),%r10
+ cmp %r10,%rbx # context->Rip<"prologue" label
+ jb .Lin_prologue
+
+ mov 8*10(%rax),%r14 # mimic epilogue
+ mov 8*11(%rax),%r13
+ mov 8*12(%rax),%r12
+ mov 8*13(%rax),%rbp
+ mov 8*14(%rax),%rbx
+ mov 8*15(%rax),%rdi
+ mov 8*16(%rax),%rsi
+
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+
+.Lin_prologue:
+ lea 8*17(%rax),%rax
+ mov %rax,152($context) # restore context->Rsp
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+
+.section .pdata
+.align 4
+ .rva _mul_1x1
+ .rva .Lend_mul_1x1
+ .rva .LSEH_info_1x1
+
+ .rva .Lvanilla_mul_2x2
+ .rva .Lend_mul_2x2
+ .rva .LSEH_info_2x2
+.section .xdata
+.align 8
+.LSEH_info_1x1:
+ .byte 0x01,0x07,0x02,0x00
+ .byte 0x07,0x01,0x11,0x00 # sub rsp,128+8
+.LSEH_info_2x2:
+ .byte 9,0,0,0
+ .rva se_handler
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
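
The _mul_1x1 routine above is a 4-bit-windowed carry-less multiply: it precomputes
tab[0..15] as every XOR combination of a1, a2 (=a<<1), a4 (=a<<2) and a8 (=a<<3),
then consumes b one nibble at a time, while the top three bits of a (masked off when
a1 is formed) are corrected separately by the sar/broadcast code. A hedged Perl
restatement of just the table-driven core, for small test words only (the helper
name is mine, not part of the patch):

	sub mul_1x1_window {
		my ($a, $b) = @_;
		my @tab = (0) x 16;		# tab[i] = XOR of a<<k for every set bit k of i
		for my $i (1 .. 15) {
			$tab[$i] ^= $a      if ($i & 1);
			$tab[$i] ^= $a << 1 if ($i & 2);
			$tab[$i] ^= $a << 2 if ($i & 4);
			$tab[$i] ^= $a << 3 if ($i & 8);
		}
		my ($r, $n) = (0, 0);
		for (; $b != 0; $b >>= 4, $n += 4) {
			$r ^= $tab[$b & 0xf] << $n;	# one 4-bit window of b per step
		}
		return $r;
	}
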
diff --git a/crypto/bn/asm/x86_64-mont.pl b/crypto/bn/asm/x86_64-mont.pl
index c43b69592a5c..5d79b35e1cf2 100755
--- a/crypto/bn/asm/x86_64-mont.pl
+++ b/crypto/bn/asm/x86_64-mont.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -15,14 +15,32 @@
# respectful 50%. It remains to be seen if loop unrolling and
# dedicated squaring routine can provide further improvement...
-$output=shift;
+# July 2011.
+#
+# Add dedicated squaring procedure. Performance improvement varies
+# from platform to platform, but on average it's ~5%/15%/25%/33%
+# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
+
+# August 2011.
+#
+# Unroll and modulo-schedule inner loops in such a manner that they
+# are "fallen through" for input lengths of 8, which is critical for
+# 1024-bit RSA *sign*. Average performance improvement in comparison
+# to the *initial* version of this module from 2005 is ~0%/30%/40%/45%
+# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
-open STDOUT,"| $^X $xlate $output";
+open STDOUT,"| $^X $xlate $flavour $output";
# int bn_mul_mont(
$rp="%rdi"; # BN_ULONG *rp,
@@ -33,7 +51,6 @@ $n0="%r8"; # const BN_ULONG *n0,
$num="%r9"; # int num);
$lo0="%r10";
$hi0="%r11";
-$bp="%r12"; # reassign $bp
$hi1="%r13";
$i="%r14";
$j="%r15";
@@ -47,6 +64,16 @@ $code=<<___;
.type bn_mul_mont,\@function,6
.align 16
bn_mul_mont:
+ test \$3,${num}d
+ jnz .Lmul_enter
+ cmp \$8,${num}d
+ jb .Lmul_enter
+ cmp $ap,$bp
+ jne .Lmul4x_enter
+ jmp .Lsqr4x_enter
+
+.align 16
+.Lmul_enter:
push %rbx
push %rbp
push %r12
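
The new prologue above turns bn_mul_mont into a dispatcher: operand sizes that are
not a multiple of 4, or are below 8, take the original generic loop; squarings
(ap == bp) branch to the dedicated sqr4x code added further down; everything else
goes to the 4x-unrolled multiply. Restated as plain Perl (illustration only, the
helper name is mine):

	sub mont_entry_point {
		my ($num, $ap, $bp) = @_;	# $num in words, $ap/$bp as addresses
		return ".Lmul_enter"   if ($num % 4) || ($num < 8);
		return ".Lsqr4x_enter" if ($ap == $bp);
		return ".Lmul4x_enter";
	}
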
@@ -55,54 +82,73 @@ bn_mul_mont:
push %r15
mov ${num}d,${num}d
- lea 2($num),%rax
- mov %rsp,%rbp
- neg %rax
- lea (%rsp,%rax,8),%rsp # tp=alloca(8*(num+2))
+ lea 2($num),%r10
+ mov %rsp,%r11
+ neg %r10
+ lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2))
and \$-1024,%rsp # minimize TLB usage
- mov %rbp,8(%rsp,$num,8) # tp[num+1]=%rsp
- mov %rdx,$bp # $bp reassigned, remember?
-
+ mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul_body:
+ mov $bp,%r12 # reassign $bp
+___
+ $bp="%r12";
+$code.=<<___;
mov ($n0),$n0 # pull n0[0] value
+ mov ($bp),$m0 # m0=bp[0]
+ mov ($ap),%rax
xor $i,$i # i=0
xor $j,$j # j=0
- mov ($bp),$m0 # m0=bp[0]
- mov ($ap),%rax
+ mov $n0,$m1
mulq $m0 # ap[0]*bp[0]
mov %rax,$lo0
- mov %rdx,$hi0
+ mov ($np),%rax
- imulq $n0,%rax # "tp[0]"*n0
- mov %rax,$m1
+ imulq $lo0,$m1 # "tp[0]"*n0
+ mov %rdx,$hi0
- mulq ($np) # np[0]*m1
- add $lo0,%rax # discarded
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
adc \$0,%rdx
mov %rdx,$hi1
lea 1($j),$j # j++
+ jmp .L1st_enter
+
+.align 16
.L1st:
+ add %rax,$hi1
mov ($ap,$j,8),%rax
- mulq $m0 # ap[j]*bp[0]
- add $hi0,%rax
adc \$0,%rdx
- mov %rax,$lo0
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ mov $lo0,$hi0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.L1st_enter:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$hi0
mov ($np,$j,8),%rax
- mov %rdx,$hi0
+ adc \$0,%rdx
+ lea 1($j),$j # j++
+ mov %rdx,$lo0
mulq $m1 # np[j]*m1
- add $hi1,%rax
- lea 1($j),$j # j++
+ cmp $num,$j
+ jne .L1st
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
adc \$0,%rdx
- add $lo0,%rax # np[j]*m1+ap[j]*bp[0]
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
adc \$0,%rdx
- mov %rax,-16(%rsp,$j,8) # tp[j-1]
- cmp $num,$j
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$hi1
- jl .L1st
+ mov $lo0,$hi0
xor %rdx,%rdx
add $hi0,$hi1
@@ -111,50 +157,64 @@ bn_mul_mont:
mov %rdx,(%rsp,$num,8) # store upmost overflow bit
lea 1($i),$i # i++
-.align 4
+ jmp .Louter
+.align 16
.Louter:
- xor $j,$j # j=0
-
mov ($bp,$i,8),$m0 # m0=bp[i]
- mov ($ap),%rax # ap[0]
+ xor $j,$j # j=0
+ mov $n0,$m1
+ mov (%rsp),$lo0
mulq $m0 # ap[0]*bp[i]
- add (%rsp),%rax # ap[0]*bp[i]+tp[0]
+ add %rax,$lo0 # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
adc \$0,%rdx
- mov %rax,$lo0
- mov %rdx,$hi0
- imulq $n0,%rax # tp[0]*n0
- mov %rax,$m1
+ imulq $lo0,$m1 # tp[0]*n0
+ mov %rdx,$hi0
- mulq ($np,$j,8) # np[0]*m1
- add $lo0,%rax # discarded
- mov 8(%rsp),$lo0 # tp[1]
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
adc \$0,%rdx
+ mov 8(%rsp),$lo0 # tp[1]
mov %rdx,$hi1
lea 1($j),$j # j++
-.align 4
+ jmp .Linner_enter
+
+.align 16
.Linner:
+ add %rax,$hi1
mov ($ap,$j,8),%rax
- mulq $m0 # ap[j]*bp[i]
- add $hi0,%rax
adc \$0,%rdx
- add %rax,$lo0 # ap[j]*bp[i]+tp[j]
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.Linner_enter:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$hi0
mov ($np,$j,8),%rax
adc \$0,%rdx
+ add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
mov %rdx,$hi0
+ adc \$0,$hi0
+ lea 1($j),$j # j++
mulq $m1 # np[j]*m1
- add $hi1,%rax
- lea 1($j),$j # j++
- adc \$0,%rdx
- add $lo0,%rax # np[j]*m1+ap[j]*bp[i]+tp[j]
+ cmp $num,$j
+ jne .Linner
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
mov (%rsp,$j,8),$lo0
- cmp $num,$j
- mov %rax,-16(%rsp,$j,8) # tp[j-1]
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$hi1
- jl .Linner
xor %rdx,%rdx
add $hi0,$hi1
@@ -168,47 +228,1453 @@ bn_mul_mont:
cmp $num,$i
jl .Louter
- lea (%rsp),$ap # borrow ap for tp
- lea -1($num),$j # j=num-1
-
- mov ($ap),%rax # tp[0]
xor $i,$i # i=0 and clear CF!
+ mov (%rsp),%rax # tp[0]
+ lea (%rsp),$ap # borrow ap for tp
+ mov $num,$j # j=num
jmp .Lsub
.align 16
.Lsub: sbb ($np,$i,8),%rax
mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
- dec $j # doesn't affect CF!
mov 8($ap,$i,8),%rax # tp[i+1]
lea 1($i),$i # i++
- jge .Lsub
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub
sbb \$0,%rax # handle upmost overflow bit
+ xor $i,$i
and %rax,$ap
not %rax
mov $rp,$np
and %rax,$np
- lea -1($num),$j
+ mov $num,$j # j=num
or $np,$ap # ap=borrow?tp:rp
.align 16
.Lcopy: # copy or in-place refresh
+ mov ($ap,$i,8),%rax
+ mov $i,(%rsp,$i,8) # zap temporary vector
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]
+ lea 1($i),$i
+ sub \$1,$j
+ jnz .Lcopy
+
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul_epilogue:
+ ret
+.size bn_mul_mont,.-bn_mul_mont
+___
+{{{
+my @A=("%r10","%r11");
+my @N=("%r13","%rdi");
+$code.=<<___;
+.type bn_mul4x_mont,\@function,6
+.align 16
+bn_mul4x_mont:
+.Lmul4x_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov ${num}d,${num}d
+ lea 4($num),%r10
+ mov %rsp,%r11
+ neg %r10
+ lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul4x_body:
+ mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
+ mov %rdx,%r12 # reassign $bp
+___
+ $bp="%r12";
+$code.=<<___;
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($bp),$m0 # m0=bp[0]
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$A[0]
+ mov ($np),%rax
+
+ imulq $A[0],$m1 # "tp[0]"*n0
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ lea 4($j),$j # j++
+ adc \$0,%rdx
+ mov $N[1],(%rsp)
+ mov %rdx,$N[0]
+ jmp .L1st4x
+.align 16
+.L1st4x:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
mov ($ap,$j,8),%rax
- mov %rax,($rp,$j,8) # rp[i]=tp[i]
- mov $i,(%rsp,$j,8) # zap temporary vector
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .L1st4x
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+.align 4
+.Louter4x:
+ mov ($bp,$i,8),$m0 # m0=bp[i]
+ xor $j,$j # j=0
+ mov (%rsp),$A[0]
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$A[0] # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ imulq $A[0],$m1 # tp[0]*n0
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # "$N[0]", discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ add 8(%rsp),$A[1] # +tp[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
+ lea 4($j),$j # j+=2
+ adc \$0,%rdx
+ mov $N[1],(%rsp) # tp[j-1]
+ mov %rdx,$N[0]
+ jmp .Linner4x
+.align 16
+.Linner4x:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ add 8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .Linner4x
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 1($i),$i # i++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ add (%rsp,$num,8),$N[0] # pull upmost overflow bit
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ cmp $num,$i
+ jl .Louter4x
+___
+{
+my @ri=("%rax","%rdx",$m0,$m1);
+$code.=<<___;
+ mov 16(%rsp,$num,8),$rp # restore $rp
+ mov 0(%rsp),@ri[0] # tp[0]
+ pxor %xmm0,%xmm0
+ mov 8(%rsp),@ri[1] # tp[1]
+ shr \$2,$num # num/=4
+ lea (%rsp),$ap # borrow ap for tp
+ xor $i,$i # i=0 and clear CF!
+
+ sub 0($np),@ri[0]
+ mov 16($ap),@ri[2] # tp[2]
+ mov 24($ap),@ri[3] # tp[3]
+ sbb 8($np),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsub4x
+.align 16
+.Lsub4x:
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($np,$i,8),@ri[2]
+ mov 32($ap,$i,8),@ri[0] # tp[i+1]
+ mov 40($ap,$i,8),@ri[1]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($np,$i,8),@ri[0]
+ mov 48($ap,$i,8),@ri[2]
+ mov 56($ap,$i,8),@ri[3]
+ sbb 40($np,$i,8),@ri[1]
+ lea 4($i),$i # i++
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub4x
+
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($ap,$i,8),@ri[0] # load overflow bit
+ sbb 16($np,$i,8),@ri[2]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$ap
+ not @ri[0]
+ mov $rp,$np
+ and @ri[0],$np
+ lea -1($num),$j
+ or $np,$ap # ap=borrow?tp:rp
+
+ movdqu ($ap),%xmm1
+ movdqa %xmm0,(%rsp)
+ movdqu %xmm1,($rp)
+ jmp .Lcopy4x
+.align 16
+.Lcopy4x: # copy or in-place refresh
+ movdqu 16($ap,$i),%xmm2
+ movdqu 32($ap,$i),%xmm1
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+ movdqa %xmm0,32(%rsp,$i)
+ movdqu %xmm1,32($rp,$i)
+ lea 32($i),$i
+ dec $j
+ jnz .Lcopy4x
+
+ shl \$2,$num
+ movdqu 16($ap,$i),%xmm2
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+___
+}
+$code.=<<___;
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul4x_epilogue:
+ ret
+.size bn_mul4x_mont,.-bn_mul4x_mont
+___
+}}}
+ {{{
+######################################################################
+# void bn_sqr4x_mont(
+my $rptr="%rdi"; # const BN_ULONG *rptr,
+my $aptr="%rsi"; # const BN_ULONG *aptr,
+my $bptr="%rdx"; # not used
+my $nptr="%rcx"; # const BN_ULONG *nptr,
+my $n0 ="%r8"; # const BN_ULONG *n0);
+my $num ="%r9"; # int num, has to be divisible by 4 and
+ # not less than 8
+
+my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
+my @A0=("%r10","%r11");
+my @A1=("%r12","%r13");
+my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
+
+$code.=<<___;
+.type bn_sqr4x_mont,\@function,6
+.align 16
+bn_sqr4x_mont:
+.Lsqr4x_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ shl \$3,${num}d # convert $num to bytes
+ xor %r10,%r10
+ mov %rsp,%r11 # put aside %rsp
+ sub $num,%r10 # -$num
+ mov ($n0),$n0 # *n0
+ lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
+ and \$-1024,%rsp # minimize TLB usage
+ ##############################################################
+ # Stack layout
+ #
+ # +0 saved $num, used in reduction section
+ # +8 &t[2*$num], used in reduction section
+ # +32 saved $rptr
+ # +40 saved $nptr
+ # +48 saved *n0
+ # +56 saved %rsp
+ # +64 t[2*$num]
+ #
+ mov $rptr,32(%rsp) # save $rptr
+ mov $nptr,40(%rsp)
+ mov $n0, 48(%rsp)
+ mov %r11, 56(%rsp) # save original %rsp
+.Lsqr4x_body:
+ ##############################################################
+ # Squaring part:
+ #
+ # a) multiply-n-add everything but a[i]*a[i];
+ # b) shift result of a) by 1 to the left and accumulate
+ # a[i]*a[i] products;
+ #
+ lea 32(%r10),$i # $i=-($num-32)
+ lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
+
+ mov $num,$j # $j=$num
+
+ # comments apply to $num==8 case
+ mov -32($aptr,$i),$a0 # a[0]
+ lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr,$i),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr,$i),$ai # a[2]
+ mov %rax,$a1
+
+ mul $a0 # a[1]*a[0]
+ mov %rax,$A0[0] # a[1]*a[0]
+ mov $ai,%rax # a[2]
+ mov %rdx,$A0[1]
+ mov $A0[0],-24($tptr,$i) # t[1]
+
+ xor $A0[0],$A0[0]
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc %rdx,$A0[0]
+ mov $A0[1],-16($tptr,$i) # t[2]
+
+ lea -16($i),$j # j=-16
+
+
+ mov 8($aptr,$j),$ai # a[3]
+ mul $a1 # a[2]*a[1]
+ mov %rax,$A1[0] # a[2]*a[1]+t[3]
+ mov $ai,%rax
+ mov %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ lea 16($j),$j
+ adc \$0,$A0[1]
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[3]
+ jmp .Lsqr4x_1st
+
+.align 16
+.Lsqr4x_1st:
+ mov ($aptr,$j),$ai # a[4]
+ xor $A1[0],$A1[0]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1] # a[3]*a[1]+t[4]
+ mov $ai,%rax
+ adc %rdx,$A1[0]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+ mul $a0 # a[4]*a[0]
+ add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
+ mov $ai,%rax # a[3]
+ adc %rdx,$A0[0]
+ mov $A0[1],($tptr,$j) # t[4]
+
+
+ mov 8($aptr,$j),$ai # a[5]
+ xor $A1[1],$A1[1]
+ mul $a1 # a[4]*a[3]
+ add %rax,$A1[0] # a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+ mul $a0 # a[5]*a[2]
+ add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],8($tptr,$j) # t[5]
+
+ mov 16($aptr,$j),$ai # a[6]
+ xor $A1[0],$A1[0]
+ mul $a1 # a[5]*a[3]
+ add %rax,$A1[1] # a[5]*a[3]+t[6]
+ mov $ai,%rax
+ adc %rdx,$A1[0]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+ mul $a0 # a[6]*a[2]
+ add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
+ mov $ai,%rax # a[3]
+ adc %rdx,$A0[0]
+ mov $A0[1],16($tptr,$j) # t[6]
+
+
+ mov 24($aptr,$j),$ai # a[7]
+ xor $A1[1],$A1[1]
+ mul $a1 # a[6]*a[5]
+ add %rax,$A1[0] # a[6]*a[5]+t[7]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ lea 32($j),$j
+ adc \$0,$A0[1]
+ mul $a0 # a[7]*a[4]
+ add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[7]
+
+ cmp \$0,$j
+ jne .Lsqr4x_1st
+
+ xor $A1[0],$A1[0]
+ add $A0[1],$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[7]*a[5]
+ add %rax,$A1[1]
+ adc %rdx,$A1[0]
+
+ mov $A1[1],($tptr) # t[8]
+ lea 16($i),$i
+ mov $A1[0],8($tptr) # t[9]
+ jmp .Lsqr4x_outer
+
+.align 16
+.Lsqr4x_outer: # comments apply to $num==6 case
+ mov -32($aptr,$i),$a0 # a[0]
+ lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr,$i),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr,$i),$ai # a[2]
+ mov %rax,$a1
+
+ mov -24($tptr,$i),$A0[0] # t[1]
+ xor $A0[1],$A0[1]
+ mul $a0 # a[1]*a[0]
+ add %rax,$A0[0] # a[1]*a[0]+t[1]
+ mov $ai,%rax # a[2]
+ adc %rdx,$A0[1]
+ mov $A0[0],-24($tptr,$i) # t[1]
+
+ xor $A0[0],$A0[0]
+ add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
+ adc \$0,$A0[0]
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc %rdx,$A0[0]
+ mov $A0[1],-16($tptr,$i) # t[2]
+
+ lea -16($i),$j # j=-16
+ xor $A1[0],$A1[0]
+
+
+ mov 8($aptr,$j),$ai # a[3]
+ xor $A1[1],$A1[1]
+ add 8($tptr,$j),$A1[0]
+ adc \$0,$A1[1]
+ mul $a1 # a[2]*a[1]
+ add %rax,$A1[0] # a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],8($tptr,$j) # t[3]
+
+ lea 16($j),$j
+ jmp .Lsqr4x_inner
+
+.align 16
+.Lsqr4x_inner:
+ mov ($aptr,$j),$ai # a[4]
+ xor $A1[0],$A1[0]
+ add ($tptr,$j),$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1] # a[3]*a[1]+t[4]
+ mov $ai,%rax
+ adc %rdx,$A1[0]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+ mul $a0 # a[4]*a[0]
+ add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
+ mov $ai,%rax # a[3]
+ adc %rdx,$A0[0]
+ mov $A0[1],($tptr,$j) # t[4]
+
+ mov 8($aptr,$j),$ai # a[5]
+ xor $A1[1],$A1[1]
+ add 8($tptr,$j),$A1[0]
+ adc \$0,$A1[1]
+ mul $a1 # a[4]*a[3]
+ add %rax,$A1[0] # a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ lea 16($j),$j # j++
+ adc \$0,$A0[1]
+ mul $a0 # a[5]*a[2]
+ add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
+
+ cmp \$0,$j
+ jne .Lsqr4x_inner
+
+ xor $A1[0],$A1[0]
+ add $A0[1],$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[5]*a[3]
+ add %rax,$A1[1]
+ adc %rdx,$A1[0]
+
+ mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
+ mov $A1[0],8($tptr) # t[7], "preloaded t[3]" below
+
+ add \$16,$i
+ jnz .Lsqr4x_outer
+
+ # comments apply to $num==4 case
+ mov -32($aptr),$a0 # a[0]
+ lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr),$ai # a[2]
+ mov %rax,$a1
+
+ xor $A0[1],$A0[1]
+ mul $a0 # a[1]*a[0]
+ add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
+ mov $ai,%rax # a[2]
+ adc %rdx,$A0[1]
+ mov $A0[0],-24($tptr) # t[1]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
+ adc \$0,$A0[0]
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc %rdx,$A0[0]
+ mov $A0[1],-16($tptr) # t[2]
+
+ mov -8($aptr),$ai # a[3]
+ mul $a1 # a[2]*a[1]
+ add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
+ mov $ai,%rax
+ adc \$0,%rdx
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ mov %rdx,$A1[1]
+ adc \$0,$A0[1]
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr) # t[3]
+
+ xor $A1[0],$A1[0]
+ add $A0[1],$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1]
+ mov -16($aptr),%rax # a[2]
+ adc %rdx,$A1[0]
+
+ mov $A1[1],($tptr) # t[4]
+ mov $A1[0],8($tptr) # t[5]
+
+ mul $ai # a[2]*a[3]
+___
+{
+my ($shift,$carry)=($a0,$a1);
+my @S=(@A1,$ai,$n0);
+$code.=<<___;
+ add \$16,$i
+ xor $shift,$shift
+ sub $num,$i # $i=16-$num
+ xor $carry,$carry
+
+ add $A1[0],%rax # t[5]
+ adc \$0,%rdx
+ mov %rax,8($tptr) # t[5]
+ mov %rdx,16($tptr) # t[6]
+ mov $carry,24($tptr) # t[7]
+
+ mov -16($aptr,$i),%rax # a[0]
+ lea 64(%rsp,$num,2),$tptr
+ xor $A0[0],$A0[0] # t[0]
+ mov -24($tptr,$i,2),$A0[1] # t[1]
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr,$i,2)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],-24($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 0($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],-16($tptr,$i,2)
+ adc %rdx,$S[3]
+ lea 16($i),$i
+ mov $S[3],-40($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ jmp .Lsqr4x_shift_n_add
+
+.align 16
+.Lsqr4x_shift_n_add:
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr,$i,2)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],-24($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 0($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],-16($tptr,$i,2)
+ adc %rdx,$S[3]
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ mov $S[3],-8($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov 16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 24($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov 8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],0($tptr,$i,2)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],8($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 32($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 40($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 16($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],16($tptr,$i,2)
+ adc %rdx,$S[3]
+ mov $S[3],24($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ add \$32,$i
+ jnz .Lsqr4x_shift_n_add
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
+ mov $S[1],-24($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ adc %rax,$S[2]
+ adc %rdx,$S[3]
+ mov $S[2],-16($tptr)
+ mov $S[3],-8($tptr)
+___
+}
+##############################################################
+# Montgomery reduction part, "word-by-word" algorithm.
+#
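For readers following along, a plain (non-modulo-scheduled) C sketch of the word-by-word reduction implemented below: t[] is the 2*num-word squaring result, np[] the modulus and n0 = -np[0]^-1 mod 2^64. Illustration only; the u64/u128 typedefs again assume a GCC/Clang-style unsigned __int128.

typedef unsigned long long u64;
typedef unsigned __int128  u128;	/* GCC/Clang assumption */

/* Illustration only: word-by-word Montgomery reduction of t[0..2n-1]. */
static u64 mont_reduce_sketch(u64 *t, const u64 *np, u64 n0, int n)
{
	u64 topbit = 0;
	int i, j;

	for (i = 0; i < n; i++) {
		u64 m = t[i] * n0;	/* m = t[i]*n0 mod 2^64 */
		u128 acc = 0;

		for (j = 0; j < n; j++) {
			acc += (u128)m * np[j] + t[i + j];
			t[i + j] = (u64)acc;
			acc >>= 64;
		}
		/* fold the carry into the word just above the window */
		acc += (u128)t[i + n] + topbit;
		t[i + n] = (u64)acc;
		topbit = (u64)(acc >> 64);
	}
	/* reduced value is t[n..2n-1] plus topbit*2^(64n) */
	return topbit;
}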
+{
+my ($topbit,$nptr)=("%rbp",$aptr);
+my ($m0,$m1)=($a0,$a1);
+my @Ni=("%rbx","%r9");
+$code.=<<___;
+ mov 40(%rsp),$nptr # restore $nptr
+ mov 48(%rsp),$n0 # restore *n0
+ xor $j,$j
+ mov $num,0(%rsp) # save $num
+ sub $num,$j # $j=-$num
+ mov 64(%rsp),$A0[0] # t[0] # modsched #
+ mov $n0,$m0 # # modsched #
+ lea 64(%rsp,$num,2),%rax # end of t[] buffer
+ lea 64(%rsp,$num),$tptr # end of t[] window
+ mov %rax,8(%rsp) # save end of t[] buffer
+ lea ($nptr,$num),$nptr # end of n[] buffer
+ xor $topbit,$topbit # $topbit=0
+
+ mov 0($nptr,$j),%rax # n[0] # modsched #
+ mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
+ imulq $A0[0],$m0 # m0=t[0]*n0 # modsched #
+ mov %rax,$Ni[0] # # modsched #
+ jmp .Lsqr4x_mont_outer
+
+.align 16
+.Lsqr4x_mont_outer:
+ xor $A0[1],$A0[1]
+ mul $m0 # n[0]*m0
+ add %rax,$A0[0] # n[0]*m0+t[0]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+ mov $n0,$m1
+
+ xor $A0[0],$A0[0]
+ add 8($tptr,$j),$A0[1]
+ adc \$0,$A0[0]
+ mul $m0 # n[1]*m0
+ add %rax,$A0[1] # n[1]*m0+t[1]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+
+ imulq $A0[1],$m1
+
+ mov 16($nptr,$j),$Ni[0] # n[2]
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[0]*m1
+ add %rax,$A1[0] # n[0]*m1+"t[1]"
+ mov $Ni[0],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],8($tptr,$j) # "t[1]"
+
+ xor $A0[1],$A0[1]
+ add 16($tptr,$j),$A0[0]
+ adc \$0,$A0[1]
+ mul $m0 # n[2]*m0
+ add %rax,$A0[0] # n[2]*m0+t[2]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+
+ mov 24($nptr,$j),$Ni[1] # n[3]
+ xor $A1[0],$A1[0]
+ add $A0[0],$A1[1]
+ adc \$0,$A1[0]
+ mul $m1 # n[1]*m1
+ add %rax,$A1[1] # n[1]*m1+"t[2]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[0]
+ mov $A1[1],16($tptr,$j) # "t[2]"
+
+ xor $A0[0],$A0[0]
+ add 24($tptr,$j),$A0[1]
+ lea 32($j),$j
+ adc \$0,$A0[0]
+ mul $m0 # n[3]*m0
+ add %rax,$A0[1] # n[3]*m0+t[3]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+ jmp .Lsqr4x_mont_inner
+
+.align 16
+.Lsqr4x_mont_inner:
+ mov ($nptr,$j),$Ni[0] # n[4]
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[2]*m1
+ add %rax,$A1[0] # n[2]*m1+"t[3]"
+ mov $Ni[0],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],-8($tptr,$j) # "t[3]"
+
+ xor $A0[1],$A0[1]
+ add ($tptr,$j),$A0[0]
+ adc \$0,$A0[1]
+ mul $m0 # n[4]*m0
+ add %rax,$A0[0] # n[4]*m0+t[4]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+
+ mov 8($nptr,$j),$Ni[1] # n[5]
+ xor $A1[0],$A1[0]
+ add $A0[0],$A1[1]
+ adc \$0,$A1[0]
+ mul $m1 # n[3]*m1
+ add %rax,$A1[1] # n[3]*m1+"t[4]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[0]
+ mov $A1[1],($tptr,$j) # "t[4]"
+
+ xor $A0[0],$A0[0]
+ add 8($tptr,$j),$A0[1]
+ adc \$0,$A0[0]
+ mul $m0 # n[5]*m0
+ add %rax,$A0[1] # n[5]*m0+t[5]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+
+
+ mov 16($nptr,$j),$Ni[0] # n[6]
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[4]*m1
+ add %rax,$A1[0] # n[4]*m1+"t[5]"
+ mov $Ni[0],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],8($tptr,$j) # "t[5]"
+
+ xor $A0[1],$A0[1]
+ add 16($tptr,$j),$A0[0]
+ adc \$0,$A0[1]
+ mul $m0 # n[6]*m0
+ add %rax,$A0[0] # n[6]*m0+t[6]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+
+ mov 24($nptr,$j),$Ni[1] # n[7]
+ xor $A1[0],$A1[0]
+ add $A0[0],$A1[1]
+ adc \$0,$A1[0]
+ mul $m1 # n[5]*m1
+ add %rax,$A1[1] # n[5]*m1+"t[6]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[0]
+ mov $A1[1],16($tptr,$j) # "t[6]"
+
+ xor $A0[0],$A0[0]
+ add 24($tptr,$j),$A0[1]
+ lea 32($j),$j
+ adc \$0,$A0[0]
+ mul $m0 # n[7]*m0
+ add %rax,$A0[1] # n[7]*m0+t[7]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+ cmp \$0,$j
+ jne .Lsqr4x_mont_inner
+
+ sub 0(%rsp),$j # $j=-$num # modsched #
+ mov $n0,$m0 # # modsched #
+
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[6]*m1
+ add %rax,$A1[0] # n[6]*m1+"t[7]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],-8($tptr) # "t[7]"
+
+ xor $A0[1],$A0[1]
+ add ($tptr),$A0[0] # +t[8]
+ adc \$0,$A0[1]
+ mov 0($nptr,$j),$Ni[0] # n[0] # modsched #
+ add $topbit,$A0[0]
+ adc \$0,$A0[1]
+
+ imulq 16($tptr,$j),$m0 # m0=t[0]*n0 # modsched #
+ xor $A1[0],$A1[0]
+ mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
+ add $A0[0],$A1[1]
+ mov 16($tptr,$j),$A0[0] # t[0] # modsched #
+ adc \$0,$A1[0]
+ mul $m1 # n[7]*m1
+ add %rax,$A1[1] # n[7]*m1+"t[8]"
+ mov $Ni[0],%rax # # modsched #
+ adc %rdx,$A1[0]
+ mov $A1[1],($tptr) # "t[8]"
+
+ xor $topbit,$topbit
+ add 8($tptr),$A1[0] # +t[9]
+ adc $topbit,$topbit
+ add $A0[1],$A1[0]
+ lea 16($tptr),$tptr # "t[$num]>>128"
+ adc \$0,$topbit
+ mov $A1[0],-8($tptr) # "t[9]"
+ cmp 8(%rsp),$tptr # are we done?
+ jb .Lsqr4x_mont_outer
+
+ mov 0(%rsp),$num # restore $num
+ mov $topbit,($tptr) # save $topbit
+___
+}
+##############################################################
+# Post-condition, 4x unrolled copy from bn_mul_mont
+#
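What this post-condition amounts to, as a hedged C sketch: subtract np[] from the upper half of t[], then select either the subtracted value or the original words depending on the overall borrow, mirroring the sbb/and/not/or sequence below. The real code also re-zeroes the temporary area while copying, which is omitted here; names are illustrative.

typedef unsigned long long u64;
typedef unsigned __int128  u128;	/* GCC/Clang assumption */

/* Illustration only: rp[] = tp[] - np[] unless that borrows overall
 * (taking the extra carry word topbit into account), else rp[] = tp[]. */
static void mont_post_sketch(u64 *rp, const u64 *tp, const u64 *np,
			     u64 topbit, int n)
{
	u64 borrow = 0, mask;
	int i;

	for (i = 0; i < n; i++) {
		u128 d = (u128)tp[i] - np[i] - borrow;

		rp[i]  = (u64)d;
		borrow = (u64)(d >> 64) & 1;	/* 1 if the subtraction wrapped */
	}
	/* all-ones exactly when the subtraction borrowed overall */
	mask = topbit - borrow;
	for (i = 0; i < n; i++)
		rp[i] = (tp[i] & mask) | (rp[i] & ~mask);
}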
+{
+my ($tptr,$nptr)=("%rbx",$aptr);
+my @ri=("%rax","%rdx","%r10","%r11");
+$code.=<<___;
+ mov 64(%rsp,$num),@ri[0] # tp[0]
+ lea 64(%rsp,$num),$tptr # upper half of t[2*$num] holds result
+ mov 40(%rsp),$nptr # restore $nptr
+ shr \$5,$num # num/4
+ mov 8($tptr),@ri[1] # t[1]
+ xor $i,$i # i=0 and clear CF!
+
+ mov 32(%rsp),$rptr # restore $rptr
+ sub 0($nptr),@ri[0]
+ mov 16($tptr),@ri[2] # t[2]
+ mov 24($tptr),@ri[3] # t[3]
+ sbb 8($nptr),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsqr4x_sub
+.align 16
+.Lsqr4x_sub:
+ mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($nptr,$i,8),@ri[2]
+ mov 32($tptr,$i,8),@ri[0] # tp[i+1]
+ mov 40($tptr,$i,8),@ri[1]
+ sbb 24($nptr,$i,8),@ri[3]
+ mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($nptr,$i,8),@ri[0]
+ mov 48($tptr,$i,8),@ri[2]
+ mov 56($tptr,$i,8),@ri[3]
+ sbb 40($nptr,$i,8),@ri[1]
+ lea 4($i),$i # i++
+ dec $j # doesn't affect CF!
+ jnz .Lsqr4x_sub
+
+ mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($tptr,$i,8),@ri[0] # load overflow bit
+ sbb 16($nptr,$i,8),@ri[2]
+ mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($nptr,$i,8),@ri[3]
+ mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$tptr
+ not @ri[0]
+ mov $rptr,$nptr
+ and @ri[0],$nptr
+ lea -1($num),$j
+ or $nptr,$tptr # tp=borrow?tp:rp
+
+ pxor %xmm0,%xmm0
+ lea 64(%rsp,$num,8),$nptr
+ movdqu ($tptr),%xmm1
+ lea ($nptr,$num,8),$nptr
+ movdqa %xmm0,64(%rsp) # zap lower half of temporary vector
+ movdqa %xmm0,($nptr) # zap upper half of temporary vector
+ movdqu %xmm1,($rptr)
+ jmp .Lsqr4x_copy
+.align 16
+.Lsqr4x_copy: # copy or in-place refresh
+ movdqu 16($tptr,$i),%xmm2
+ movdqu 32($tptr,$i),%xmm1
+ movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
+ movdqa %xmm0,96(%rsp,$i) # zap lower half of temporary vector
+ movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
+ movdqa %xmm0,32($nptr,$i) # zap upper half of temporary vector
+ movdqu %xmm2,16($rptr,$i)
+ movdqu %xmm1,32($rptr,$i)
+ lea 32($i),$i
dec $j
- jge .Lcopy
+ jnz .Lsqr4x_copy
- mov 8(%rsp,$num,8),%rsp # restore %rsp
+ movdqu 16($tptr,$i),%xmm2
+ movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
+ movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
+ movdqu %xmm2,16($rptr,$i)
+___
+}
+$code.=<<___;
+ mov 56(%rsp),%rsi # restore %rsp
mov \$1,%rax
+ mov 0(%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lsqr4x_epilogue:
+ ret
+.size bn_sqr4x_mont,.-bn_sqr4x_mont
+___
+}}}
+$code.=<<___;
+.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mul_handler,\@abi-omnipotent
+.align 16
+mul_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 192($context),%r10 # pull $num
+ mov 8(%rax,%r10,8),%rax # pull saved stack pointer
+ lea 48(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+ jmp .Lcommon_seh_tail
+.size mul_handler,.-mul_handler
+
+.type sqr_handler,\@abi-omnipotent
+.align 16
+sqr_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lsqr4x_body(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lsqr_body
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lsqr4x_epilogue(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lsqr_epilogue
+ jae .Lcommon_seh_tail
+
+ mov 56(%rax),%rax # pull saved stack pointer
+ lea 48(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
pop %rbx
+ pop %rdi
+ pop %rsi
ret
-.size bn_mul_mont,.-bn_mul_mont
-.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.size sqr_handler,.-sqr_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_bn_mul_mont
+ .rva .LSEH_end_bn_mul_mont
+ .rva .LSEH_info_bn_mul_mont
+
+ .rva .LSEH_begin_bn_mul4x_mont
+ .rva .LSEH_end_bn_mul4x_mont
+ .rva .LSEH_info_bn_mul4x_mont
+
+ .rva .LSEH_begin_bn_sqr4x_mont
+ .rva .LSEH_end_bn_sqr4x_mont
+ .rva .LSEH_info_bn_sqr4x_mont
+
+.section .xdata
+.align 8
+.LSEH_info_bn_mul_mont:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
+.LSEH_info_bn_mul4x_mont:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
+.LSEH_info_bn_sqr4x_mont:
+ .byte 9,0,0,0
+ .rva sqr_handler
___
+}
print $code;
close STDOUT;
diff --git a/crypto/bn/asm/x86_64-mont5.pl b/crypto/bn/asm/x86_64-mont5.pl
new file mode 100755
index 000000000000..057cda28aaed
--- /dev/null
+++ b/crypto/bn/asm/x86_64-mont5.pl
@@ -0,0 +1,1070 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# August 2011.
+#
+# Companion to x86_64-mont.pl that optimizes cache-timing attack
+# countermeasures. The subroutines are produced by replacing bp[i]
+# references in their x86_64-mont.pl counterparts with cache-neutral
+# references to the powers table computed in BN_mod_exp_mont_consttime.
+# In addition, a subroutine that scatters elements of the powers table
+# is implemented, so that scattering/gathering can be tuned without
+# modifying bn_exp.c.
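A hedged C sketch of what "cache-neutral" means here: every slot of the interleaved powers table is touched and the wanted entry is kept with a mask, so the sequence of cache lines accessed does not depend on the secret index. Names are illustrative; the real code below performs the selection with SSE2 masks at cache-line granularity.

typedef unsigned long long u64;

/* Illustration only: read word j of table entry idx (0..31) from a table
 * interleaved as tbl[j*32 + i], touching all 32 candidates every time. */
static u64 gather_word_sketch(const u64 *tbl, int j, int idx)
{
	u64 word = 0;
	int i;

	for (i = 0; i < 32; i++) {
		u64 mask = 0 - (u64)(i == idx);	/* all-ones when i == idx */
		word |= tbl[j * 32 + i] & mask;
	}
	return word;
}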
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+# int bn_mul_mont_gather5(
+$rp="%rdi"; # BN_ULONG *rp,
+$ap="%rsi"; # const BN_ULONG *ap,
+$bp="%rdx"; # const BN_ULONG *bp,
+$np="%rcx"; # const BN_ULONG *np,
+$n0="%r8"; # const BN_ULONG *n0,
+$num="%r9"; # int num,
+					# int idx);	# 0 to 2^5-1, "index" in $bp holding
+					# pre-computed powers of a', interlaced
+					# in such a manner that b[0] is $bp[idx],
+					# b[1] is $bp[2^5+idx], etc.
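The interleaving described in the comment above can be pictured with this small C sketch of the scatter step (the counterpart of bn_scatter5 further down; names are illustrative): word j of power number idx is stored at tbl[j*32 + idx], so b[0] sits at bp[idx], b[1] at bp[2^5 + idx], and so on.

typedef unsigned long long u64;

/* Illustration only: scatter the num words of power number idx into the
 * interleaved table at stride 32 (2^5 powers per row). */
static void scatter_sketch(u64 *tbl, const u64 *power, int num, int idx)
{
	int j;

	for (j = 0; j < num; j++)
		tbl[j * 32 + idx] = power[j];
}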
+$lo0="%r10";
+$hi0="%r11";
+$hi1="%r13";
+$i="%r14";
+$j="%r15";
+$m0="%rbx";
+$m1="%rbp";
+
+$code=<<___;
+.text
+
+.globl bn_mul_mont_gather5
+.type bn_mul_mont_gather5,\@function,6
+.align 64
+bn_mul_mont_gather5:
+ test \$3,${num}d
+ jnz .Lmul_enter
+ cmp \$8,${num}d
+ jb .Lmul_enter
+ jmp .Lmul4x_enter
+
+.align 16
+.Lmul_enter:
+ mov ${num}d,${num}d
+ mov `($win64?56:8)`(%rsp),%r10d # load 7th argument
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+___
+$code.=<<___ if ($win64);
+ lea -0x28(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+.Lmul_alloca:
+___
+$code.=<<___;
+ mov %rsp,%rax
+ lea 2($num),%r11
+ neg %r11
+ lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+2))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul_body:
+ mov $bp,%r12 # reassign $bp
+___
+ $bp="%r12";
+ $STRIDE=2**5*8; # 5 is "window size"
+ $N=$STRIDE/4; # should match cache line size
+$code.=<<___;
+ mov %r10,%r11
+ shr \$`log($N/8)/log(2)`,%r10
+ and \$`$N/8-1`,%r11
+ not %r10
+ lea .Lmagic_masks(%rip),%rax
+ and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
+ lea 96($bp,%r11,8),$bp # pointer within 1st cache line
+ movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
+ movq 8(%rax,%r10,8),%xmm5 # cache line contains element
+ movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
+ movq 24(%rax,%r10,8),%xmm7
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ movq %xmm0,$m0 # m0=bp[0]
+
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$lo0
+ mov ($np),%rax
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $lo0,$m1 # "tp[0]"*n0
+ mov %rdx,$hi0
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .L1st_enter
+
+.align 16
+.L1st:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ mov $lo0,$hi0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.L1st_enter:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 1($j),$j # j++
+ mov %rdx,$lo0
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .L1st
+
+ movq %xmm0,$m0 # bp[1]
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+ mov $lo0,$hi0
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ jmp .Louter
+.align 16
+.Louter:
+ xor $j,$j # j=0
+ mov $n0,$m1
+ mov (%rsp),$lo0
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$lo0 # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $lo0,$m1 # tp[0]*n0
+ mov %rdx,$hi0
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov 8(%rsp),$lo0 # tp[1]
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .Linner_enter
+
+.align 16
+.Linner:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.Linner_enter:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
+ mov %rdx,$hi0
+ adc \$0,$hi0
+ lea 1($j),$j # j++
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .Linner
+
+ movq %xmm0,$m0 # bp[i+1]
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ add $lo0,$hi1 # pull upmost overflow bit
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ cmp $num,$i
+ jl .Louter
+
+ xor $i,$i # i=0 and clear CF!
+ mov (%rsp),%rax # tp[0]
+ lea (%rsp),$ap # borrow ap for tp
+ mov $num,$j # j=num
+ jmp .Lsub
+.align 16
+.Lsub: sbb ($np,$i,8),%rax
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 8($ap,$i,8),%rax # tp[i+1]
+ lea 1($i),$i # i++
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub
+
+ sbb \$0,%rax # handle upmost overflow bit
+ xor $i,$i
+ and %rax,$ap
+ not %rax
+ mov $rp,$np
+ and %rax,$np
+ mov $num,$j # j=num
+ or $np,$ap # ap=borrow?tp:rp
+.align 16
+.Lcopy: # copy or in-place refresh
+ mov ($ap,$i,8),%rax
+ mov $i,(%rsp,$i,8) # zap temporary vector
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]
+ lea 1($i),$i
+ sub \$1,$j
+ jnz .Lcopy
+
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+___
+$code.=<<___ if ($win64);
+ movaps (%rsi),%xmm6
+ movaps 0x10(%rsi),%xmm7
+ lea 0x28(%rsi),%rsi
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul_epilogue:
+ ret
+.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
+___
+{{{
+my @A=("%r10","%r11");
+my @N=("%r13","%rdi");
+$code.=<<___;
+.type bn_mul4x_mont_gather5,\@function,6
+.align 16
+bn_mul4x_mont_gather5:
+.Lmul4x_enter:
+ mov ${num}d,${num}d
+ mov `($win64?56:8)`(%rsp),%r10d # load 7th argument
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+___
+$code.=<<___ if ($win64);
+ lea -0x28(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+.Lmul4x_alloca:
+___
+$code.=<<___;
+ mov %rsp,%rax
+ lea 4($num),%r11
+ neg %r11
+ lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+4))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul4x_body:
+ mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
+ mov %rdx,%r12 # reassign $bp
+___
+ $bp="%r12";
+ $STRIDE=2**5*8; # 5 is "window size"
+ $N=$STRIDE/4; # should match cache line size
+$code.=<<___;
+ mov %r10,%r11
+ shr \$`log($N/8)/log(2)`,%r10
+ and \$`$N/8-1`,%r11
+ not %r10
+ lea .Lmagic_masks(%rip),%rax
+ and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
+ lea 96($bp,%r11,8),$bp # pointer within 1st cache line
+ movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
+ movq 8(%rax,%r10,8),%xmm5 # cache line contains element
+ movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
+ movq 24(%rax,%r10,8),%xmm7
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ movq %xmm0,$m0 # m0=bp[0]
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$A[0]
+ mov ($np),%rax
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $A[0],$m1 # "tp[0]"*n0
+ mov %rdx,$A[1]
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ lea 4($j),$j # j++
+ adc \$0,%rdx
+ mov $N[1],(%rsp)
+ mov %rdx,$N[0]
+ jmp .L1st4x
+.align 16
+.L1st4x:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .L1st4x
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ movq %xmm0,$m0 # bp[1]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+.align 4
+.Louter4x:
+ xor $j,$j # j=0
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mov (%rsp),$A[0]
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$A[0] # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $A[0],$m1 # tp[0]*n0
+ mov %rdx,$A[1]
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # "$N[0]", discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ add 8(%rsp),$A[1] # +tp[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
+ lea 4($j),$j # j+=2
+ adc \$0,%rdx
+ mov %rdx,$N[0]
+ jmp .Linner4x
+.align 16
+.Linner4x:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ add 8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-40(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .Linner4x
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 1($i),$i # i++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ movq %xmm0,$m0 # bp[i+1]
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ add (%rsp,$num,8),$N[0] # pull upmost overflow bit
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ cmp $num,$i
+ jl .Louter4x
+___
+{
+my @ri=("%rax","%rdx",$m0,$m1);
+$code.=<<___;
+ mov 16(%rsp,$num,8),$rp # restore $rp
+ mov 0(%rsp),@ri[0] # tp[0]
+ pxor %xmm0,%xmm0
+ mov 8(%rsp),@ri[1] # tp[1]
+ shr \$2,$num # num/=4
+ lea (%rsp),$ap # borrow ap for tp
+ xor $i,$i # i=0 and clear CF!
+
+ sub 0($np),@ri[0]
+ mov 16($ap),@ri[2] # tp[2]
+ mov 24($ap),@ri[3] # tp[3]
+ sbb 8($np),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsub4x
+.align 16
+.Lsub4x:
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($np,$i,8),@ri[2]
+ mov 32($ap,$i,8),@ri[0] # tp[i+1]
+ mov 40($ap,$i,8),@ri[1]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($np,$i,8),@ri[0]
+ mov 48($ap,$i,8),@ri[2]
+ mov 56($ap,$i,8),@ri[3]
+ sbb 40($np,$i,8),@ri[1]
+ lea 4($i),$i # i++
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub4x
+
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($ap,$i,8),@ri[0] # load overflow bit
+ sbb 16($np,$i,8),@ri[2]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$ap
+ not @ri[0]
+ mov $rp,$np
+ and @ri[0],$np
+ lea -1($num),$j
+ or $np,$ap # ap=borrow?tp:rp
+
+ movdqu ($ap),%xmm1
+ movdqa %xmm0,(%rsp)
+ movdqu %xmm1,($rp)
+ jmp .Lcopy4x
+.align 16
+.Lcopy4x: # copy or in-place refresh
+ movdqu 16($ap,$i),%xmm2
+ movdqu 32($ap,$i),%xmm1
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+ movdqa %xmm0,32(%rsp,$i)
+ movdqu %xmm1,32($rp,$i)
+ lea 32($i),$i
+ dec $j
+ jnz .Lcopy4x
+
+ shl \$2,$num
+ movdqu 16($ap,$i),%xmm2
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+___
+}
+$code.=<<___;
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+___
+$code.=<<___ if ($win64);
+ movaps (%rsi),%xmm6
+ movaps 0x10(%rsi),%xmm7
+ lea 0x28(%rsi),%rsi
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul4x_epilogue:
+ ret
+.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
+___
+}}}
+
+{
+my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%rdx","%r8", "%r9") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx"); # Unix order
+my $out=$inp;
+my $STRIDE=2**5*8;
+my $N=$STRIDE/4;
+
+$code.=<<___;
+.globl bn_scatter5
+.type bn_scatter5,\@abi-omnipotent
+.align 16
+bn_scatter5:
+ cmp \$0, $num
+ jz .Lscatter_epilogue
+ lea ($tbl,$idx,8),$tbl
+.Lscatter:
+ mov ($inp),%rax
+ lea 8($inp),$inp
+ mov %rax,($tbl)
+ lea 32*8($tbl),$tbl
+ sub \$1,$num
+ jnz .Lscatter
+.Lscatter_epilogue:
+ ret
+.size bn_scatter5,.-bn_scatter5
+
+.globl bn_gather5
+.type bn_gather5,\@abi-omnipotent
+.align 16
+bn_gather5:
+___
+$code.=<<___ if ($win64);
+.LSEH_begin_bn_gather5:
+ # I can't trust assembler to use specific encoding:-(
+ .byte 0x48,0x83,0xec,0x28 #sub \$0x28,%rsp
+ .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp)
+ .byte 0x0f,0x29,0x7c,0x24,0x10 #movdqa %xmm7,0x10(%rsp)
+___
+$code.=<<___;
+ mov $idx,%r11
+ shr \$`log($N/8)/log(2)`,$idx
+ and \$`$N/8-1`,%r11
+ not $idx
+ lea .Lmagic_masks(%rip),%rax
+ and \$`2**5/($N/8)-1`,$idx # 5 is "window size"
+ lea 96($tbl,%r11,8),$tbl # pointer within 1st cache line
+ movq 0(%rax,$idx,8),%xmm4 # set of masks denoting which
+ movq 8(%rax,$idx,8),%xmm5 # cache line contains element
+ movq 16(%rax,$idx,8),%xmm6 # denoted by 7th argument
+ movq 24(%rax,$idx,8),%xmm7
+ jmp .Lgather
+.align 16
+.Lgather:
+ movq `0*$STRIDE/4-96`($tbl),%xmm0
+ movq `1*$STRIDE/4-96`($tbl),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($tbl),%xmm2
+ pand %xmm5,%xmm1
+ movq `3*$STRIDE/4-96`($tbl),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ lea $STRIDE($tbl),$tbl
+ por %xmm3,%xmm0
+
+ movq %xmm0,($out) # m0=bp[0]
+ lea 8($out),$out
+ sub \$1,$num
+ jnz .Lgather
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+ lea 0x28(%rsp),%rsp
+___
+$code.=<<___;
+ ret
+.LSEH_end_bn_gather5:
+.size bn_gather5,.-bn_gather5
+___
+}
+$code.=<<___;
+.align 64
+.Lmagic_masks:
+ .long 0,0, 0,0, 0,0, -1,-1
+ .long 0,0, 0,0, 0,0, 0,0
+.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mul_handler,\@abi-omnipotent
+.align 16
+mul_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ lea `40+48`(%rax),%rax
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # end of alloca label
+ cmp %r10,%rbx # context->Rip<end of alloca label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 8(%r11),%r10d # HandlerData[2]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 192($context),%r10 # pull $num
+ mov 8(%rax,%r10,8),%rax # pull saved stack pointer
+
+ movaps (%rax),%xmm0
+ movaps 16(%rax),%xmm1
+ lea `40+48`(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+ movups %xmm0,512($context) # restore context->Xmm6
+ movups %xmm1,528($context) # restore context->Xmm7
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size mul_handler,.-mul_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_bn_mul_mont_gather5
+ .rva .LSEH_end_bn_mul_mont_gather5
+ .rva .LSEH_info_bn_mul_mont_gather5
+
+ .rva .LSEH_begin_bn_mul4x_mont_gather5
+ .rva .LSEH_end_bn_mul4x_mont_gather5
+ .rva .LSEH_info_bn_mul4x_mont_gather5
+
+ .rva .LSEH_begin_bn_gather5
+ .rva .LSEH_end_bn_gather5
+ .rva .LSEH_info_bn_gather5
+
+.section .xdata
+.align 8
+.LSEH_info_bn_mul_mont_gather5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul_alloca,.Lmul_body,.Lmul_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_mul4x_mont_gather5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul4x_alloca,.Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_gather5:
+ .byte 0x01,0x0d,0x05,0x00
+ .byte 0x0d,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7
+ .byte 0x08,0x68,0x00,0x00 #movaps (rsp),xmm6
+ .byte 0x04,0x42,0x00,0x00 #sub rsp,0x28
+.align 8
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+close STDOUT;
diff --git a/crypto/bn/bn.h b/crypto/bn/bn.h
index f1719a5877f7..f34248ec4f87 100644
--- a/crypto/bn/bn.h
+++ b/crypto/bn/bn.h
@@ -56,6 +56,59 @@
* [including the GNU Public Licence.]
*/
/* ====================================================================
+ * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+/* ====================================================================
* Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
*
* Portions of the attached software ("Contribution") are developed by
@@ -77,6 +130,7 @@
#include <stdio.h> /* FILE */
#endif
#include <openssl/ossl_typ.h>
+#include <openssl/crypto.h>
#ifdef __cplusplus
extern "C" {
@@ -94,9 +148,11 @@ extern "C" {
/* #define BN_DEBUG */
/* #define BN_DEBUG_RAND */
+#ifndef OPENSSL_SMALL_FOOTPRINT
#define BN_MUL_COMBA
#define BN_SQR_COMBA
#define BN_RECURSION
+#endif
/* This next option uses the C libraries (2 word)/(1 word) function.
* If it is not defined, I use my C version (which is slower).
@@ -137,6 +193,8 @@ extern "C" {
#define BN_DEC_FMT1 "%lu"
#define BN_DEC_FMT2 "%019lu"
#define BN_DEC_NUM 19
+#define BN_HEX_FMT1 "%lX"
+#define BN_HEX_FMT2 "%016lX"
#endif
/* This is where the long long data type is 64 bits, but long is 32.
@@ -162,84 +220,56 @@ extern "C" {
#define BN_DEC_FMT1 "%llu"
#define BN_DEC_FMT2 "%019llu"
#define BN_DEC_NUM 19
+#define BN_HEX_FMT1 "%llX"
+#define BN_HEX_FMT2 "%016llX"
#endif
#ifdef THIRTY_TWO_BIT
#ifdef BN_LLONG
-# if defined(OPENSSL_SYS_WIN32) && !defined(__GNUC__)
+# if defined(_WIN32) && !defined(__GNUC__)
# define BN_ULLONG unsigned __int64
+# define BN_MASK (0xffffffffffffffffI64)
# else
# define BN_ULLONG unsigned long long
+# define BN_MASK (0xffffffffffffffffLL)
# endif
#endif
-#define BN_ULONG unsigned long
-#define BN_LONG long
+#define BN_ULONG unsigned int
+#define BN_LONG int
#define BN_BITS 64
#define BN_BYTES 4
#define BN_BITS2 32
#define BN_BITS4 16
-#ifdef OPENSSL_SYS_WIN32
-/* VC++ doesn't like the LL suffix */
-#define BN_MASK (0xffffffffffffffffL)
-#else
-#define BN_MASK (0xffffffffffffffffLL)
-#endif
#define BN_MASK2 (0xffffffffL)
#define BN_MASK2l (0xffff)
#define BN_MASK2h1 (0xffff8000L)
#define BN_MASK2h (0xffff0000L)
#define BN_TBIT (0x80000000L)
#define BN_DEC_CONV (1000000000L)
-#define BN_DEC_FMT1 "%lu"
-#define BN_DEC_FMT2 "%09lu"
-#define BN_DEC_NUM 9
-#endif
-
-#ifdef SIXTEEN_BIT
-#ifndef BN_DIV2W
-#define BN_DIV2W
-#endif
-#define BN_ULLONG unsigned long
-#define BN_ULONG unsigned short
-#define BN_LONG short
-#define BN_BITS 32
-#define BN_BYTES 2
-#define BN_BITS2 16
-#define BN_BITS4 8
-#define BN_MASK (0xffffffff)
-#define BN_MASK2 (0xffff)
-#define BN_MASK2l (0xff)
-#define BN_MASK2h1 (0xff80)
-#define BN_MASK2h (0xff00)
-#define BN_TBIT (0x8000)
-#define BN_DEC_CONV (100000)
#define BN_DEC_FMT1 "%u"
-#define BN_DEC_FMT2 "%05u"
-#define BN_DEC_NUM 5
+#define BN_DEC_FMT2 "%09u"
+#define BN_DEC_NUM 9
+#define BN_HEX_FMT1 "%X"
+#define BN_HEX_FMT2 "%08X"
#endif
-#ifdef EIGHT_BIT
-#ifndef BN_DIV2W
-#define BN_DIV2W
-#endif
-#define BN_ULLONG unsigned short
-#define BN_ULONG unsigned char
-#define BN_LONG char
-#define BN_BITS 16
-#define BN_BYTES 1
-#define BN_BITS2 8
-#define BN_BITS4 4
-#define BN_MASK (0xffff)
-#define BN_MASK2 (0xff)
-#define BN_MASK2l (0xf)
-#define BN_MASK2h1 (0xf8)
-#define BN_MASK2h (0xf0)
-#define BN_TBIT (0x80)
-#define BN_DEC_CONV (100)
-#define BN_DEC_FMT1 "%u"
-#define BN_DEC_FMT2 "%02u"
-#define BN_DEC_NUM 2
-#endif
+/* 2011-02-22 SMS.
+ * In various places, a size_t variable or a type cast to size_t was
+ * used to perform integer-only operations on pointers. This failed on
+ * VMS with 64-bit pointers (CC /POINTER_SIZE = 64) because size_t is
+ * still only 32 bits. What's needed in these cases is an integer type
+ * with the same size as a pointer, which size_t is not certain to be.
+ * The only fix here is VMS-specific.
+ */
+#if defined(OPENSSL_SYS_VMS)
+# if __INITIAL_POINTER_SIZE == 64
+# define PTR_SIZE_INT long long
+# else /* __INITIAL_POINTER_SIZE == 64 */
+# define PTR_SIZE_INT int
+# endif /* __INITIAL_POINTER_SIZE == 64 [else] */
+#else /* defined(OPENSSL_SYS_VMS) */
+# define PTR_SIZE_INT size_t
+#endif /* defined(OPENSSL_SYS_VMS) [else] */
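A minimal, hypothetical example of the kind of pointer arithmetic this macro exists for: rounding a buffer pointer up to a 64-byte boundary. The helper name is made up, and the snippet assumes the definitions above are in scope.

/* Hypothetical helper: align a pointer to a 64-byte boundary.  PTR_SIZE_INT
 * (rather than size_t) is used so the integer cast is as wide as a pointer. */
static unsigned char *align64(unsigned char *p)
	{
	return (unsigned char *)(((PTR_SIZE_INT)p + 63) & ~(PTR_SIZE_INT)63);
	}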
#define BN_DEFAULT_BITS 1280
@@ -303,12 +333,8 @@ struct bn_mont_ctx_st
BIGNUM N; /* The modulus */
BIGNUM Ni; /* R*(1/R mod N) - N*Ni = 1
* (Ni is only stored for bignum algorithm) */
-#if 0
- /* OpenSSL 0.9.9 preview: */
- BN_ULONG n0[2];/* least significant word(s) of Ni */
-#else
- BN_ULONG n0; /* least significant word of Ni */
-#endif
+ BN_ULONG n0[2];/* least significant word(s) of Ni;
+ (type changed with 0.9.9, was "BN_ULONG n0;" before) */
int flags;
};
@@ -504,6 +530,7 @@ char * BN_bn2hex(const BIGNUM *a);
char * BN_bn2dec(const BIGNUM *a);
int BN_hex2bn(BIGNUM **a, const char *str);
int BN_dec2bn(BIGNUM **a, const char *str);
+int BN_asc2bn(BIGNUM **a, const char *str);
int BN_gcd(BIGNUM *r,const BIGNUM *a,const BIGNUM *b,BN_CTX *ctx);
int BN_kronecker(const BIGNUM *a,const BIGNUM *b,BN_CTX *ctx); /* returns -2 for error */
BIGNUM *BN_mod_inverse(BIGNUM *ret,
@@ -560,19 +587,22 @@ BN_MONT_CTX *BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, int lock,
#define BN_BLINDING_NO_UPDATE 0x00000001
#define BN_BLINDING_NO_RECREATE 0x00000002
-BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, /* const */ BIGNUM *mod);
+BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod);
void BN_BLINDING_free(BN_BLINDING *b);
int BN_BLINDING_update(BN_BLINDING *b,BN_CTX *ctx);
int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx);
int BN_BLINDING_invert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx);
int BN_BLINDING_convert_ex(BIGNUM *n, BIGNUM *r, BN_BLINDING *b, BN_CTX *);
int BN_BLINDING_invert_ex(BIGNUM *n, const BIGNUM *r, BN_BLINDING *b, BN_CTX *);
+#ifndef OPENSSL_NO_DEPRECATED
unsigned long BN_BLINDING_get_thread_id(const BN_BLINDING *);
void BN_BLINDING_set_thread_id(BN_BLINDING *, unsigned long);
+#endif
+CRYPTO_THREADID *BN_BLINDING_thread_id(BN_BLINDING *);
unsigned long BN_BLINDING_get_flags(const BN_BLINDING *);
void BN_BLINDING_set_flags(BN_BLINDING *, unsigned long);
BN_BLINDING *BN_BLINDING_create_param(BN_BLINDING *b,
- const BIGNUM *e, /* const */ BIGNUM *m, BN_CTX *ctx,
+ const BIGNUM *e, BIGNUM *m, BN_CTX *ctx,
int (*bn_mod_exp)(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx),
BN_MONT_CTX *m_ctx);
@@ -593,6 +623,8 @@ int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,
BN_RECP_CTX *recp, BN_CTX *ctx);
+#ifndef OPENSSL_NO_EC2M
+
/* Functions for arithmetic over binary polynomials represented by BIGNUMs.
*
* The BIGNUM::neg property of BIGNUMs representing binary polynomials is
@@ -625,24 +657,26 @@ int BN_GF2m_mod_solve_quad(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
* t^p[0] + t^p[1] + ... + t^p[k]
* where m = p[0] > p[1] > ... > p[k] = 0.
*/
-int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const unsigned int p[]);
+int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[]);
/* r = a mod p */
int BN_GF2m_mod_mul_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
- const unsigned int p[], BN_CTX *ctx); /* r = (a * b) mod p */
-int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const unsigned int p[],
+ const int p[], BN_CTX *ctx); /* r = (a * b) mod p */
+int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const int p[],
BN_CTX *ctx); /* r = (a * a) mod p */
-int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *b, const unsigned int p[],
+int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *b, const int p[],
BN_CTX *ctx); /* r = (1 / b) mod p */
int BN_GF2m_mod_div_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
- const unsigned int p[], BN_CTX *ctx); /* r = (a / b) mod p */
+ const int p[], BN_CTX *ctx); /* r = (a / b) mod p */
int BN_GF2m_mod_exp_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
- const unsigned int p[], BN_CTX *ctx); /* r = (a ^ b) mod p */
+ const int p[], BN_CTX *ctx); /* r = (a ^ b) mod p */
int BN_GF2m_mod_sqrt_arr(BIGNUM *r, const BIGNUM *a,
- const unsigned int p[], BN_CTX *ctx); /* r = sqrt(a) mod p */
+ const int p[], BN_CTX *ctx); /* r = sqrt(a) mod p */
int BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a,
- const unsigned int p[], BN_CTX *ctx); /* r^2 + r = a mod p */
-int BN_GF2m_poly2arr(const BIGNUM *a, unsigned int p[], int max);
-int BN_GF2m_arr2poly(const unsigned int p[], BIGNUM *a);
+ const int p[], BN_CTX *ctx); /* r^2 + r = a mod p */
+int BN_GF2m_poly2arr(const BIGNUM *a, int p[], int max);
+int BN_GF2m_arr2poly(const int p[], BIGNUM *a);
+
+#endif
/* faster mod functions for the 'NIST primes'
* 0 <= a < p^2 */
@@ -751,10 +785,12 @@ int RAND_pseudo_bytes(unsigned char *buf,int num);
#define bn_correct_top(a) \
{ \
BN_ULONG *ftl; \
- if ((a)->top > 0) \
+ int tmp_top = (a)->top; \
+ if (tmp_top > 0) \
{ \
- for (ftl= &((a)->d[(a)->top-1]); (a)->top > 0; (a)->top--) \
- if (*(ftl--)) break; \
+ for (ftl= &((a)->d[tmp_top-1]); tmp_top > 0; tmp_top--) \
+ if (*(ftl--)) break; \
+ (a)->top = tmp_top; \
} \
bn_pollute(a); \
}
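
The reworked bn_correct_top above scans from a cached copy of (a)->top and stores the result back once, instead of decrementing the struct field inside the loop. A minimal standalone sketch of the same normalization, with hypothetical names (correct_top, WORD) standing in for the macro and BN_ULONG:

typedef unsigned long WORD;   /* stand-in for BN_ULONG in this sketch */

/* Strip leading zero words so 'top' indexes one past the highest non-zero
 * word, mirroring the bn_correct_top pattern of scanning a cached top. */
static int correct_top(const WORD *d, int top)
	{
	int t = top;                      /* cached copy, like tmp_top in the macro */
	if (t > 0)
		{
		const WORD *ftl = &d[t - 1];  /* current most significant word */
		for (; t > 0; t--)
			if (*(ftl--))             /* stop at the first non-zero word */
				break;
		}
	return t;                         /* caller stores this back into (a)->top */
	}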
diff --git a/crypto/bn/bn_asm.c b/crypto/bn/bn_asm.c
index 99bc2de4913e..c43c91cc09f4 100644
--- a/crypto/bn/bn_asm.c
+++ b/crypto/bn/bn_asm.c
@@ -75,6 +75,7 @@ BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
assert(num >= 0);
if (num <= 0) return(c1);
+#ifndef OPENSSL_SMALL_FOOTPRINT
while (num&~3)
{
mul_add(rp[0],ap[0],w,c1);
@@ -83,11 +84,11 @@ BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
mul_add(rp[3],ap[3],w,c1);
ap+=4; rp+=4; num-=4;
}
- if (num)
+#endif
+ while (num)
{
- mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
- mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
- mul_add(rp[2],ap[2],w,c1); return c1;
+ mul_add(rp[0],ap[0],w,c1);
+ ap++; rp++; num--;
}
return(c1);
@@ -100,6 +101,7 @@ BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
assert(num >= 0);
if (num <= 0) return(c1);
+#ifndef OPENSSL_SMALL_FOOTPRINT
while (num&~3)
{
mul(rp[0],ap[0],w,c1);
@@ -108,11 +110,11 @@ BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
mul(rp[3],ap[3],w,c1);
ap+=4; rp+=4; num-=4;
}
- if (num)
+#endif
+ while (num)
{
- mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
- mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
- mul(rp[2],ap[2],w,c1);
+ mul(rp[0],ap[0],w,c1);
+ ap++; rp++; num--;
}
return(c1);
}
@@ -121,6 +123,8 @@ void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
{
assert(n >= 0);
if (n <= 0) return;
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
while (n&~3)
{
sqr(r[0],r[1],a[0]);
@@ -129,11 +133,11 @@ void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
sqr(r[6],r[7],a[3]);
a+=4; r+=8; n-=4;
}
- if (n)
+#endif
+ while (n)
{
- sqr(r[0],r[1],a[0]); if (--n == 0) return;
- sqr(r[2],r[3],a[1]); if (--n == 0) return;
- sqr(r[4],r[5],a[2]);
+ sqr(r[0],r[1],a[0]);
+ a++; r+=2; n--;
}
}
@@ -150,18 +154,20 @@ BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
bl=LBITS(w);
bh=HBITS(w);
- for (;;)
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (num&~3)
{
mul_add(rp[0],ap[0],bl,bh,c);
- if (--num == 0) break;
mul_add(rp[1],ap[1],bl,bh,c);
- if (--num == 0) break;
mul_add(rp[2],ap[2],bl,bh,c);
- if (--num == 0) break;
mul_add(rp[3],ap[3],bl,bh,c);
- if (--num == 0) break;
- ap+=4;
- rp+=4;
+ ap+=4; rp+=4; num-=4;
+ }
+#endif
+ while (num)
+ {
+ mul_add(rp[0],ap[0],bl,bh,c);
+ ap++; rp++; num--;
}
return(c);
}
@@ -177,18 +183,20 @@ BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
bl=LBITS(w);
bh=HBITS(w);
- for (;;)
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (num&~3)
{
mul(rp[0],ap[0],bl,bh,carry);
- if (--num == 0) break;
mul(rp[1],ap[1],bl,bh,carry);
- if (--num == 0) break;
mul(rp[2],ap[2],bl,bh,carry);
- if (--num == 0) break;
mul(rp[3],ap[3],bl,bh,carry);
- if (--num == 0) break;
- ap+=4;
- rp+=4;
+ ap+=4; rp+=4; num-=4;
+ }
+#endif
+ while (num)
+ {
+ mul(rp[0],ap[0],bl,bh,carry);
+ ap++; rp++; num--;
}
return(carry);
}
@@ -197,22 +205,21 @@ void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
{
assert(n >= 0);
if (n <= 0) return;
- for (;;)
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (n&~3)
{
sqr64(r[0],r[1],a[0]);
- if (--n == 0) break;
-
sqr64(r[2],r[3],a[1]);
- if (--n == 0) break;
-
sqr64(r[4],r[5],a[2]);
- if (--n == 0) break;
-
sqr64(r[6],r[7],a[3]);
- if (--n == 0) break;
-
- a+=4;
- r+=8;
+ a+=4; r+=8; n-=4;
+ }
+#endif
+ while (n)
+ {
+ sqr64(r[0],r[1],a[0]);
+ a++; r+=2; n--;
}
}
@@ -303,31 +310,30 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
assert(n >= 0);
if (n <= 0) return((BN_ULONG)0);
- for (;;)
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (n&~3)
{
ll+=(BN_ULLONG)a[0]+b[0];
r[0]=(BN_ULONG)ll&BN_MASK2;
ll>>=BN_BITS2;
- if (--n <= 0) break;
-
ll+=(BN_ULLONG)a[1]+b[1];
r[1]=(BN_ULONG)ll&BN_MASK2;
ll>>=BN_BITS2;
- if (--n <= 0) break;
-
ll+=(BN_ULLONG)a[2]+b[2];
r[2]=(BN_ULONG)ll&BN_MASK2;
ll>>=BN_BITS2;
- if (--n <= 0) break;
-
ll+=(BN_ULLONG)a[3]+b[3];
r[3]=(BN_ULONG)ll&BN_MASK2;
ll>>=BN_BITS2;
- if (--n <= 0) break;
-
- a+=4;
- b+=4;
- r+=4;
+ a+=4; b+=4; r+=4; n-=4;
+ }
+#endif
+ while (n)
+ {
+ ll+=(BN_ULLONG)a[0]+b[0];
+ r[0]=(BN_ULONG)ll&BN_MASK2;
+ ll>>=BN_BITS2;
+ a++; b++; r++; n--;
}
return((BN_ULONG)ll);
}
@@ -340,7 +346,8 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
if (n <= 0) return((BN_ULONG)0);
c=0;
- for (;;)
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (n&~3)
{
t=a[0];
t=(t+c)&BN_MASK2;
@@ -348,35 +355,36 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
l=(t+b[0])&BN_MASK2;
c+=(l < t);
r[0]=l;
- if (--n <= 0) break;
-
t=a[1];
t=(t+c)&BN_MASK2;
c=(t < c);
l=(t+b[1])&BN_MASK2;
c+=(l < t);
r[1]=l;
- if (--n <= 0) break;
-
t=a[2];
t=(t+c)&BN_MASK2;
c=(t < c);
l=(t+b[2])&BN_MASK2;
c+=(l < t);
r[2]=l;
- if (--n <= 0) break;
-
t=a[3];
t=(t+c)&BN_MASK2;
c=(t < c);
l=(t+b[3])&BN_MASK2;
c+=(l < t);
r[3]=l;
- if (--n <= 0) break;
-
- a+=4;
- b+=4;
- r+=4;
+ a+=4; b+=4; r+=4; n-=4;
+ }
+#endif
+ while(n)
+ {
+ t=a[0];
+ t=(t+c)&BN_MASK2;
+ c=(t < c);
+ l=(t+b[0])&BN_MASK2;
+ c+=(l < t);
+ r[0]=l;
+ a++; b++; r++; n--;
}
return((BN_ULONG)c);
}
@@ -390,36 +398,35 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
assert(n >= 0);
if (n <= 0) return((BN_ULONG)0);
- for (;;)
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (n&~3)
{
t1=a[0]; t2=b[0];
r[0]=(t1-t2-c)&BN_MASK2;
if (t1 != t2) c=(t1 < t2);
- if (--n <= 0) break;
-
t1=a[1]; t2=b[1];
r[1]=(t1-t2-c)&BN_MASK2;
if (t1 != t2) c=(t1 < t2);
- if (--n <= 0) break;
-
t1=a[2]; t2=b[2];
r[2]=(t1-t2-c)&BN_MASK2;
if (t1 != t2) c=(t1 < t2);
- if (--n <= 0) break;
-
t1=a[3]; t2=b[3];
r[3]=(t1-t2-c)&BN_MASK2;
if (t1 != t2) c=(t1 < t2);
- if (--n <= 0) break;
-
- a+=4;
- b+=4;
- r+=4;
+ a+=4; b+=4; r+=4; n-=4;
+ }
+#endif
+ while (n)
+ {
+ t1=a[0]; t2=b[0];
+ r[0]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ a++; b++; r++; n--;
}
return(c);
}
-#ifdef BN_MUL_COMBA
+#if defined(BN_MUL_COMBA) && !defined(OPENSSL_SMALL_FOOTPRINT)
#undef bn_mul_comba8
#undef bn_mul_comba4
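
The loop rewrites above drop the fall-through "finish the last 1-3 words" endings in favour of a 4-way unrolled main loop, compiled out under OPENSSL_SMALL_FOOTPRINT, followed by a plain one-word tail loop. A sketch of that pattern, assuming a trivial per-word operation in place of mul()/mul_add() (SMALL_FOOTPRINT and process_words are illustrative names):

#define SMALL_FOOTPRINT 0   /* build-time switch, analogous to OPENSSL_SMALL_FOOTPRINT */

static void process_words(unsigned long *rp, const unsigned long *ap, int num)
	{
#if !SMALL_FOOTPRINT
	while (num & ~3)                /* main loop: 4 words per iteration */
		{
		rp[0] = ap[0] + 1;
		rp[1] = ap[1] + 1;
		rp[2] = ap[2] + 1;
		rp[3] = ap[3] + 1;
		ap += 4; rp += 4; num -= 4;
		}
#endif
	while (num)                     /* tail: one word at a time */
		{
		rp[0] = ap[0] + 1;
		ap++; rp++; num--;
		}
	}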
@@ -820,18 +827,134 @@ void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
r[6]=c1;
r[7]=c2;
}
+
+#ifdef OPENSSL_NO_ASM
+#ifdef OPENSSL_BN_ASM_MONT
+#include <alloca.h>
+/*
+ * This is essentially reference implementation, which may or may not
+ * result in performance improvement. E.g. on IA-32 this routine was
+ * observed to give 40% faster rsa1024 private key operations and 10%
+ * faster rsa4096 ones, while on AMD64 it improves rsa1024 sign only
+ * by 10% and *worsens* rsa4096 sign by 15%. Once again, it's a
+ * reference implementation, one to be used as starting point for
+ * platform-specific assembler. Mentioned numbers apply to compiler
+ * generated code compiled with and without -DOPENSSL_BN_ASM_MONT and
+ * can vary not only from platform to platform, but even for compiler
+ * versions. Assembler vs. assembler improvement coefficients can
+ * [and are known to] differ and are to be documented elsewhere.
+ */
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0p, int num)
+ {
+ BN_ULONG c0,c1,ml,*tp,n0;
+#ifdef mul64
+ BN_ULONG mh;
+#endif
+ volatile BN_ULONG *vp;
+ int i=0,j;
+
+#if 0 /* template for platform-specific implementation */
+ if (ap==bp) return bn_sqr_mont(rp,ap,np,n0p,num);
+#endif
+ vp = tp = alloca((num+2)*sizeof(BN_ULONG));
+
+ n0 = *n0p;
+
+ c0 = 0;
+ ml = bp[0];
+#ifdef mul64
+ mh = HBITS(ml);
+ ml = LBITS(ml);
+ for (j=0;j<num;++j)
+ mul(tp[j],ap[j],ml,mh,c0);
+#else
+ for (j=0;j<num;++j)
+ mul(tp[j],ap[j],ml,c0);
+#endif
+
+ tp[num] = c0;
+ tp[num+1] = 0;
+ goto enter;
+
+ for(i=0;i<num;i++)
+ {
+ c0 = 0;
+ ml = bp[i];
+#ifdef mul64
+ mh = HBITS(ml);
+ ml = LBITS(ml);
+ for (j=0;j<num;++j)
+ mul_add(tp[j],ap[j],ml,mh,c0);
+#else
+ for (j=0;j<num;++j)
+ mul_add(tp[j],ap[j],ml,c0);
+#endif
+ c1 = (tp[num] + c0)&BN_MASK2;
+ tp[num] = c1;
+ tp[num+1] = (c1<c0?1:0);
+ enter:
+ c1 = tp[0];
+ ml = (c1*n0)&BN_MASK2;
+ c0 = 0;
+#ifdef mul64
+ mh = HBITS(ml);
+ ml = LBITS(ml);
+ mul_add(c1,np[0],ml,mh,c0);
+#else
+ mul_add(c1,ml,np[0],c0);
+#endif
+ for(j=1;j<num;j++)
+ {
+ c1 = tp[j];
+#ifdef mul64
+ mul_add(c1,np[j],ml,mh,c0);
+#else
+ mul_add(c1,ml,np[j],c0);
+#endif
+ tp[j-1] = c1&BN_MASK2;
+ }
+ c1 = (tp[num] + c0)&BN_MASK2;
+ tp[num-1] = c1;
+ tp[num] = tp[num+1] + (c1<c0?1:0);
+ }
+
+ if (tp[num]!=0 || tp[num-1]>=np[num-1])
+ {
+ c0 = bn_sub_words(rp,tp,np,num);
+ if (tp[num]!=0 || c0==0)
+ {
+ for(i=0;i<num+2;i++) vp[i] = 0;
+ return 1;
+ }
+ }
+ for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
+ vp[num] = 0;
+ vp[num+1] = 0;
+ return 1;
+ }
+#else
+/*
+ * Return value of 0 indicates that multiplication/convolution was not
+ * performed to signal the caller to fall down to alternative/original
+ * code-path.
+ */
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num)
+{ return 0; }
+#endif /* OPENSSL_BN_ASM_MONT */
+#endif
+
#else /* !BN_MUL_COMBA */
/* hmm... is it faster just to do a multiply? */
#undef bn_sqr_comba4
-void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
{
BN_ULONG t[8];
bn_sqr_normal(r,a,4,t);
}
#undef bn_sqr_comba8
-void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
{
BN_ULONG t[16];
bn_sqr_normal(r,a,8,t);
@@ -857,4 +980,51 @@ void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
}
+#ifdef OPENSSL_NO_ASM
+#ifdef OPENSSL_BN_ASM_MONT
+#include <alloca.h>
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0p, int num)
+ {
+ BN_ULONG c0,c1,*tp,n0=*n0p;
+ volatile BN_ULONG *vp;
+ int i=0,j;
+
+ vp = tp = alloca((num+2)*sizeof(BN_ULONG));
+
+ for(i=0;i<=num;i++) tp[i]=0;
+
+ for(i=0;i<num;i++)
+ {
+ c0 = bn_mul_add_words(tp,ap,num,bp[i]);
+ c1 = (tp[num] + c0)&BN_MASK2;
+ tp[num] = c1;
+ tp[num+1] = (c1<c0?1:0);
+
+ c0 = bn_mul_add_words(tp,np,num,tp[0]*n0);
+ c1 = (tp[num] + c0)&BN_MASK2;
+ tp[num] = c1;
+ tp[num+1] += (c1<c0?1:0);
+ for(j=0;j<=num;j++) tp[j]=tp[j+1];
+ }
+
+ if (tp[num]!=0 || tp[num-1]>=np[num-1])
+ {
+ c0 = bn_sub_words(rp,tp,np,num);
+ if (tp[num]!=0 || c0==0)
+ {
+ for(i=0;i<num+2;i++) vp[i] = 0;
+ return 1;
+ }
+ }
+ for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
+ vp[num] = 0;
+ vp[num+1] = 0;
+ return 1;
+ }
+#else
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num)
+{ return 0; }
+#endif /* OPENSSL_BN_ASM_MONT */
+#endif
+
#endif /* !BN_MUL_COMBA */
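
The reference bn_mul_mont added above interleaves multiplication with word-level Montgomery reduction: each round adds m*N with m = tp[0]*n0 mod 2^BN_BITS2 so the lowest limb cancels, then shifts the accumulator down one word. A sketch of a single reduction round on 32-bit limbs (mont_round, NWORDS and the uint32_t limb type are illustrative, not part of the patch):

#include <stdint.h>

#define NWORDS 2   /* tiny modulus size, only to keep the sketch short */

/* One round of word-level Montgomery reduction on a little-endian limb array
 * t[] of NWORDS+2 limbs: add m*N so t[0] becomes zero, then drop that limb.
 * n0 is -N^{-1} mod 2^32, exactly the role of mont->n0[0]. */
static void mont_round(uint32_t t[NWORDS + 2], const uint32_t N[NWORDS], uint32_t n0)
	{
	uint32_t m = t[0] * n0;           /* multiplier that zeroes the low limb */
	uint64_t carry = 0;
	int i;

	for (i = 0; i < NWORDS; i++)      /* t += m * N, limb by limb */
		{
		uint64_t acc = (uint64_t)m * N[i] + t[i] + carry;
		t[i] = (uint32_t)acc;
		carry = acc >> 32;
		}
	for (; carry && i < NWORDS + 2; i++)   /* propagate the carry upward */
		{
		uint64_t acc = (uint64_t)t[i] + carry;
		t[i] = (uint32_t)acc;
		carry = acc >> 32;
		}
	for (i = 0; i < NWORDS + 1; i++)  /* t >>= 32: discard the now-zero low limb */
		t[i] = t[i + 1];
	t[NWORDS + 1] = 0;
	}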
diff --git a/crypto/bn/bn_blind.c b/crypto/bn/bn_blind.c
index ca7f996bc8a1..9ed8bc2b40b8 100644
--- a/crypto/bn/bn_blind.c
+++ b/crypto/bn/bn_blind.c
@@ -1,6 +1,6 @@
/* crypto/bn/bn_blind.c */
/* ====================================================================
- * Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved.
+ * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -121,8 +121,11 @@ struct bn_blinding_st
BIGNUM *Ai;
BIGNUM *e;
BIGNUM *mod; /* just a reference */
+#ifndef OPENSSL_NO_DEPRECATED
unsigned long thread_id; /* added in OpenSSL 0.9.6j and 0.9.7b;
* used only by crypto/rsa/rsa_eay.c, rsa_lib.c */
+#endif
+ CRYPTO_THREADID tid;
int counter;
unsigned long flags;
BN_MONT_CTX *m_ctx;
@@ -131,7 +134,7 @@ struct bn_blinding_st
BN_MONT_CTX *m_ctx);
};
-BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, /* const */ BIGNUM *mod)
+BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod)
{
BN_BLINDING *ret=NULL;
@@ -161,6 +164,7 @@ BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, /* const */ BIGN
* to indicate that this is never-used fresh blinding
* that does not need updating before first use. */
ret->counter = -1;
+ CRYPTO_THREADID_current(&ret->tid);
return(ret);
err:
if (ret != NULL) BN_BLINDING_free(ret);
@@ -272,6 +276,7 @@ int BN_BLINDING_invert_ex(BIGNUM *n, const BIGNUM *r, BN_BLINDING *b, BN_CTX *ct
return(ret);
}
+#ifndef OPENSSL_NO_DEPRECATED
unsigned long BN_BLINDING_get_thread_id(const BN_BLINDING *b)
{
return b->thread_id;
@@ -281,6 +286,12 @@ void BN_BLINDING_set_thread_id(BN_BLINDING *b, unsigned long n)
{
b->thread_id = n;
}
+#endif
+
+CRYPTO_THREADID *BN_BLINDING_thread_id(BN_BLINDING *b)
+ {
+ return &b->tid;
+ }
unsigned long BN_BLINDING_get_flags(const BN_BLINDING *b)
{
@@ -293,7 +304,7 @@ void BN_BLINDING_set_flags(BN_BLINDING *b, unsigned long flags)
}
BN_BLINDING *BN_BLINDING_create_param(BN_BLINDING *b,
- const BIGNUM *e, /* const */ BIGNUM *m, BN_CTX *ctx,
+ const BIGNUM *e, BIGNUM *m, BN_CTX *ctx,
int (*bn_mod_exp)(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx),
BN_MONT_CTX *m_ctx)
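
With the unsigned long thread_id deprecated, the owner of a BN_BLINDING is now tracked as the CRYPTO_THREADID captured in BN_BLINDING_new. A hedged sketch of how a caller sharing a blinding across threads might check ownership before reuse (blinding_is_local is an illustrative name; BN_BLINDING_thread_id, CRYPTO_THREADID_current and CRYPTO_THREADID_cmp are the 1.0.0 API used here):

#include <openssl/bn.h>
#include <openssl/crypto.h>

/* Return non-zero when the cached blinding was created by the calling thread,
 * similar to the ownership check done by the RSA code. */
static int blinding_is_local(BN_BLINDING *b)
	{
	CRYPTO_THREADID cur;

	CRYPTO_THREADID_current(&cur);                       /* id of the calling thread */
	return CRYPTO_THREADID_cmp(&cur, BN_BLINDING_thread_id(b)) == 0;
	}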
diff --git a/crypto/bn/bn_ctx.c b/crypto/bn/bn_ctx.c
index b3452f1a91e3..3f2256f67575 100644
--- a/crypto/bn/bn_ctx.c
+++ b/crypto/bn/bn_ctx.c
@@ -161,7 +161,7 @@ static void ctxdbg(BN_CTX *ctx)
fprintf(stderr,"(%08x): ", (unsigned int)ctx);
while(bnidx < ctx->used)
{
- fprintf(stderr,"%02x ", item->vals[bnidx++ % BN_CTX_POOL_SIZE].dmax);
+ fprintf(stderr,"%03x ", item->vals[bnidx++ % BN_CTX_POOL_SIZE].dmax);
if(!(bnidx % BN_CTX_POOL_SIZE))
item = item->next;
}
@@ -171,8 +171,8 @@ static void ctxdbg(BN_CTX *ctx)
while(fpidx < stack->depth)
{
while(bnidx++ < stack->indexes[fpidx])
- fprintf(stderr," ");
- fprintf(stderr,"^^ ");
+ fprintf(stderr," ");
+ fprintf(stderr,"^^^ ");
bnidx++;
fpidx++;
}
diff --git a/crypto/bn/bn_div.c b/crypto/bn/bn_div.c
index 78c6507113bf..52b3304293a5 100644
--- a/crypto/bn/bn_div.c
+++ b/crypto/bn/bn_div.c
@@ -169,15 +169,13 @@ int BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d,
#endif /* OPENSSL_NO_ASM */
-/* BN_div[_no_branch] computes dv := num / divisor, rounding towards
+/* BN_div computes dv := num / divisor, rounding towards
* zero, and sets up rm such that dv*divisor + rm = num holds.
* Thus:
* dv->neg == num->neg ^ divisor->neg (unless the result is zero)
* rm->neg == num->neg (unless the remainder is zero)
* If 'dv' or 'rm' is NULL, the respective value is not returned.
*/
-static int BN_div_no_branch(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num,
- const BIGNUM *divisor, BN_CTX *ctx);
int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
BN_CTX *ctx)
{
@@ -186,6 +184,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
BN_ULONG *resp,*wnump;
BN_ULONG d0,d1;
int num_n,div_n;
+ int no_branch=0;
/* Invalid zero-padding would have particularly bad consequences
* in the case of 'num', so don't just rely on bn_check_top() for this one
@@ -200,7 +199,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
if ((BN_get_flags(num, BN_FLG_CONSTTIME) != 0) || (BN_get_flags(divisor, BN_FLG_CONSTTIME) != 0))
{
- return BN_div_no_branch(dv, rm, num, divisor, ctx);
+ no_branch=1;
}
bn_check_top(dv);
@@ -214,7 +213,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
return(0);
}
- if (BN_ucmp(num,divisor) < 0)
+ if (!no_branch && BN_ucmp(num,divisor) < 0)
{
if (rm != NULL)
{ if (BN_copy(rm,num) == NULL) return(0); }
@@ -239,242 +238,25 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
norm_shift+=BN_BITS2;
if (!(BN_lshift(snum,num,norm_shift))) goto err;
snum->neg=0;
- div_n=sdiv->top;
- num_n=snum->top;
- loop=num_n-div_n;
- /* Lets setup a 'window' into snum
- * This is the part that corresponds to the current
- * 'area' being divided */
- wnum.neg = 0;
- wnum.d = &(snum->d[loop]);
- wnum.top = div_n;
- /* only needed when BN_ucmp messes up the values between top and max */
- wnum.dmax = snum->dmax - loop; /* so we don't step out of bounds */
-
- /* Get the top 2 words of sdiv */
- /* div_n=sdiv->top; */
- d0=sdiv->d[div_n-1];
- d1=(div_n == 1)?0:sdiv->d[div_n-2];
-
- /* pointer to the 'top' of snum */
- wnump= &(snum->d[num_n-1]);
-
- /* Setup to 'res' */
- res->neg= (num->neg^divisor->neg);
- if (!bn_wexpand(res,(loop+1))) goto err;
- res->top=loop;
- resp= &(res->d[loop-1]);
-
- /* space for temp */
- if (!bn_wexpand(tmp,(div_n+1))) goto err;
- if (BN_ucmp(&wnum,sdiv) >= 0)
+ if (no_branch)
{
- /* If BN_DEBUG_RAND is defined BN_ucmp changes (via
- * bn_pollute) the const bignum arguments =>
- * clean the values between top and max again */
- bn_clear_top2max(&wnum);
- bn_sub_words(wnum.d, wnum.d, sdiv->d, div_n);
- *resp=1;
- }
- else
- res->top--;
- /* if res->top == 0 then clear the neg value otherwise decrease
- * the resp pointer */
- if (res->top == 0)
- res->neg = 0;
- else
- resp--;
-
- for (i=0; i<loop-1; i++, wnump--, resp--)
- {
- BN_ULONG q,l0;
- /* the first part of the loop uses the top two words of
- * snum and sdiv to calculate a BN_ULONG q such that
- * | wnum - sdiv * q | < sdiv */
-#if defined(BN_DIV3W) && !defined(OPENSSL_NO_ASM)
- BN_ULONG bn_div_3_words(BN_ULONG*,BN_ULONG,BN_ULONG);
- q=bn_div_3_words(wnump,d1,d0);
-#else
- BN_ULONG n0,n1,rem=0;
-
- n0=wnump[0];
- n1=wnump[-1];
- if (n0 == d0)
- q=BN_MASK2;
- else /* n0 < d0 */
+ /* Since we don't know whether snum is larger than sdiv,
+ * we pad snum with enough zeroes without changing its
+ * value.
+ */
+ if (snum->top <= sdiv->top+1)
{
-#ifdef BN_LLONG
- BN_ULLONG t2;
-
-#if defined(BN_LLONG) && defined(BN_DIV2W) && !defined(bn_div_words)
- q=(BN_ULONG)(((((BN_ULLONG)n0)<<BN_BITS2)|n1)/d0);
-#else
- q=bn_div_words(n0,n1,d0);
-#ifdef BN_DEBUG_LEVITTE
- fprintf(stderr,"DEBUG: bn_div_words(0x%08X,0x%08X,0x%08\
-X) -> 0x%08X\n",
- n0, n1, d0, q);
-#endif
-#endif
-
-#ifndef REMAINDER_IS_ALREADY_CALCULATED
- /*
- * rem doesn't have to be BN_ULLONG. The least we
- * know it's less that d0, isn't it?
- */
- rem=(n1-q*d0)&BN_MASK2;
-#endif
- t2=(BN_ULLONG)d1*q;
-
- for (;;)
- {
- if (t2 <= ((((BN_ULLONG)rem)<<BN_BITS2)|wnump[-2]))
- break;
- q--;
- rem += d0;
- if (rem < d0) break; /* don't let rem overflow */
- t2 -= d1;
- }
-#else /* !BN_LLONG */
- BN_ULONG t2l,t2h;
-#if !defined(BN_UMULT_LOHI) && !defined(BN_UMULT_HIGH)
- BN_ULONG ql,qh;
-#endif
-
- q=bn_div_words(n0,n1,d0);
-#ifdef BN_DEBUG_LEVITTE
- fprintf(stderr,"DEBUG: bn_div_words(0x%08X,0x%08X,0x%08\
-X) -> 0x%08X\n",
- n0, n1, d0, q);
-#endif
-#ifndef REMAINDER_IS_ALREADY_CALCULATED
- rem=(n1-q*d0)&BN_MASK2;
-#endif
-
-#if defined(BN_UMULT_LOHI)
- BN_UMULT_LOHI(t2l,t2h,d1,q);
-#elif defined(BN_UMULT_HIGH)
- t2l = d1 * q;
- t2h = BN_UMULT_HIGH(d1,q);
-#else
- t2l=LBITS(d1); t2h=HBITS(d1);
- ql =LBITS(q); qh =HBITS(q);
- mul64(t2l,t2h,ql,qh); /* t2=(BN_ULLONG)d1*q; */
-#endif
-
- for (;;)
- {
- if ((t2h < rem) ||
- ((t2h == rem) && (t2l <= wnump[-2])))
- break;
- q--;
- rem += d0;
- if (rem < d0) break; /* don't let rem overflow */
- if (t2l < d1) t2h--; t2l -= d1;
- }
-#endif /* !BN_LLONG */
+ if (bn_wexpand(snum, sdiv->top + 2) == NULL) goto err;
+ for (i = snum->top; i < sdiv->top + 2; i++) snum->d[i] = 0;
+ snum->top = sdiv->top + 2;
}
-#endif /* !BN_DIV3W */
-
- l0=bn_mul_words(tmp->d,sdiv->d,div_n,q);
- tmp->d[div_n]=l0;
- wnum.d--;
- /* ingore top values of the bignums just sub the two
- * BN_ULONG arrays with bn_sub_words */
- if (bn_sub_words(wnum.d, wnum.d, tmp->d, div_n+1))
+ else
{
- /* Note: As we have considered only the leading
- * two BN_ULONGs in the calculation of q, sdiv * q
- * might be greater than wnum (but then (q-1) * sdiv
- * is less or equal than wnum)
- */
- q--;
- if (bn_add_words(wnum.d, wnum.d, sdiv->d, div_n))
- /* we can't have an overflow here (assuming
- * that q != 0, but if q == 0 then tmp is
- * zero anyway) */
- (*wnump)++;
+ if (bn_wexpand(snum, snum->top + 1) == NULL) goto err;
+ snum->d[snum->top] = 0;
+ snum->top ++;
}
- /* store part of the result */
- *resp = q;
- }
- bn_correct_top(snum);
- if (rm != NULL)
- {
- /* Keep a copy of the neg flag in num because if rm==num
- * BN_rshift() will overwrite it.
- */
- int neg = num->neg;
- BN_rshift(rm,snum,norm_shift);
- if (!BN_is_zero(rm))
- rm->neg = neg;
- bn_check_top(rm);
- }
- BN_CTX_end(ctx);
- return(1);
-err:
- bn_check_top(rm);
- BN_CTX_end(ctx);
- return(0);
- }
-
-
-/* BN_div_no_branch is a special version of BN_div. It does not contain
- * branches that may leak sensitive information.
- */
-static int BN_div_no_branch(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num,
- const BIGNUM *divisor, BN_CTX *ctx)
- {
- int norm_shift,i,loop;
- BIGNUM *tmp,wnum,*snum,*sdiv,*res;
- BN_ULONG *resp,*wnump;
- BN_ULONG d0,d1;
- int num_n,div_n;
-
- bn_check_top(dv);
- bn_check_top(rm);
- /* bn_check_top(num); */ /* 'num' has been checked in BN_div() */
- bn_check_top(divisor);
-
- if (BN_is_zero(divisor))
- {
- BNerr(BN_F_BN_DIV_NO_BRANCH,BN_R_DIV_BY_ZERO);
- return(0);
- }
-
- BN_CTX_start(ctx);
- tmp=BN_CTX_get(ctx);
- snum=BN_CTX_get(ctx);
- sdiv=BN_CTX_get(ctx);
- if (dv == NULL)
- res=BN_CTX_get(ctx);
- else res=dv;
- if (sdiv == NULL || res == NULL) goto err;
-
- /* First we normalise the numbers */
- norm_shift=BN_BITS2-((BN_num_bits(divisor))%BN_BITS2);
- if (!(BN_lshift(sdiv,divisor,norm_shift))) goto err;
- sdiv->neg=0;
- norm_shift+=BN_BITS2;
- if (!(BN_lshift(snum,num,norm_shift))) goto err;
- snum->neg=0;
-
- /* Since we don't know whether snum is larger than sdiv,
- * we pad snum with enough zeroes without changing its
- * value.
- */
- if (snum->top <= sdiv->top+1)
- {
- if (bn_wexpand(snum, sdiv->top + 2) == NULL) goto err;
- for (i = snum->top; i < sdiv->top + 2; i++) snum->d[i] = 0;
- snum->top = sdiv->top + 2;
- }
- else
- {
- if (bn_wexpand(snum, snum->top + 1) == NULL) goto err;
- snum->d[snum->top] = 0;
- snum->top ++;
}
div_n=sdiv->top;
@@ -500,12 +282,27 @@ static int BN_div_no_branch(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num,
/* Setup to 'res' */
res->neg= (num->neg^divisor->neg);
if (!bn_wexpand(res,(loop+1))) goto err;
- res->top=loop-1;
+ res->top=loop-no_branch;
resp= &(res->d[loop-1]);
/* space for temp */
if (!bn_wexpand(tmp,(div_n+1))) goto err;
+ if (!no_branch)
+ {
+ if (BN_ucmp(&wnum,sdiv) >= 0)
+ {
+ /* If BN_DEBUG_RAND is defined BN_ucmp changes (via
+ * bn_pollute) the const bignum arguments =>
+ * clean the values between top and max again */
+ bn_clear_top2max(&wnum);
+ bn_sub_words(wnum.d, wnum.d, sdiv->d, div_n);
+ *resp=1;
+ }
+ else
+ res->top--;
+ }
+
/* if res->top == 0 then clear the neg value otherwise decrease
* the resp pointer */
if (res->top == 0)
@@ -565,9 +362,6 @@ X) -> 0x%08X\n",
}
#else /* !BN_LLONG */
BN_ULONG t2l,t2h;
-#if !defined(BN_UMULT_LOHI) && !defined(BN_UMULT_HIGH)
- BN_ULONG ql,qh;
-#endif
q=bn_div_words(n0,n1,d0);
#ifdef BN_DEBUG_LEVITTE
@@ -585,9 +379,12 @@ X) -> 0x%08X\n",
t2l = d1 * q;
t2h = BN_UMULT_HIGH(d1,q);
#else
+ {
+ BN_ULONG ql, qh;
t2l=LBITS(d1); t2h=HBITS(d1);
ql =LBITS(q); qh =HBITS(q);
mul64(t2l,t2h,ql,qh); /* t2=(BN_ULLONG)d1*q; */
+ }
#endif
for (;;)
@@ -638,7 +435,7 @@ X) -> 0x%08X\n",
rm->neg = neg;
bn_check_top(rm);
}
- bn_correct_top(res);
+ if (no_branch) bn_correct_top(res);
BN_CTX_end(ctx);
return(1);
err:
@@ -646,5 +443,4 @@ err:
BN_CTX_end(ctx);
return(0);
}
-
#endif
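
BN_div_no_branch is folded into BN_div: the branch-reduced path is now taken whenever either operand carries BN_FLG_CONSTTIME. A small caller-side sketch (div_consttime is an illustrative name; the flag and functions are the public BN API):

#include <openssl/bn.h>

/* Divide num by divisor, asking BN_div for its branch-reduced code path by
 * tagging the (secret) divisor with BN_FLG_CONSTTIME before the call. */
static int div_consttime(BIGNUM *q, BIGNUM *r,
                         const BIGNUM *num, BIGNUM *divisor, BN_CTX *ctx)
	{
	BN_set_flags(divisor, BN_FLG_CONSTTIME);   /* selects no_branch=1 inside BN_div */
	return BN_div(q, r, num, divisor, ctx);
	}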
diff --git a/crypto/bn/bn_exp.c b/crypto/bn/bn_exp.c
index d9b6c737fc82..2abf6fd67871 100644
--- a/crypto/bn/bn_exp.c
+++ b/crypto/bn/bn_exp.c
@@ -113,6 +113,18 @@
#include "cryptlib.h"
#include "bn_lcl.h"
+#include <stdlib.h>
+#ifdef _WIN32
+# include <malloc.h>
+# ifndef alloca
+# define alloca _alloca
+# endif
+#elif defined(__GNUC__)
+# ifndef alloca
+# define alloca(s) __builtin_alloca((s))
+# endif
+#endif
+
/* maximum precomputation table size for *variable* sliding windows */
#define TABLE_SIZE 32
@@ -522,23 +534,17 @@ err:
* as cache lines are concerned. The following functions are used to transfer a BIGNUM
* from/to that table. */
-static int MOD_EXP_CTIME_COPY_TO_PREBUF(BIGNUM *b, int top, unsigned char *buf, int idx, int width)
+static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top, unsigned char *buf, int idx, int width)
{
size_t i, j;
- if (bn_wexpand(b, top) == NULL)
- return 0;
- while (b->top < top)
- {
- b->d[b->top++] = 0;
- }
-
+ if (top > b->top)
+ top = b->top; /* this works because 'buf' is explicitly zeroed */
for (i = 0, j=idx; i < top * sizeof b->d[0]; i++, j+=width)
{
buf[j] = ((unsigned char*)b->d)[i];
}
- bn_correct_top(b);
return 1;
}
@@ -561,7 +567,7 @@ static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, unsigned char *buf
/* Given a pointer value, compute the next address that is a cache line multiple. */
#define MOD_EXP_CTIME_ALIGN(x_) \
- ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((BN_ULONG)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
+ ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
/* This variant of BN_mod_exp_mont() uses fixed windows and the special
* precomputation memory layout to limit data-dependency to a minimum
@@ -572,17 +578,15 @@ static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, unsigned char *buf
int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
{
- int i,bits,ret=0,idx,window,wvalue;
+ int i,bits,ret=0,window,wvalue;
int top;
- BIGNUM *r;
- const BIGNUM *aa;
BN_MONT_CTX *mont=NULL;
int numPowers;
unsigned char *powerbufFree=NULL;
int powerbufLen = 0;
unsigned char *powerbuf=NULL;
- BIGNUM *computeTemp=NULL, *am=NULL;
+ BIGNUM tmp, am;
bn_check_top(a);
bn_check_top(p);
@@ -602,10 +606,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
return ret;
}
- /* Initialize BIGNUM context and allocate intermediate result */
BN_CTX_start(ctx);
- r = BN_CTX_get(ctx);
- if (r == NULL) goto err;
/* Allocate a montgomery context if it was not supplied by the caller.
* If this is not done, things will break in the montgomery part.
@@ -620,40 +621,154 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
/* Get the window size to use with size of p. */
window = BN_window_bits_for_ctime_exponent_size(bits);
+#if defined(OPENSSL_BN_ASM_MONT5)
+ if (window==6 && bits<=1024) window=5; /* ~5% improvement of 2048-bit RSA sign */
+#endif
/* Allocate a buffer large enough to hold all of the pre-computed
- * powers of a.
+ * powers of am, am itself and tmp.
*/
numPowers = 1 << window;
- powerbufLen = sizeof(m->d[0])*top*numPowers;
+ powerbufLen = sizeof(m->d[0])*(top*numPowers +
+ ((2*top)>numPowers?(2*top):numPowers));
+#ifdef alloca
+ if (powerbufLen < 3072)
+ powerbufFree = alloca(powerbufLen+MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH);
+ else
+#endif
if ((powerbufFree=(unsigned char*)OPENSSL_malloc(powerbufLen+MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH)) == NULL)
goto err;
powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree);
memset(powerbuf, 0, powerbufLen);
- /* Initialize the intermediate result. Do this early to save double conversion,
- * once each for a^0 and intermediate result.
- */
- if (!BN_to_montgomery(r,BN_value_one(),mont,ctx)) goto err;
- if (!MOD_EXP_CTIME_COPY_TO_PREBUF(r, top, powerbuf, 0, numPowers)) goto err;
+#ifdef alloca
+ if (powerbufLen < 3072)
+ powerbufFree = NULL;
+#endif
- /* Initialize computeTemp as a^1 with montgomery precalcs */
- computeTemp = BN_CTX_get(ctx);
- am = BN_CTX_get(ctx);
- if (computeTemp==NULL || am==NULL) goto err;
+ /* lay down tmp and am right after powers table */
+ tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0])*top*numPowers);
+ am.d = tmp.d + top;
+ tmp.top = am.top = 0;
+ tmp.dmax = am.dmax = top;
+ tmp.neg = am.neg = 0;
+ tmp.flags = am.flags = BN_FLG_STATIC_DATA;
+
+ /* prepare a^0 in Montgomery domain */
+#if 1
+ if (!BN_to_montgomery(&tmp,BN_value_one(),mont,ctx)) goto err;
+#else
+ tmp.d[0] = (0-m->d[0])&BN_MASK2; /* 2^(top*BN_BITS2) - m */
+ for (i=1;i<top;i++)
+ tmp.d[i] = (~m->d[i])&BN_MASK2;
+ tmp.top = top;
+#endif
+ /* prepare a^1 in Montgomery domain */
if (a->neg || BN_ucmp(a,m) >= 0)
{
- if (!BN_mod(am,a,m,ctx))
- goto err;
- aa= am;
+ if (!BN_mod(&am,a,m,ctx)) goto err;
+ if (!BN_to_montgomery(&am,&am,mont,ctx)) goto err;
}
- else
- aa=a;
- if (!BN_to_montgomery(am,aa,mont,ctx)) goto err;
- if (!BN_copy(computeTemp, am)) goto err;
- if (!MOD_EXP_CTIME_COPY_TO_PREBUF(am, top, powerbuf, 1, numPowers)) goto err;
+ else if (!BN_to_montgomery(&am,a,mont,ctx)) goto err;
+
+#if defined(OPENSSL_BN_ASM_MONT5)
+ /* This optimization uses ideas from http://eprint.iacr.org/2011/239,
+ * specifically optimization of cache-timing attack countermeasures
+ * and pre-computation optimization. */
+
+ /* Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
+ * 512-bit RSA is hardly relevant, we omit it to spare size... */
+ if (window==5)
+ {
+ void bn_mul_mont_gather5(BN_ULONG *rp,const BN_ULONG *ap,
+ const void *table,const BN_ULONG *np,
+ const BN_ULONG *n0,int num,int power);
+ void bn_scatter5(const BN_ULONG *inp,size_t num,
+ void *table,size_t power);
+ void bn_gather5(BN_ULONG *out,size_t num,
+ void *table,size_t power);
+
+ BN_ULONG *np=mont->N.d, *n0=mont->n0;
+
+ /* BN_to_montgomery can contaminate words above .top
+ * [in BN_DEBUG[_DEBUG] build]... */
+ for (i=am.top; i<top; i++) am.d[i]=0;
+ for (i=tmp.top; i<top; i++) tmp.d[i]=0;
+
+ bn_scatter5(tmp.d,top,powerbuf,0);
+ bn_scatter5(am.d,am.top,powerbuf,1);
+ bn_mul_mont(tmp.d,am.d,am.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,2);
+
+#if 0
+ for (i=3; i<32; i++)
+ {
+ /* Calculate a^i = a^(i-1) * a */
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ }
+#else
+ /* same as above, but uses squaring for 1/2 of operations */
+ for (i=4; i<32; i*=2)
+ {
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ }
+ for (i=3; i<8; i+=2)
+ {
+ int j;
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ for (j=2*i; j<32; j*=2)
+ {
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,j);
+ }
+ }
+ for (; i<16; i+=2)
+ {
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,2*i);
+ }
+ for (; i<32; i+=2)
+ {
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ }
+#endif
+ bits--;
+ for (wvalue=0, i=bits%5; i>=0; i--,bits--)
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+ bn_gather5(tmp.d,top,powerbuf,wvalue);
+
+ /* Scan the exponent one window at a time starting from the most
+ * significant bits.
+ */
+ while (bits >= 0)
+ {
+ for (wvalue=0, i=0; i<5; i++,bits--)
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont_gather5(tmp.d,tmp.d,powerbuf,np,n0,top,wvalue);
+ }
+
+ tmp.top=top;
+ bn_correct_top(&tmp);
+ }
+ else
+#endif
+ {
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, numPowers)) goto err;
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, numPowers)) goto err;
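
In the hunk above, tmp and am are no longer fetched from the BN_CTX; they are overlaid on the tail of powerbuf as fixed-capacity BIGNUMs flagged BN_FLG_STATIC_DATA. A sketch of the same overlay on a caller-owned word array, assuming the public 1.0.x BIGNUM layout (bn_on_buffer is an illustrative name; such a BIGNUM must never be grown or passed to BN_free):

#include <openssl/bn.h>

/* Point a stack BIGNUM at a caller-owned array of 'top' words, the way
 * BN_mod_exp_mont_consttime lays tmp and am over powerbuf. */
static void bn_on_buffer(BIGNUM *bn, BN_ULONG *words, int top)
	{
	bn->d     = words;              /* limbs live in the caller's buffer    */
	bn->top   = 0;                  /* value starts out as zero             */
	bn->dmax  = top;                /* capacity: must never be bn_wexpand'd */
	bn->neg   = 0;
	bn->flags = BN_FLG_STATIC_DATA; /* tells BN_free() not to free ->d      */
	}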
/* If the window size is greater than 1, then calculate
* val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1)
@@ -662,62 +777,54 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
*/
if (window > 1)
{
- for (i=2; i<numPowers; i++)
+ if (!BN_mod_mul_montgomery(&tmp,&am,&am,mont,ctx)) goto err;
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2, numPowers)) goto err;
+ for (i=3; i<numPowers; i++)
{
/* Calculate a^i = a^(i-1) * a */
- if (!BN_mod_mul_montgomery(computeTemp,am,computeTemp,mont,ctx))
+ if (!BN_mod_mul_montgomery(&tmp,&am,&tmp,mont,ctx))
goto err;
- if (!MOD_EXP_CTIME_COPY_TO_PREBUF(computeTemp, top, powerbuf, i, numPowers)) goto err;
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i, numPowers)) goto err;
}
}
- /* Adjust the number of bits up to a multiple of the window size.
- * If the exponent length is not a multiple of the window size, then
- * this pads the most significant bits with zeros to normalize the
- * scanning loop to there's no special cases.
- *
- * * NOTE: Making the window size a power of two less than the native
- * * word size ensures that the padded bits won't go past the last
- * * word in the internal BIGNUM structure. Going past the end will
- * * still produce the correct result, but causes a different branch
- * * to be taken in the BN_is_bit_set function.
- */
- bits = ((bits+window-1)/window)*window;
- idx=bits-1; /* The top bit of the window */
-
- /* Scan the exponent one window at a time starting from the most
- * significant bits.
- */
- while (idx >= 0)
+ bits--;
+ for (wvalue=0, i=bits%window; i>=0; i--,bits--)
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+ if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp,top,powerbuf,wvalue,numPowers)) goto err;
+
+ /* Scan the exponent one window at a time starting from the most
+ * significant bits.
+ */
+ while (bits >= 0)
{
wvalue=0; /* The 'value' of the window */
/* Scan the window, squaring the result as we go */
- for (i=0; i<window; i++,idx--)
+ for (i=0; i<window; i++,bits--)
{
- if (!BN_mod_mul_montgomery(r,r,r,mont,ctx)) goto err;
- wvalue = (wvalue<<1)+BN_is_bit_set(p,idx);
+ if (!BN_mod_mul_montgomery(&tmp,&tmp,&tmp,mont,ctx)) goto err;
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
}
/* Fetch the appropriate pre-computed value from the pre-buf */
- if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(computeTemp, top, powerbuf, wvalue, numPowers)) goto err;
+ if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue, numPowers)) goto err;
/* Multiply the result into the intermediate result */
- if (!BN_mod_mul_montgomery(r,r,computeTemp,mont,ctx)) goto err;
+ if (!BN_mod_mul_montgomery(&tmp,&tmp,&am,mont,ctx)) goto err;
}
+ }
/* Convert the final result from montgomery to standard format */
- if (!BN_from_montgomery(rr,r,mont,ctx)) goto err;
+ if (!BN_from_montgomery(rr,&tmp,mont,ctx)) goto err;
ret=1;
err:
if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont);
if (powerbuf!=NULL)
{
OPENSSL_cleanse(powerbuf,powerbufLen);
- OPENSSL_free(powerbufFree);
+ if (powerbufFree) OPENSSL_free(powerbufFree);
}
- if (am!=NULL) BN_clear(am);
- if (computeTemp!=NULL) BN_clear(computeTemp);
BN_CTX_end(ctx);
return(ret);
}
@@ -988,4 +1095,3 @@ err:
bn_check_top(r);
return(ret);
}
-
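
The rewritten exponent loop consumes bits in fixed windows: an initial partial window of bits%window+1 bits, then repeated "square window times, multiply by the selected table entry". The same scanning order on toy 64-bit integers (pow_fixed_window and get_bit are illustrative; m must stay below 2^32 here so the products do not overflow):

#include <stdint.h>

#define WINDOW 5

static int get_bit(uint64_t e, int i) { return (int)((e >> i) & 1); }

/* Fixed-window left-to-right exponentiation, same scanning order as the
 * rewritten BN_mod_exp_mont_consttime loop; 'bits' is the bit length of e. */
static uint64_t pow_fixed_window(uint64_t a, uint64_t e, uint64_t m, int bits)
	{
	uint64_t table[1 << WINDOW];
	uint64_t r;
	int i, wvalue;

	table[0] = 1 % m;                              /* a^0 */
	for (i = 1; i < (1 << WINDOW); i++)            /* a^1 .. a^(2^WINDOW-1) */
		table[i] = (table[i - 1] * a) % m;

	bits--;                                        /* index of the top bit */
	for (wvalue = 0, i = bits % WINDOW; i >= 0; i--, bits--)
		wvalue = (wvalue << 1) + get_bit(e, bits); /* leading partial window */
	r = table[wvalue];

	while (bits >= 0)
		{
		for (wvalue = 0, i = 0; i < WINDOW; i++, bits--)
			{
			r = (r * r) % m;                       /* square once per bit */
			wvalue = (wvalue << 1) + get_bit(e, bits);
			}
		r = (r * table[wvalue]) % m;               /* multiply by table entry */
		}
	return r;
	}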
diff --git a/crypto/bn/bn_gf2m.c b/crypto/bn/bn_gf2m.c
index 5d90f1e88be2..8a4dc20ad980 100644
--- a/crypto/bn/bn_gf2m.c
+++ b/crypto/bn/bn_gf2m.c
@@ -94,6 +94,8 @@
#include "cryptlib.h"
#include "bn_lcl.h"
+#ifndef OPENSSL_NO_EC2M
+
/* Maximum number of iterations before BN_GF2m_mod_solve_quad_arr should fail. */
#define MAX_ITERATIONS 50
@@ -121,74 +123,13 @@ static const BN_ULONG SQR_tb[16] =
SQR_tb[(w) >> 12 & 0xF] << 24 | SQR_tb[(w) >> 8 & 0xF] << 16 | \
SQR_tb[(w) >> 4 & 0xF] << 8 | SQR_tb[(w) & 0xF]
#endif
-#ifdef SIXTEEN_BIT
-#define SQR1(w) \
- SQR_tb[(w) >> 12 & 0xF] << 8 | SQR_tb[(w) >> 8 & 0xF]
-#define SQR0(w) \
- SQR_tb[(w) >> 4 & 0xF] << 8 | SQR_tb[(w) & 0xF]
-#endif
-#ifdef EIGHT_BIT
-#define SQR1(w) \
- SQR_tb[(w) >> 4 & 0xF]
-#define SQR0(w) \
- SQR_tb[(w) & 15]
-#endif
+#if !defined(OPENSSL_BN_ASM_GF2m)
/* Product of two polynomials a, b each with degree < BN_BITS2 - 1,
* result is a polynomial r with degree < 2 * BN_BITS - 1
* The caller MUST ensure that the variables have the right amount
* of space allocated.
*/
-#ifdef EIGHT_BIT
-static void bn_GF2m_mul_1x1(BN_ULONG *r1, BN_ULONG *r0, const BN_ULONG a, const BN_ULONG b)
- {
- register BN_ULONG h, l, s;
- BN_ULONG tab[4], top1b = a >> 7;
- register BN_ULONG a1, a2;
-
- a1 = a & (0x7F); a2 = a1 << 1;
-
- tab[0] = 0; tab[1] = a1; tab[2] = a2; tab[3] = a1^a2;
-
- s = tab[b & 0x3]; l = s;
- s = tab[b >> 2 & 0x3]; l ^= s << 2; h = s >> 6;
- s = tab[b >> 4 & 0x3]; l ^= s << 4; h ^= s >> 4;
- s = tab[b >> 6 ]; l ^= s << 6; h ^= s >> 2;
-
- /* compensate for the top bit of a */
-
- if (top1b & 01) { l ^= b << 7; h ^= b >> 1; }
-
- *r1 = h; *r0 = l;
- }
-#endif
-#ifdef SIXTEEN_BIT
-static void bn_GF2m_mul_1x1(BN_ULONG *r1, BN_ULONG *r0, const BN_ULONG a, const BN_ULONG b)
- {
- register BN_ULONG h, l, s;
- BN_ULONG tab[4], top1b = a >> 15;
- register BN_ULONG a1, a2;
-
- a1 = a & (0x7FFF); a2 = a1 << 1;
-
- tab[0] = 0; tab[1] = a1; tab[2] = a2; tab[3] = a1^a2;
-
- s = tab[b & 0x3]; l = s;
- s = tab[b >> 2 & 0x3]; l ^= s << 2; h = s >> 14;
- s = tab[b >> 4 & 0x3]; l ^= s << 4; h ^= s >> 12;
- s = tab[b >> 6 & 0x3]; l ^= s << 6; h ^= s >> 10;
- s = tab[b >> 8 & 0x3]; l ^= s << 8; h ^= s >> 8;
- s = tab[b >>10 & 0x3]; l ^= s << 10; h ^= s >> 6;
- s = tab[b >>12 & 0x3]; l ^= s << 12; h ^= s >> 4;
- s = tab[b >>14 ]; l ^= s << 14; h ^= s >> 2;
-
- /* compensate for the top bit of a */
-
- if (top1b & 01) { l ^= b << 15; h ^= b >> 1; }
-
- *r1 = h; *r0 = l;
- }
-#endif
#ifdef THIRTY_TWO_BIT
static void bn_GF2m_mul_1x1(BN_ULONG *r1, BN_ULONG *r0, const BN_ULONG a, const BN_ULONG b)
{
@@ -278,7 +219,9 @@ static void bn_GF2m_mul_2x2(BN_ULONG *r, const BN_ULONG a1, const BN_ULONG a0, c
r[2] ^= m1 ^ r[1] ^ r[3]; /* h0 ^= m1 ^ l1 ^ h1; */
r[1] = r[3] ^ r[2] ^ r[0] ^ m1 ^ m0; /* l1 ^= l0 ^ h0 ^ m0; */
}
-
+#else
+void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0);
+#endif
/* Add polynomials a and b and store result in r; r could be a or b, a and b
* could be equal; r is the bitwise XOR of a and b.
@@ -321,7 +264,7 @@ int BN_GF2m_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
/* Performs modular reduction of a and store result in r. r could be a. */
-int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const unsigned int p[])
+int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[])
{
int j, k;
int n, dN, d0, d1;
@@ -422,21 +365,17 @@ int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const unsigned int p[])
int BN_GF2m_mod(BIGNUM *r, const BIGNUM *a, const BIGNUM *p)
{
int ret = 0;
- const int max = BN_num_bits(p);
- unsigned int *arr=NULL;
+ int arr[6];
bn_check_top(a);
bn_check_top(p);
- if ((arr = (unsigned int *)OPENSSL_malloc(sizeof(unsigned int) * max)) == NULL) goto err;
- ret = BN_GF2m_poly2arr(p, arr, max);
- if (!ret || ret > max)
+ ret = BN_GF2m_poly2arr(p, arr, sizeof(arr)/sizeof(arr[0]));
+ if (!ret || ret > (int)(sizeof(arr)/sizeof(arr[0])))
{
BNerr(BN_F_BN_GF2M_MOD,BN_R_INVALID_LENGTH);
- goto err;
+ return 0;
}
ret = BN_GF2m_mod_arr(r, a, arr);
bn_check_top(r);
-err:
- if (arr) OPENSSL_free(arr);
return ret;
}
@@ -444,7 +383,7 @@ err:
/* Compute the product of two polynomials a and b, reduce modulo p, and store
* the result in r. r could be a or b; a could be b.
*/
-int BN_GF2m_mod_mul_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const unsigned int p[], BN_CTX *ctx)
+int BN_GF2m_mod_mul_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const int p[], BN_CTX *ctx)
{
int zlen, i, j, k, ret = 0;
BIGNUM *s;
@@ -500,12 +439,12 @@ err:
int BN_GF2m_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx)
{
int ret = 0;
- const int max = BN_num_bits(p);
- unsigned int *arr=NULL;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
bn_check_top(a);
bn_check_top(b);
bn_check_top(p);
- if ((arr = (unsigned int *)OPENSSL_malloc(sizeof(unsigned int) * max)) == NULL) goto err;
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) * max)) == NULL) goto err;
ret = BN_GF2m_poly2arr(p, arr, max);
if (!ret || ret > max)
{
@@ -521,7 +460,7 @@ err:
/* Square a, reduce the result mod p, and store it in a. r could be a. */
-int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const unsigned int p[], BN_CTX *ctx)
+int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const int p[], BN_CTX *ctx)
{
int i, ret = 0;
BIGNUM *s;
@@ -556,12 +495,12 @@ err:
int BN_GF2m_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
int ret = 0;
- const int max = BN_num_bits(p);
- unsigned int *arr=NULL;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
bn_check_top(a);
bn_check_top(p);
- if ((arr = (unsigned int *)OPENSSL_malloc(sizeof(unsigned int) * max)) == NULL) goto err;
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) * max)) == NULL) goto err;
ret = BN_GF2m_poly2arr(p, arr, max);
if (!ret || ret > max)
{
@@ -583,7 +522,7 @@ err:
*/
int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
- BIGNUM *b, *c, *u, *v, *tmp;
+ BIGNUM *b, *c = NULL, *u = NULL, *v = NULL, *tmp;
int ret = 0;
bn_check_top(a);
@@ -591,18 +530,18 @@ int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
BN_CTX_start(ctx);
- b = BN_CTX_get(ctx);
- c = BN_CTX_get(ctx);
- u = BN_CTX_get(ctx);
- v = BN_CTX_get(ctx);
- if (v == NULL) goto err;
+ if ((b = BN_CTX_get(ctx))==NULL) goto err;
+ if ((c = BN_CTX_get(ctx))==NULL) goto err;
+ if ((u = BN_CTX_get(ctx))==NULL) goto err;
+ if ((v = BN_CTX_get(ctx))==NULL) goto err;
- if (!BN_one(b)) goto err;
if (!BN_GF2m_mod(u, a, p)) goto err;
- if (!BN_copy(v, p)) goto err;
-
if (BN_is_zero(u)) goto err;
+ if (!BN_copy(v, p)) goto err;
+#if 0
+ if (!BN_one(b)) goto err;
+
while (1)
{
while (!BN_is_odd(u))
@@ -627,13 +566,89 @@ int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
if (!BN_GF2m_add(u, u, v)) goto err;
if (!BN_GF2m_add(b, b, c)) goto err;
}
+#else
+ {
+ int i, ubits = BN_num_bits(u),
+ vbits = BN_num_bits(v), /* v is copy of p */
+ top = p->top;
+ BN_ULONG *udp,*bdp,*vdp,*cdp;
+
+ bn_wexpand(u,top); udp = u->d;
+ for (i=u->top;i<top;i++) udp[i] = 0;
+ u->top = top;
+ bn_wexpand(b,top); bdp = b->d;
+ bdp[0] = 1;
+ for (i=1;i<top;i++) bdp[i] = 0;
+ b->top = top;
+ bn_wexpand(c,top); cdp = c->d;
+ for (i=0;i<top;i++) cdp[i] = 0;
+ c->top = top;
+ vdp = v->d; /* It pays off to "cache" *->d pointers, because
+ * it allows optimizer to be more aggressive.
+ * But we don't have to "cache" p->d, because *p
+ * is declared 'const'... */
+ while (1)
+ {
+ while (ubits && !(udp[0]&1))
+ {
+ BN_ULONG u0,u1,b0,b1,mask;
+
+ u0 = udp[0];
+ b0 = bdp[0];
+ mask = (BN_ULONG)0-(b0&1);
+ b0 ^= p->d[0]&mask;
+ for (i=0;i<top-1;i++)
+ {
+ u1 = udp[i+1];
+ udp[i] = ((u0>>1)|(u1<<(BN_BITS2-1)))&BN_MASK2;
+ u0 = u1;
+ b1 = bdp[i+1]^(p->d[i+1]&mask);
+ bdp[i] = ((b0>>1)|(b1<<(BN_BITS2-1)))&BN_MASK2;
+ b0 = b1;
+ }
+ udp[i] = u0>>1;
+ bdp[i] = b0>>1;
+ ubits--;
+ }
+
+ if (ubits<=BN_BITS2 && udp[0]==1) break;
+ if (ubits<vbits)
+ {
+ i = ubits; ubits = vbits; vbits = i;
+ tmp = u; u = v; v = tmp;
+ tmp = b; b = c; c = tmp;
+ udp = vdp; vdp = v->d;
+ bdp = cdp; cdp = c->d;
+ }
+ for(i=0;i<top;i++)
+ {
+ udp[i] ^= vdp[i];
+ bdp[i] ^= cdp[i];
+ }
+ if (ubits==vbits)
+ {
+ BN_ULONG ul;
+ int utop = (ubits-1)/BN_BITS2;
+
+ while ((ul=udp[utop])==0 && utop) utop--;
+ ubits = utop*BN_BITS2 + BN_num_bits_word(ul);
+ }
+ }
+ bn_correct_top(b);
+ }
+#endif
if (!BN_copy(r, b)) goto err;
bn_check_top(r);
ret = 1;
err:
+#ifdef BN_DEBUG /* BN_CTX_end would complain about the expanded form */
+ bn_correct_top(c);
+ bn_correct_top(u);
+ bn_correct_top(v);
+#endif
BN_CTX_end(ctx);
return ret;
}
@@ -644,7 +659,7 @@ err:
* function is only provided for convenience; for best performance, use the
* BN_GF2m_mod_inv function.
*/
-int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *xx, const unsigned int p[], BN_CTX *ctx)
+int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *xx, const int p[], BN_CTX *ctx)
{
BIGNUM *field;
int ret = 0;
@@ -770,7 +785,7 @@ err:
* function is only provided for convenience; for best performance, use the
* BN_GF2m_mod_div function.
*/
-int BN_GF2m_mod_div_arr(BIGNUM *r, const BIGNUM *yy, const BIGNUM *xx, const unsigned int p[], BN_CTX *ctx)
+int BN_GF2m_mod_div_arr(BIGNUM *r, const BIGNUM *yy, const BIGNUM *xx, const int p[], BN_CTX *ctx)
{
BIGNUM *field;
int ret = 0;
@@ -795,7 +810,7 @@ err:
* the result in r. r could be a.
* Uses simple square-and-multiply algorithm A.5.1 from IEEE P1363.
*/
-int BN_GF2m_mod_exp_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const unsigned int p[], BN_CTX *ctx)
+int BN_GF2m_mod_exp_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const int p[], BN_CTX *ctx)
{
int ret = 0, i, n;
BIGNUM *u;
@@ -841,12 +856,12 @@ err:
int BN_GF2m_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx)
{
int ret = 0;
- const int max = BN_num_bits(p);
- unsigned int *arr=NULL;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
bn_check_top(a);
bn_check_top(b);
bn_check_top(p);
- if ((arr = (unsigned int *)OPENSSL_malloc(sizeof(unsigned int) * max)) == NULL) goto err;
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) * max)) == NULL) goto err;
ret = BN_GF2m_poly2arr(p, arr, max);
if (!ret || ret > max)
{
@@ -864,7 +879,7 @@ err:
* the result in r. r could be a.
* Uses exponentiation as in algorithm A.4.1 from IEEE P1363.
*/
-int BN_GF2m_mod_sqrt_arr(BIGNUM *r, const BIGNUM *a, const unsigned int p[], BN_CTX *ctx)
+int BN_GF2m_mod_sqrt_arr(BIGNUM *r, const BIGNUM *a, const int p[], BN_CTX *ctx)
{
int ret = 0;
BIGNUM *u;
@@ -900,11 +915,11 @@ err:
int BN_GF2m_mod_sqrt(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
int ret = 0;
- const int max = BN_num_bits(p);
- unsigned int *arr=NULL;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
bn_check_top(a);
bn_check_top(p);
- if ((arr = (unsigned int *)OPENSSL_malloc(sizeof(unsigned int) * max)) == NULL) goto err;
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) * max)) == NULL) goto err;
ret = BN_GF2m_poly2arr(p, arr, max);
if (!ret || ret > max)
{
@@ -921,10 +936,9 @@ err:
/* Find r such that r^2 + r = a mod p. r could be a. If no r exists returns 0.
* Uses algorithms A.4.7 and A.4.6 from IEEE P1363.
*/
-int BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a_, const unsigned int p[], BN_CTX *ctx)
+int BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a_, const int p[], BN_CTX *ctx)
{
- int ret = 0, count = 0;
- unsigned int j;
+ int ret = 0, count = 0, j;
BIGNUM *a, *z, *rho, *w, *w2, *tmp;
bn_check_top(a_);
@@ -1019,11 +1033,11 @@ err:
int BN_GF2m_mod_solve_quad(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
int ret = 0;
- const int max = BN_num_bits(p);
- unsigned int *arr=NULL;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
bn_check_top(a);
bn_check_top(p);
- if ((arr = (unsigned int *)OPENSSL_malloc(sizeof(unsigned int) *
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) *
max)) == NULL) goto err;
ret = BN_GF2m_poly2arr(p, arr, max);
if (!ret || ret > max)
@@ -1039,20 +1053,17 @@ err:
}
/* Convert the bit-string representation of a polynomial
- * ( \sum_{i=0}^n a_i * x^i , where a_0 is *not* zero) into an array
- * of integers corresponding to the bits with non-zero coefficient.
+ * ( \sum_{i=0}^n a_i * x^i) into an array of integers corresponding
+ * to the bits with non-zero coefficient. Array is terminated with -1.
* Up to max elements of the array will be filled. Return value is total
- * number of coefficients that would be extracted if array was large enough.
+ * number of array elements that would be filled if array was large enough.
*/
-int BN_GF2m_poly2arr(const BIGNUM *a, unsigned int p[], int max)
+int BN_GF2m_poly2arr(const BIGNUM *a, int p[], int max)
{
int i, j, k = 0;
BN_ULONG mask;
- if (BN_is_zero(a) || !BN_is_bit_set(a, 0))
- /* a_0 == 0 => return error (the unsigned int array
- * must be terminated by 0)
- */
+ if (BN_is_zero(a))
return 0;
for (i = a->top - 1; i >= 0; i--)
@@ -1072,26 +1083,31 @@ int BN_GF2m_poly2arr(const BIGNUM *a, unsigned int p[], int max)
}
}
+ if (k < max) {
+ p[k] = -1;
+ k++;
+ }
+
return k;
}
/* Convert the coefficient array representation of a polynomial to a
- * bit-string. The array must be terminated by 0.
+ * bit-string. The array must be terminated by -1.
*/
-int BN_GF2m_arr2poly(const unsigned int p[], BIGNUM *a)
+int BN_GF2m_arr2poly(const int p[], BIGNUM *a)
{
int i;
bn_check_top(a);
BN_zero(a);
- for (i = 0; p[i] != 0; i++)
+ for (i = 0; p[i] != -1; i++)
{
if (BN_set_bit(a, p[i]) == 0)
return 0;
}
- BN_set_bit(a, 0);
bn_check_top(a);
return 1;
}
+#endif
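
The sparse-polynomial arrays used by the *_arr functions switch from unsigned int, 0-terminated (with a mandatory constant term) to int, -1-terminated. A round-trip sketch with the K-163 reduction polynomial (the particular polynomial and gf2m_array_roundtrip are only an example; the functions exist only when OPENSSL_NO_EC2M is not defined):

#include <openssl/bn.h>

/* Build x^163 + x^7 + x^6 + x^3 + 1 from the new -1-terminated int array,
 * then flatten it back; exponents are listed in decreasing order. */
static int gf2m_array_roundtrip(void)
	{
	static const int k163[] = { 163, 7, 6, 3, 0, -1 };   /* -1 terminates */
	int out[6];
	BIGNUM *p = BN_new();
	int n, ok = 0;

	if (p == NULL)
		return 0;
	if (!BN_GF2m_arr2poly(k163, p))                      /* int[] -> BIGNUM  */
		goto done;
	n = BN_GF2m_poly2arr(p, out, sizeof(out)/sizeof(out[0]));  /* BIGNUM -> int[] */
	ok = (n == 6 && out[0] == 163 && out[5] == -1);
done:
	BN_free(p);
	return ok;
	}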
diff --git a/crypto/bn/bn_lcl.h b/crypto/bn/bn_lcl.h
index 27ac4397a151..eecfd8cc99e7 100644
--- a/crypto/bn/bn_lcl.h
+++ b/crypto/bn/bn_lcl.h
@@ -238,7 +238,7 @@ extern "C" {
# if defined(__DECC)
# include <c_asm.h>
# define BN_UMULT_HIGH(a,b) (BN_ULONG)asm("umulh %a0,%a1,%v0",(a),(b))
-# elif defined(__GNUC__)
+# elif defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
register BN_ULONG ret; \
asm ("umulh %1,%2,%0" \
@@ -247,7 +247,7 @@ extern "C" {
ret; })
# endif /* compiler */
# elif defined(_ARCH_PPC) && defined(__64BIT__) && defined(SIXTY_FOUR_BIT_LONG)
-# if defined(__GNUC__)
+# if defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
register BN_ULONG ret; \
asm ("mulhdu %0,%1,%2" \
@@ -255,8 +255,9 @@ extern "C" {
: "r"(a), "r"(b)); \
ret; })
# endif /* compiler */
-# elif defined(__x86_64) && defined(SIXTY_FOUR_BIT_LONG)
-# if defined(__GNUC__)
+# elif (defined(__x86_64) || defined(__x86_64__)) && \
+ (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
+# if defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
register BN_ULONG ret,discard; \
asm ("mulq %3" \
@@ -279,6 +280,19 @@ extern "C" {
# define BN_UMULT_HIGH(a,b) __umulh((a),(b))
# define BN_UMULT_LOHI(low,high,a,b) ((low)=_umul128((a),(b),&(high)))
# endif
+# elif defined(__mips) && (defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG))
+# if defined(__GNUC__) && __GNUC__>=2
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret; \
+ asm ("dmultu %1,%2" \
+ : "=h"(ret) \
+ : "r"(a), "r"(b) : "l"); \
+ ret; })
+# define BN_UMULT_LOHI(low,high,a,b) \
+ asm ("dmultu %2,%3" \
+ : "=l"(low),"=h"(high) \
+ : "r"(a), "r"(b));
+# endif
# endif /* cpu */
#endif /* OPENSSL_NO_ASM */
@@ -458,6 +472,10 @@ extern "C" {
}
#endif /* !BN_LLONG */
+#if defined(OPENSSL_DOING_MAKEDEPEND) && defined(OPENSSL_FIPS)
+#undef bn_div_words
+#endif
+
void bn_mul_normal(BN_ULONG *r,BN_ULONG *a,int na,BN_ULONG *b,int nb);
void bn_mul_comba8(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b);
void bn_mul_comba4(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b);
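
BN_UMULT_HIGH(a,b) must yield the upper word of the double-width product; the new MIPS64 variant reads it from the HI register after dmultu. Where no asm form applies, the same values can be sketched with unsigned __int128 on gcc/clang (umult_high and umult_lohi are illustrative stand-ins, not part of the header):

#include <stdint.h>

/* Portable stand-in for BN_UMULT_HIGH on 64-bit words: the top 64 bits of
 * the 128-bit product.  Requires a compiler with unsigned __int128. */
static inline uint64_t umult_high(uint64_t a, uint64_t b)
	{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
	}

/* BN_UMULT_LOHI equivalent: both halves at once. */
static inline void umult_lohi(uint64_t *lo, uint64_t *hi, uint64_t a, uint64_t b)
	{
	unsigned __int128 t = (unsigned __int128)a * b;
	*lo = (uint64_t)t;
	*hi = (uint64_t)(t >> 64);
	}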
diff --git a/crypto/bn/bn_lib.c b/crypto/bn/bn_lib.c
index 32a8fbaf51ee..7a5676de6927 100644
--- a/crypto/bn/bn_lib.c
+++ b/crypto/bn/bn_lib.c
@@ -133,15 +133,15 @@ int BN_get_params(int which)
const BIGNUM *BN_value_one(void)
{
- static BN_ULONG data_one=1L;
- static BIGNUM const_one={&data_one,1,1,0,BN_FLG_STATIC_DATA};
+ static const BN_ULONG data_one=1L;
+ static const BIGNUM const_one={(BN_ULONG *)&data_one,1,1,0,BN_FLG_STATIC_DATA};
return(&const_one);
}
int BN_num_bits_word(BN_ULONG l)
{
- static const char bits[256]={
+ static const unsigned char bits[256]={
0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
@@ -216,7 +216,7 @@ int BN_num_bits_word(BN_ULONG l)
else
#endif
{
-#if defined(SIXTEEN_BIT) || defined(THIRTY_TWO_BIT) || defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG)
+#if defined(THIRTY_TWO_BIT) || defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG)
if (l & 0xff00L)
return(bits[(int)(l>>8)]+8);
else
@@ -744,7 +744,7 @@ int BN_is_bit_set(const BIGNUM *a, int n)
i=n/BN_BITS2;
j=n%BN_BITS2;
if (a->top <= i) return 0;
- return(((a->d[i])>>j)&((BN_ULONG)1));
+ return (int)(((a->d[i])>>j)&((BN_ULONG)1));
}
int BN_mask_bits(BIGNUM *a, int n)
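
BN_num_bits_word keeps peeling its argument a byte at a time against a 256-entry table (now unsigned char). The same byte-peeling idea in isolation, with a tiny helper standing in for the table (num_bits_u32 and bits_of_byte are illustrative names):

#include <stdint.h>

static int bits_of_byte(unsigned int v)     /* v < 256; replaces the table lookup */
	{
	int b = 0;
	while (v) { b++; v >>= 1; }
	return b;
	}

/* Bit length of a 32-bit word, peeled a byte at a time like BN_num_bits_word. */
static int num_bits_u32(uint32_t l)
	{
	if (l & 0xffff0000u)
		return (l & 0xff000000u) ? bits_of_byte(l >> 24) + 24
		                         : bits_of_byte((l >> 16) & 0xff) + 16;
	else
		return (l & 0xff00u) ? bits_of_byte((l >> 8) & 0xff) + 8
		                     : bits_of_byte(l & 0xff);
	}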
diff --git a/crypto/bn/bn_mont.c b/crypto/bn/bn_mont.c
index 4799b152ddcb..427b5cf4df95 100644
--- a/crypto/bn/bn_mont.c
+++ b/crypto/bn/bn_mont.c
@@ -122,26 +122,10 @@
#define MONT_WORD /* use the faster word-based algorithm */
-#if defined(MONT_WORD) && defined(OPENSSL_BN_ASM_MONT) && (BN_BITS2<=32)
-/* This condition means we have a specific non-default build:
- * In the 0.9.8 branch, OPENSSL_BN_ASM_MONT is normally not set for any
- * BN_BITS2<=32 platform; an explicit "enable-montasm" is required.
- * I.e., if we are here, the user intentionally deviates from the
- * normal stable build to get better Montgomery performance from
- * the 0.9.9-dev backport.
- *
- * In this case only, we also enable BN_from_montgomery_word()
- * (another non-stable feature from 0.9.9-dev).
- */
-#define MONT_FROM_WORD___NON_DEFAULT_0_9_8_BUILD
-#endif
-
-#ifdef MONT_FROM_WORD___NON_DEFAULT_0_9_8_BUILD
+#ifdef MONT_WORD
static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont);
#endif
-
-
int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
BN_MONT_CTX *mont, BN_CTX *ctx)
{
@@ -153,11 +137,7 @@ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
if (num>1 && a->top==num && b->top==num)
{
if (bn_wexpand(r,num) == NULL) return(0);
-#if 0 /* for OpenSSL 0.9.9 mont->n0 */
if (bn_mul_mont(r->d,a->d,b->d,mont->N.d,mont->n0,num))
-#else
- if (bn_mul_mont(r->d,a->d,b->d,mont->N.d,&mont->n0,num))
-#endif
{
r->neg = a->neg^b->neg;
r->top = num;
@@ -181,7 +161,7 @@ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
if (!BN_mul(tmp,a,b,ctx)) goto err;
}
/* reduce from aRR to aR */
-#ifdef MONT_FROM_WORD___NON_DEFAULT_0_9_8_BUILD
+#ifdef MONT_WORD
if (!BN_from_montgomery_word(r,tmp,mont)) goto err;
#else
if (!BN_from_montgomery(r,tmp,mont,ctx)) goto err;
@@ -193,44 +173,39 @@ err:
return(ret);
}
-#ifdef MONT_FROM_WORD___NON_DEFAULT_0_9_8_BUILD
+#ifdef MONT_WORD
static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
{
BIGNUM *n;
- BN_ULONG *ap,*np,*rp,n0,v,*nrp;
- int al,nl,max,i,x,ri;
+ BN_ULONG *ap,*np,*rp,n0,v,carry;
+ int nl,max,i;
n= &(mont->N);
- /* mont->ri is the size of mont->N in bits (rounded up
- to the word size) */
- al=ri=mont->ri/BN_BITS2;
-
nl=n->top;
- if ((al == 0) || (nl == 0)) { ret->top=0; return(1); }
+ if (nl == 0) { ret->top=0; return(1); }
- max=(nl+al+1); /* allow for overflow (no?) XXX */
+ max=(2*nl); /* carry is stored separately */
if (bn_wexpand(r,max) == NULL) return(0);
r->neg^=n->neg;
np=n->d;
rp=r->d;
- nrp= &(r->d[nl]);
/* clear the top words of T */
+#if 1
for (i=r->top; i<max; i++) /* memset? XXX */
- r->d[i]=0;
+ rp[i]=0;
+#else
+ memset(&(rp[r->top]),0,(max-r->top)*sizeof(BN_ULONG));
+#endif
r->top=max;
-#if 0 /* for OpenSSL 0.9.9 mont->n0 */
n0=mont->n0[0];
-#else
- n0=mont->n0;
-#endif
#ifdef BN_COUNT
fprintf(stderr,"word BN_from_montgomery_word %d * %d\n",nl,nl);
#endif
- for (i=0; i<nl; i++)
+ for (carry=0, i=0; i<nl; i++, rp++)
{
#ifdef __TANDEM
{
@@ -248,59 +223,33 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
#else
v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
#endif
- nrp++;
- rp++;
- if (((nrp[-1]+=v)&BN_MASK2) >= v)
- continue;
- else
- {
- if (((++nrp[0])&BN_MASK2) != 0) continue;
- if (((++nrp[1])&BN_MASK2) != 0) continue;
- for (x=2; (((++nrp[x])&BN_MASK2) == 0); x++) ;
- }
+ v = (v+carry+rp[nl])&BN_MASK2;
+ carry |= (v != rp[nl]);
+ carry &= (v <= rp[nl]);
+ rp[nl]=v;
}
- bn_correct_top(r);
- /* mont->ri will be a multiple of the word size and below code
- * is kind of BN_rshift(ret,r,mont->ri) equivalent */
- if (r->top <= ri)
- {
- ret->top=0;
- return(1);
- }
- al=r->top-ri;
-
- if (bn_wexpand(ret,ri) == NULL) return(0);
- x=0-(((al-ri)>>(sizeof(al)*8-1))&1);
- ret->top=x=(ri&~x)|(al&x); /* min(ri,al) */
+ if (bn_wexpand(ret,nl) == NULL) return(0);
+ ret->top=nl;
ret->neg=r->neg;
rp=ret->d;
- ap=&(r->d[ri]);
+ ap=&(r->d[nl]);
+#define BRANCH_FREE 1
+#if BRANCH_FREE
{
- size_t m1,m2;
-
- v=bn_sub_words(rp,ap,np,ri);
- /* this ----------------^^ works even in al<ri case
- * thanks to zealous zeroing of top of the vector in the
- * beginning. */
+ BN_ULONG *nrp;
+ size_t m;
- /* if (al==ri && !v) || al>ri) nrp=rp; else nrp=ap; */
- /* in other words if subtraction result is real, then
+ v=bn_sub_words(rp,ap,np,nl)-carry;
+ /* if subtraction result is real, then
* trick unconditional memcpy below to perform in-place
* "refresh" instead of actual copy. */
- m1=0-(size_t)(((al-ri)>>(sizeof(al)*8-1))&1); /* al<ri */
- m2=0-(size_t)(((ri-al)>>(sizeof(al)*8-1))&1); /* al>ri */
- m1|=m2; /* (al!=ri) */
- m1|=(0-(size_t)v); /* (al!=ri || v) */
- m1&=~m2; /* (al!=ri || v) && !al>ri */
- nrp=(BN_ULONG *)(((size_t)rp&~m1)|((size_t)ap&m1));
- }
+ m=(0-(size_t)v);
+ nrp=(BN_ULONG *)(((PTR_SIZE_INT)rp&~m)|((PTR_SIZE_INT)ap&m));
- /* 'i<ri' is chosen to eliminate dependency on input data, even
- * though it results in redundant copy in al<ri case. */
- for (i=0,ri-=4; i<ri; i+=4)
+ for (i=0,nl-=4; i<nl; i+=4)
{
BN_ULONG t1,t2,t3,t4;
@@ -313,192 +262,33 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
rp[i+2]=t3;
rp[i+3]=t4;
}
- for (ri+=4; i<ri; i++)
+ for (nl+=4; i<nl; i++)
rp[i]=nrp[i], ap[i]=0;
+ }
+#else
+ if (bn_sub_words (rp,ap,np,nl)-carry)
+ memcpy(rp,ap,nl*sizeof(BN_ULONG));
+#endif
bn_correct_top(r);
bn_correct_top(ret);
bn_check_top(ret);
return(1);
}
+#endif /* MONT_WORD */
int BN_from_montgomery(BIGNUM *ret, const BIGNUM *a, BN_MONT_CTX *mont,
BN_CTX *ctx)
{
int retn=0;
+#ifdef MONT_WORD
BIGNUM *t;
BN_CTX_start(ctx);
if ((t = BN_CTX_get(ctx)) && BN_copy(t,a))
retn = BN_from_montgomery_word(ret,t,mont);
BN_CTX_end(ctx);
- return retn;
- }
-
-#else /* !MONT_FROM_WORD___NON_DEFAULT_0_9_8_BUILD */
-
-int BN_from_montgomery(BIGNUM *ret, const BIGNUM *a, BN_MONT_CTX *mont,
- BN_CTX *ctx)
- {
- int retn=0;
-
-#ifdef MONT_WORD
- BIGNUM *n,*r;
- BN_ULONG *ap,*np,*rp,n0,v,*nrp;
- int al,nl,max,i,x,ri;
-
- BN_CTX_start(ctx);
- if ((r = BN_CTX_get(ctx)) == NULL) goto err;
-
- if (!BN_copy(r,a)) goto err;
- n= &(mont->N);
-
- ap=a->d;
- /* mont->ri is the size of mont->N in bits (rounded up
- to the word size) */
- al=ri=mont->ri/BN_BITS2;
-
- nl=n->top;
- if ((al == 0) || (nl == 0)) { r->top=0; return(1); }
-
- max=(nl+al+1); /* allow for overflow (no?) XXX */
- if (bn_wexpand(r,max) == NULL) goto err;
-
- r->neg=a->neg^n->neg;
- np=n->d;
- rp=r->d;
- nrp= &(r->d[nl]);
-
- /* clear the top words of T */
-#if 1
- for (i=r->top; i<max; i++) /* memset? XXX */
- r->d[i]=0;
-#else
- memset(&(r->d[r->top]),0,(max-r->top)*sizeof(BN_ULONG));
-#endif
-
- r->top=max;
- n0=mont->n0;
-
-#ifdef BN_COUNT
- fprintf(stderr,"word BN_from_montgomery %d * %d\n",nl,nl);
-#endif
- for (i=0; i<nl; i++)
- {
-#ifdef __TANDEM
- {
- long long t1;
- long long t2;
- long long t3;
- t1 = rp[0] * (n0 & 0177777);
- t2 = 037777600000l;
- t2 = n0 & t2;
- t3 = rp[0] & 0177777;
- t2 = (t3 * t2) & BN_MASK2;
- t1 = t1 + t2;
- v=bn_mul_add_words(rp,np,nl,(BN_ULONG) t1);
- }
-#else
- v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
-#endif
- nrp++;
- rp++;
- if (((nrp[-1]+=v)&BN_MASK2) >= v)
- continue;
- else
- {
- if (((++nrp[0])&BN_MASK2) != 0) continue;
- if (((++nrp[1])&BN_MASK2) != 0) continue;
- for (x=2; (((++nrp[x])&BN_MASK2) == 0); x++) ;
- }
- }
- bn_correct_top(r);
-
- /* mont->ri will be a multiple of the word size and below code
- * is kind of BN_rshift(ret,r,mont->ri) equivalent */
- if (r->top <= ri)
- {
- ret->top=0;
- retn=1;
- goto err;
- }
- al=r->top-ri;
-
-# define BRANCH_FREE 1
-# if BRANCH_FREE
- if (bn_wexpand(ret,ri) == NULL) goto err;
- x=0-(((al-ri)>>(sizeof(al)*8-1))&1);
- ret->top=x=(ri&~x)|(al&x); /* min(ri,al) */
- ret->neg=r->neg;
-
- rp=ret->d;
- ap=&(r->d[ri]);
-
- {
- size_t m1,m2;
-
- v=bn_sub_words(rp,ap,np,ri);
- /* this ----------------^^ works even in al<ri case
- * thanks to zealous zeroing of top of the vector in the
- * beginning. */
-
- /* if (al==ri && !v) || al>ri) nrp=rp; else nrp=ap; */
- /* in other words if subtraction result is real, then
- * trick unconditional memcpy below to perform in-place
- * "refresh" instead of actual copy. */
- m1=0-(size_t)(((al-ri)>>(sizeof(al)*8-1))&1); /* al<ri */
- m2=0-(size_t)(((ri-al)>>(sizeof(al)*8-1))&1); /* al>ri */
- m1|=m2; /* (al!=ri) */
- m1|=(0-(size_t)v); /* (al!=ri || v) */
- m1&=~m2; /* (al!=ri || v) && !al>ri */
- nrp=(BN_ULONG *)(((size_t)rp&~m1)|((size_t)ap&m1));
- }
-
- /* 'i<ri' is chosen to eliminate dependency on input data, even
- * though it results in redundant copy in al<ri case. */
- for (i=0,ri-=4; i<ri; i+=4)
- {
- BN_ULONG t1,t2,t3,t4;
-
- t1=nrp[i+0];
- t2=nrp[i+1];
- t3=nrp[i+2]; ap[i+0]=0;
- t4=nrp[i+3]; ap[i+1]=0;
- rp[i+0]=t1; ap[i+2]=0;
- rp[i+1]=t2; ap[i+3]=0;
- rp[i+2]=t3;
- rp[i+3]=t4;
- }
- for (ri+=4; i<ri; i++)
- rp[i]=nrp[i], ap[i]=0;
- bn_correct_top(r);
- bn_correct_top(ret);
-# else
- if (bn_wexpand(ret,al) == NULL) goto err;
- ret->top=al;
- ret->neg=r->neg;
-
- rp=ret->d;
- ap=&(r->d[ri]);
- al-=4;
- for (i=0; i<al; i+=4)
- {
- BN_ULONG t1,t2,t3,t4;
-
- t1=ap[i+0];
- t2=ap[i+1];
- t3=ap[i+2];
- t4=ap[i+3];
- rp[i+0]=t1;
- rp[i+1]=t2;
- rp[i+2]=t3;
- rp[i+3]=t4;
- }
- al+=4;
- for (; i<al; i++)
- rp[i]=ap[i];
-# endif
-#else /* !MONT_WORD */
+#else /* !MONT_WORD */
BIGNUM *t1,*t2;
BN_CTX_start(ctx);
@@ -515,21 +305,18 @@ int BN_from_montgomery(BIGNUM *ret, const BIGNUM *a, BN_MONT_CTX *mont,
if (!BN_mul(t1,t2,&mont->N,ctx)) goto err;
if (!BN_add(t2,a,t1)) goto err;
if (!BN_rshift(ret,t2,mont->ri)) goto err;
-#endif /* MONT_WORD */
-#if !defined(BRANCH_FREE) || BRANCH_FREE==0
if (BN_ucmp(ret, &(mont->N)) >= 0)
{
if (!BN_usub(ret,ret,&(mont->N))) goto err;
}
-#endif
retn=1;
bn_check_top(ret);
err:
BN_CTX_end(ctx);
+#endif /* MONT_WORD */
return(retn);
}
-#endif /* MONT_FROM_WORD___NON_DEFAULT_0_9_8_BUILD */
BN_MONT_CTX *BN_MONT_CTX_new(void)
{
@@ -549,11 +336,7 @@ void BN_MONT_CTX_init(BN_MONT_CTX *ctx)
BN_init(&(ctx->RR));
BN_init(&(ctx->N));
BN_init(&(ctx->Ni));
-#if 0 /* for OpenSSL 0.9.9 mont->n0 */
ctx->n0[0] = ctx->n0[1] = 0;
-#else
- ctx->n0 = 0;
-#endif
ctx->flags=0;
}
@@ -585,26 +368,22 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx)
BIGNUM tmod;
BN_ULONG buf[2];
- mont->ri=(BN_num_bits(mod)+(BN_BITS2-1))/BN_BITS2*BN_BITS2;
- BN_zero(R);
-#if 0 /* for OpenSSL 0.9.9 mont->n0, would be "#if defined(OPENSSL_BN_ASM_MONT) && (BN_BITS2<=32)",
- only certain BN_BITS2<=32 platforms actually need this */
- if (!(BN_set_bit(R,2*BN_BITS2))) goto err; /* R */
-#else
- if (!(BN_set_bit(R,BN_BITS2))) goto err; /* R */
-#endif
-
- buf[0]=mod->d[0]; /* tmod = N mod word size */
- buf[1]=0;
-
BN_init(&tmod);
tmod.d=buf;
- tmod.top = buf[0] != 0 ? 1 : 0;
tmod.dmax=2;
tmod.neg=0;
-#if 0 /* for OpenSSL 0.9.9 mont->n0, would be "#if defined(OPENSSL_BN_ASM_MONT) && (BN_BITS2<=32)";
- only certain BN_BITS2<=32 platforms actually need this */
+ mont->ri=(BN_num_bits(mod)+(BN_BITS2-1))/BN_BITS2*BN_BITS2;
+
+#if defined(OPENSSL_BN_ASM_MONT) && (BN_BITS2<=32)
+ /* Only certain BN_BITS2<=32 platforms actually make use of
+ * n0[1], and we could use the #else case (with a shorter R
+ * value) for the others. However, currently only the assembler
+ * files do know which is which. */
+
+ BN_zero(R);
+ if (!(BN_set_bit(R,2*BN_BITS2))) goto err;
+
tmod.top=0;
if ((buf[0] = mod->d[0])) tmod.top=1;
if ((buf[1] = mod->top>1 ? mod->d[1] : 0)) tmod.top=2;
@@ -632,6 +411,12 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx)
mont->n0[0] = (Ri->top > 0) ? Ri->d[0] : 0;
mont->n0[1] = (Ri->top > 1) ? Ri->d[1] : 0;
#else
+ BN_zero(R);
+ if (!(BN_set_bit(R,BN_BITS2))) goto err; /* R */
+
+ buf[0]=mod->d[0]; /* tmod = N mod word size */
+ buf[1]=0;
+ tmod.top = buf[0] != 0 ? 1 : 0;
/* Ri = R^-1 mod N*/
if ((BN_mod_inverse(Ri,R,&tmod,ctx)) == NULL)
goto err;
@@ -647,12 +432,8 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx)
if (!BN_div(Ri,NULL,Ri,&tmod,ctx)) goto err;
/* Ni = (R*Ri-1)/N,
* keep only least significant word: */
-# if 0 /* for OpenSSL 0.9.9 mont->n0 */
mont->n0[0] = (Ri->top > 0) ? Ri->d[0] : 0;
mont->n0[1] = 0;
-# else
- mont->n0 = (Ri->top > 0) ? Ri->d[0] : 0;
-# endif
#endif
}
#else /* !MONT_WORD */
@@ -689,12 +470,8 @@ BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, BN_MONT_CTX *from)
if (!BN_copy(&(to->N),&(from->N))) return NULL;
if (!BN_copy(&(to->Ni),&(from->Ni))) return NULL;
to->ri=from->ri;
-#if 0 /* for OpenSSL 0.9.9 mont->n0 */
to->n0[0]=from->n0[0];
to->n0[1]=from->n0[1];
-#else
- to->n0=from->n0;
-#endif
return(to);
}
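[editor's note] The rewritten BN_from_montgomery_word and the BRANCH_FREE final reduction above lean on two branchless idioms: detecting carry/borrow out of a word operation by comparing the masked result against an operand, and turning that 0/1 flag into an all-zero/all-one mask that selects between two candidate pointers, so the copy that follows never branches on secret data. The following is a minimal standalone sketch of both idioms in plain C, not OpenSSL API; uint32_t stands in for BN_ULONG and all names and values are illustrative (the patch itself uses a slightly longer comparison sequence because it also has to merge a pre-existing carry).

#include <stdio.h>
#include <stdint.h>

typedef uint32_t word;

/* dst = a - b over n little-endian words; returns the final borrow (0 or 1) */
static word sub_words(word *dst, const word *a, const word *b, int n)
	{
	word borrow = 0;
	int i;
	for (i = 0; i < n; i++)
		{
		word t = a[i] - borrow;
		borrow = (t > a[i]);        /* borrow caused by the incoming borrow */
		dst[i] = t - b[i];
		borrow |= (dst[i] > t);     /* borrow caused by subtracting b[i] */
		}
	return borrow;
	}

/* branch-free select: returns if_zero when mask==0, if_ones when mask==~0 --
 * the same trick as ((PTR_SIZE_INT)rp&~m)|((PTR_SIZE_INT)ap&m) in the patch */
static const word *select_ptr(const word *if_zero, const word *if_ones,
	uintptr_t mask)
	{
	return (const word *)(((uintptr_t)if_zero & ~mask) |
	                      ((uintptr_t)if_ones & mask));
	}

int main(void)
	{
	/* conditional modular reduction without an if: r = x - p, then keep x
	 * if the subtraction borrowed (i.e. x < p), otherwise keep r */
	word p[2] = { 0xfffffffbu, 0xffffffffu };
	word x[2] = { 0x00000005u, 0xffffffffu };   /* x < p in this example */
	word r[2];
	word borrow = sub_words(r, x, p, 2);
	uintptr_t mask = 0 - (uintptr_t)borrow;     /* 0 or all-ones */
	const word *res = select_ptr(r, x, mask);
	printf("picked %s: %08x%08x\n", res == x ? "x" : "r", res[1], res[0]);
	return 0;
	}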
diff --git a/crypto/bn/bn_nist.c b/crypto/bn/bn_nist.c
index 2ca5b0139111..43caee477031 100644
--- a/crypto/bn/bn_nist.c
+++ b/crypto/bn/bn_nist.c
@@ -319,6 +319,13 @@ static void nist_cp_bn(BN_ULONG *buf, BN_ULONG *a, int top)
:(to[(n)/2] =((m)&1)?(from[(m)/2]>>32):(from[(m)/2]&BN_MASK2l)))
#define bn_32_set_0(to, n) (((n)&1)?(to[(n)/2]&=BN_MASK2l):(to[(n)/2]=0));
#define bn_cp_32(to,n,from,m) ((m)>=0)?bn_cp_32_naked(to,n,from,m):bn_32_set_0(to,n)
+# if defined(L_ENDIAN)
+# if defined(__arch64__)
+# define NIST_INT64 long
+# else
+# define NIST_INT64 long long
+# endif
+# endif
#else
#define bn_cp_64(to, n, from, m) \
{ \
@@ -330,13 +337,15 @@ static void nist_cp_bn(BN_ULONG *buf, BN_ULONG *a, int top)
bn_32_set_0(to, (n)*2); \
bn_32_set_0(to, (n)*2+1); \
}
-#if BN_BITS2 == 32
#define bn_cp_32(to, n, from, m) (to)[n] = (m>=0)?((from)[m]):0;
#define bn_32_set_0(to, n) (to)[n] = (BN_ULONG)0;
-#endif
+# if defined(_WIN32) && !defined(__GNUC__)
+# define NIST_INT64 __int64
+# elif defined(BN_LLONG)
+# define NIST_INT64 long long
+# endif
#endif /* BN_BITS2 != 64 */
-
#define nist_set_192(to, from, a1, a2, a3) \
{ \
bn_cp_64(to, 0, from, (a3) - 3) \
@@ -350,11 +359,13 @@ int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
int top = a->top, i;
int carry;
register BN_ULONG *r_d, *a_d = a->d;
- BN_ULONG t_d[BN_NIST_192_TOP],
- buf[BN_NIST_192_TOP],
- c_d[BN_NIST_192_TOP],
+ union {
+ BN_ULONG bn[BN_NIST_192_TOP];
+ unsigned int ui[BN_NIST_192_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_192_TOP],
*res;
- size_t mask;
+ PTR_SIZE_INT mask;
static const BIGNUM _bignum_nist_p_192_sqr = {
(BN_ULONG *)_nist_p_192_sqr,
sizeof(_nist_p_192_sqr)/sizeof(_nist_p_192_sqr[0]),
@@ -385,15 +396,48 @@ int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
else
r_d = a_d;
- nist_cp_bn_0(buf, a_d + BN_NIST_192_TOP, top - BN_NIST_192_TOP, BN_NIST_192_TOP);
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_192_TOP, top - BN_NIST_192_TOP, BN_NIST_192_TOP);
+
+#if defined(NIST_INT64)
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc += bp[3*2-6];
+ acc += bp[5*2-6]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc += bp[3*2-5];
+ acc += bp[5*2-5]; rp[1] = (unsigned int)acc; acc >>= 32;
- nist_set_192(t_d, buf, 0, 3, 3);
+ acc += rp[2]; acc += bp[3*2-6];
+ acc += bp[4*2-6];
+ acc += bp[5*2-6]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[3*2-5];
+ acc += bp[4*2-5];
+ acc += bp[5*2-5]; rp[3] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[4]; acc += bp[4*2-6];
+ acc += bp[5*2-6]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[4*2-5];
+ acc += bp[5*2-5]; rp[5] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_192_TOP];
+
+ nist_set_192(t_d, buf.bn, 0, 3, 3);
carry = (int)bn_add_words(r_d, r_d, t_d, BN_NIST_192_TOP);
- nist_set_192(t_d, buf, 4, 4, 0);
+ nist_set_192(t_d, buf.bn, 4, 4, 0);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_192_TOP);
- nist_set_192(t_d, buf, 5, 5, 5)
+ nist_set_192(t_d, buf.bn, 5, 5, 5)
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_192_TOP);
-
+ }
+#endif
if (carry > 0)
carry = (int)bn_sub_words(r_d,r_d,_nist_p_192[carry-1],BN_NIST_192_TOP);
else
@@ -405,9 +449,10 @@ int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
* 'tmp=result-modulus; if (!carry || !borrow) result=tmp;'
* this is what happens below, but without explicit if:-) a.
*/
- mask = 0-(size_t)bn_sub_words(c_d,r_d,_nist_p_192[0],BN_NIST_192_TOP);
- mask &= 0-(size_t)carry;
- res = (BN_ULONG *)(((size_t)c_d&~mask) | ((size_t)r_d&mask));
+ mask = 0-(PTR_SIZE_INT)bn_sub_words(c_d,r_d,_nist_p_192[0],BN_NIST_192_TOP);
+ mask &= 0-(PTR_SIZE_INT)carry;
+ res = (BN_ULONG *)
+ (((PTR_SIZE_INT)c_d&~mask) | ((PTR_SIZE_INT)r_d&mask));
nist_cp_bn(r_d, res, BN_NIST_192_TOP);
r->top = BN_NIST_192_TOP;
bn_correct_top(r);
@@ -434,12 +479,11 @@ int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
int top = a->top, i;
int carry;
BN_ULONG *r_d, *a_d = a->d;
- BN_ULONG t_d[BN_NIST_224_TOP],
- buf[BN_NIST_224_TOP],
+ BN_ULONG buf[BN_NIST_224_TOP],
c_d[BN_NIST_224_TOP],
*res;
- size_t mask;
- union { bn_addsub_f f; size_t p; } u;
+ PTR_SIZE_INT mask;
+ union { bn_addsub_f f; PTR_SIZE_INT p; } u;
static const BIGNUM _bignum_nist_p_224_sqr = {
(BN_ULONG *)_nist_p_224_sqr,
sizeof(_nist_p_224_sqr)/sizeof(_nist_p_224_sqr[0]),
@@ -473,14 +517,54 @@ int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
#if BN_BITS2==64
/* copy upper 256 bits of 448 bit number ... */
- nist_cp_bn_0(t_d, a_d + (BN_NIST_224_TOP-1), top - (BN_NIST_224_TOP-1), BN_NIST_224_TOP);
+ nist_cp_bn_0(c_d, a_d + (BN_NIST_224_TOP-1), top - (BN_NIST_224_TOP-1), BN_NIST_224_TOP);
/* ... and right shift by 32 to obtain upper 224 bits */
- nist_set_224(buf, t_d, 14, 13, 12, 11, 10, 9, 8);
+ nist_set_224(buf, c_d, 14, 13, 12, 11, 10, 9, 8);
/* truncate lower part to 224 bits too */
r_d[BN_NIST_224_TOP-1] &= BN_MASK2l;
#else
nist_cp_bn_0(buf, a_d + BN_NIST_224_TOP, top - BN_NIST_224_TOP, BN_NIST_224_TOP);
#endif
+
+#if defined(NIST_INT64) && BN_BITS2!=64
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf;
+
+ acc = rp[0]; acc -= bp[7-7];
+ acc -= bp[11-7]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc -= bp[8-7];
+ acc -= bp[12-7]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc -= bp[9-7];
+ acc -= bp[13-7]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[7-7];
+ acc += bp[11-7];
+ acc -= bp[10-7]; rp[3] = (unsigned int)acc; acc>>= 32;
+
+ acc += rp[4]; acc += bp[8-7];
+ acc += bp[12-7];
+ acc -= bp[11-7]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[9-7];
+ acc += bp[13-7];
+ acc -= bp[12-7]; rp[5] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[6]; acc += bp[10-7];
+ acc -= bp[13-7]; rp[6] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+# if BN_BITS2==64
+ rp[7] = carry;
+# endif
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_224_TOP];
+
nist_set_224(t_d, buf, 10, 9, 8, 7, 0, 0, 0);
carry = (int)bn_add_words(r_d, r_d, t_d, BN_NIST_224_TOP);
nist_set_224(t_d, buf, 0, 13, 12, 11, 0, 0, 0);
@@ -493,6 +577,8 @@ int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
#if BN_BITS2==64
carry = (int)(r_d[BN_NIST_224_TOP-1]>>32);
#endif
+ }
+#endif
u.f = bn_sub_words;
if (carry > 0)
{
@@ -510,16 +596,18 @@ int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
* to be compared to the modulus and conditionally
* adjusted by *subtracting* the latter. */
carry = (int)bn_add_words(r_d,r_d,_nist_p_224[-carry-1],BN_NIST_224_TOP);
- mask = 0-(size_t)carry;
- u.p = ((size_t)bn_sub_words&mask) | ((size_t)bn_add_words&~mask);
+ mask = 0-(PTR_SIZE_INT)carry;
+ u.p = ((PTR_SIZE_INT)bn_sub_words&mask) |
+ ((PTR_SIZE_INT)bn_add_words&~mask);
}
else
carry = 1;
/* otherwise it's effectively same as in BN_nist_mod_192... */
- mask = 0-(size_t)(*u.f)(c_d,r_d,_nist_p_224[0],BN_NIST_224_TOP);
- mask &= 0-(size_t)carry;
- res = (BN_ULONG *)(((size_t)c_d&~mask) | ((size_t)r_d&mask));
+ mask = 0-(PTR_SIZE_INT)(*u.f)(c_d,r_d,_nist_p_224[0],BN_NIST_224_TOP);
+ mask &= 0-(PTR_SIZE_INT)carry;
+ res = (BN_ULONG *)(((PTR_SIZE_INT)c_d&~mask) |
+ ((PTR_SIZE_INT)r_d&mask));
nist_cp_bn(r_d, res, BN_NIST_224_TOP);
r->top = BN_NIST_224_TOP;
bn_correct_top(r);
@@ -545,12 +633,14 @@ int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
int i, top = a->top;
int carry = 0;
register BN_ULONG *a_d = a->d, *r_d;
- BN_ULONG t_d[BN_NIST_256_TOP],
- buf[BN_NIST_256_TOP],
- c_d[BN_NIST_256_TOP],
+ union {
+ BN_ULONG bn[BN_NIST_256_TOP];
+ unsigned int ui[BN_NIST_256_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_256_TOP],
*res;
- size_t mask;
- union { bn_addsub_f f; size_t p; } u;
+ PTR_SIZE_INT mask;
+ union { bn_addsub_f f; PTR_SIZE_INT p; } u;
static const BIGNUM _bignum_nist_p_256_sqr = {
(BN_ULONG *)_nist_p_256_sqr,
sizeof(_nist_p_256_sqr)/sizeof(_nist_p_256_sqr[0]),
@@ -581,12 +671,87 @@ int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
else
r_d = a_d;
- nist_cp_bn_0(buf, a_d + BN_NIST_256_TOP, top - BN_NIST_256_TOP, BN_NIST_256_TOP);
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_256_TOP, top - BN_NIST_256_TOP, BN_NIST_256_TOP);
+
+#if defined(NIST_INT64)
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc += bp[8-8];
+ acc += bp[9-8];
+ acc -= bp[11-8];
+ acc -= bp[12-8];
+ acc -= bp[13-8];
+ acc -= bp[14-8]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc += bp[9-8];
+ acc += bp[10-8];
+ acc -= bp[12-8];
+ acc -= bp[13-8];
+ acc -= bp[14-8];
+ acc -= bp[15-8]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc += bp[10-8];
+ acc += bp[11-8];
+ acc -= bp[13-8];
+ acc -= bp[14-8];
+ acc -= bp[15-8]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[11-8];
+ acc += bp[11-8];
+ acc += bp[12-8];
+ acc += bp[12-8];
+ acc += bp[13-8];
+ acc -= bp[15-8];
+ acc -= bp[8-8];
+ acc -= bp[9-8]; rp[3] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[4]; acc += bp[12-8];
+ acc += bp[12-8];
+ acc += bp[13-8];
+ acc += bp[13-8];
+ acc += bp[14-8];
+ acc -= bp[9-8];
+ acc -= bp[10-8]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[13-8];
+ acc += bp[13-8];
+ acc += bp[14-8];
+ acc += bp[14-8];
+ acc += bp[15-8];
+ acc -= bp[10-8];
+ acc -= bp[11-8]; rp[5] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[6]; acc += bp[14-8];
+ acc += bp[14-8];
+ acc += bp[15-8];
+ acc += bp[15-8];
+ acc += bp[14-8];
+ acc += bp[13-8];
+ acc -= bp[8-8];
+ acc -= bp[9-8]; rp[6] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[7]; acc += bp[15-8];
+ acc += bp[15-8];
+ acc += bp[15-8];
+ acc += bp[8 -8];
+ acc -= bp[10-8];
+ acc -= bp[11-8];
+ acc -= bp[12-8];
+ acc -= bp[13-8]; rp[7] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_256_TOP];
/*S1*/
- nist_set_256(t_d, buf, 15, 14, 13, 12, 11, 0, 0, 0);
+ nist_set_256(t_d, buf.bn, 15, 14, 13, 12, 11, 0, 0, 0);
/*S2*/
- nist_set_256(c_d, buf, 0, 15, 14, 13, 12, 0, 0, 0);
+ nist_set_256(c_d, buf.bn, 0, 15, 14, 13, 12, 0, 0, 0);
carry = (int)bn_add_words(t_d, t_d, c_d, BN_NIST_256_TOP);
/* left shift */
{
@@ -604,24 +769,26 @@ int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
}
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*S3*/
- nist_set_256(t_d, buf, 15, 14, 0, 0, 0, 10, 9, 8);
+ nist_set_256(t_d, buf.bn, 15, 14, 0, 0, 0, 10, 9, 8);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*S4*/
- nist_set_256(t_d, buf, 8, 13, 15, 14, 13, 11, 10, 9);
+ nist_set_256(t_d, buf.bn, 8, 13, 15, 14, 13, 11, 10, 9);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*D1*/
- nist_set_256(t_d, buf, 10, 8, 0, 0, 0, 13, 12, 11);
+ nist_set_256(t_d, buf.bn, 10, 8, 0, 0, 0, 13, 12, 11);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*D2*/
- nist_set_256(t_d, buf, 11, 9, 0, 0, 15, 14, 13, 12);
+ nist_set_256(t_d, buf.bn, 11, 9, 0, 0, 15, 14, 13, 12);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*D3*/
- nist_set_256(t_d, buf, 12, 0, 10, 9, 8, 15, 14, 13);
+ nist_set_256(t_d, buf.bn, 12, 0, 10, 9, 8, 15, 14, 13);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
/*D4*/
- nist_set_256(t_d, buf, 13, 0, 11, 10, 9, 0, 15, 14);
+ nist_set_256(t_d, buf.bn, 13, 0, 11, 10, 9, 0, 15, 14);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
+ }
+#endif
/* see BN_nist_mod_224 for explanation */
u.f = bn_sub_words;
if (carry > 0)
@@ -629,15 +796,17 @@ int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
else if (carry < 0)
{
carry = (int)bn_add_words(r_d,r_d,_nist_p_256[-carry-1],BN_NIST_256_TOP);
- mask = 0-(size_t)carry;
- u.p = ((size_t)bn_sub_words&mask) | ((size_t)bn_add_words&~mask);
+ mask = 0-(PTR_SIZE_INT)carry;
+ u.p = ((PTR_SIZE_INT)bn_sub_words&mask) |
+ ((PTR_SIZE_INT)bn_add_words&~mask);
}
else
carry = 1;
- mask = 0-(size_t)(*u.f)(c_d,r_d,_nist_p_256[0],BN_NIST_256_TOP);
- mask &= 0-(size_t)carry;
- res = (BN_ULONG *)(((size_t)c_d&~mask) | ((size_t)r_d&mask));
+ mask = 0-(PTR_SIZE_INT)(*u.f)(c_d,r_d,_nist_p_256[0],BN_NIST_256_TOP);
+ mask &= 0-(PTR_SIZE_INT)carry;
+ res = (BN_ULONG *)(((PTR_SIZE_INT)c_d&~mask) |
+ ((PTR_SIZE_INT)r_d&mask));
nist_cp_bn(r_d, res, BN_NIST_256_TOP);
r->top = BN_NIST_256_TOP;
bn_correct_top(r);
@@ -667,12 +836,14 @@ int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
int i, top = a->top;
int carry = 0;
register BN_ULONG *r_d, *a_d = a->d;
- BN_ULONG t_d[BN_NIST_384_TOP],
- buf[BN_NIST_384_TOP],
- c_d[BN_NIST_384_TOP],
+ union {
+ BN_ULONG bn[BN_NIST_384_TOP];
+ unsigned int ui[BN_NIST_384_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_384_TOP],
*res;
- size_t mask;
- union { bn_addsub_f f; size_t p; } u;
+ PTR_SIZE_INT mask;
+ union { bn_addsub_f f; PTR_SIZE_INT p; } u;
static const BIGNUM _bignum_nist_p_384_sqr = {
(BN_ULONG *)_nist_p_384_sqr,
sizeof(_nist_p_384_sqr)/sizeof(_nist_p_384_sqr[0]),
@@ -704,10 +875,100 @@ int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
else
r_d = a_d;
- nist_cp_bn_0(buf, a_d + BN_NIST_384_TOP, top - BN_NIST_384_TOP, BN_NIST_384_TOP);
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_384_TOP, top - BN_NIST_384_TOP, BN_NIST_384_TOP);
+
+#if defined(NIST_INT64)
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc += bp[12-12];
+ acc += bp[21-12];
+ acc += bp[20-12];
+ acc -= bp[23-12]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc += bp[13-12];
+ acc += bp[22-12];
+ acc += bp[23-12];
+ acc -= bp[12-12];
+ acc -= bp[20-12]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc += bp[14-12];
+ acc += bp[23-12];
+ acc -= bp[13-12];
+ acc -= bp[21-12]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[15-12];
+ acc += bp[12-12];
+ acc += bp[20-12];
+ acc += bp[21-12];
+ acc -= bp[14-12];
+ acc -= bp[22-12];
+ acc -= bp[23-12]; rp[3] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[4]; acc += bp[21-12];
+ acc += bp[21-12];
+ acc += bp[16-12];
+ acc += bp[13-12];
+ acc += bp[12-12];
+ acc += bp[20-12];
+ acc += bp[22-12];
+ acc -= bp[15-12];
+ acc -= bp[23-12];
+ acc -= bp[23-12]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[22-12];
+ acc += bp[22-12];
+ acc += bp[17-12];
+ acc += bp[14-12];
+ acc += bp[13-12];
+ acc += bp[21-12];
+ acc += bp[23-12];
+ acc -= bp[16-12]; rp[5] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[6]; acc += bp[23-12];
+ acc += bp[23-12];
+ acc += bp[18-12];
+ acc += bp[15-12];
+ acc += bp[14-12];
+ acc += bp[22-12];
+ acc -= bp[17-12]; rp[6] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[7]; acc += bp[19-12];
+ acc += bp[16-12];
+ acc += bp[15-12];
+ acc += bp[23-12];
+ acc -= bp[18-12]; rp[7] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[8]; acc += bp[20-12];
+ acc += bp[17-12];
+ acc += bp[16-12];
+ acc -= bp[19-12]; rp[8] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[9]; acc += bp[21-12];
+ acc += bp[18-12];
+ acc += bp[17-12];
+ acc -= bp[20-12]; rp[9] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[10]; acc += bp[22-12];
+ acc += bp[19-12];
+ acc += bp[18-12];
+ acc -= bp[21-12]; rp[10] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[11]; acc += bp[23-12];
+ acc += bp[20-12];
+ acc += bp[19-12];
+ acc -= bp[22-12]; rp[11] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_384_TOP];
/*S1*/
- nist_set_256(t_d, buf, 0, 0, 0, 0, 0, 23-4, 22-4, 21-4);
+ nist_set_256(t_d, buf.bn, 0, 0, 0, 0, 0, 23-4, 22-4, 21-4);
/* left shift */
{
register BN_ULONG *ap,t,c;
@@ -724,29 +985,31 @@ int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
carry = (int)bn_add_words(r_d+(128/BN_BITS2), r_d+(128/BN_BITS2),
t_d, BN_NIST_256_TOP);
/*S2 */
- carry += (int)bn_add_words(r_d, r_d, buf, BN_NIST_384_TOP);
+ carry += (int)bn_add_words(r_d, r_d, buf.bn, BN_NIST_384_TOP);
/*S3*/
- nist_set_384(t_d,buf,20,19,18,17,16,15,14,13,12,23,22,21);
+ nist_set_384(t_d,buf.bn,20,19,18,17,16,15,14,13,12,23,22,21);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*S4*/
- nist_set_384(t_d,buf,19,18,17,16,15,14,13,12,20,0,23,0);
+ nist_set_384(t_d,buf.bn,19,18,17,16,15,14,13,12,20,0,23,0);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*S5*/
- nist_set_384(t_d, buf,0,0,0,0,23,22,21,20,0,0,0,0);
+ nist_set_384(t_d, buf.bn,0,0,0,0,23,22,21,20,0,0,0,0);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*S6*/
- nist_set_384(t_d,buf,0,0,0,0,0,0,23,22,21,0,0,20);
+ nist_set_384(t_d,buf.bn,0,0,0,0,0,0,23,22,21,0,0,20);
carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*D1*/
- nist_set_384(t_d,buf,22,21,20,19,18,17,16,15,14,13,12,23);
+ nist_set_384(t_d,buf.bn,22,21,20,19,18,17,16,15,14,13,12,23);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*D2*/
- nist_set_384(t_d,buf,0,0,0,0,0,0,0,23,22,21,20,0);
+ nist_set_384(t_d,buf.bn,0,0,0,0,0,0,0,23,22,21,20,0);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_384_TOP);
/*D3*/
- nist_set_384(t_d,buf,0,0,0,0,0,0,0,23,23,0,0,0);
+ nist_set_384(t_d,buf.bn,0,0,0,0,0,0,0,23,23,0,0,0);
carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_384_TOP);
+ }
+#endif
/* see BN_nist_mod_224 for explanation */
u.f = bn_sub_words;
if (carry > 0)
@@ -754,15 +1017,17 @@ int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
else if (carry < 0)
{
carry = (int)bn_add_words(r_d,r_d,_nist_p_384[-carry-1],BN_NIST_384_TOP);
- mask = 0-(size_t)carry;
- u.p = ((size_t)bn_sub_words&mask) | ((size_t)bn_add_words&~mask);
+ mask = 0-(PTR_SIZE_INT)carry;
+ u.p = ((PTR_SIZE_INT)bn_sub_words&mask) |
+ ((PTR_SIZE_INT)bn_add_words&~mask);
}
else
carry = 1;
- mask = 0-(size_t)(*u.f)(c_d,r_d,_nist_p_384[0],BN_NIST_384_TOP);
- mask &= 0-(size_t)carry;
- res = (BN_ULONG *)(((size_t)c_d&~mask) | ((size_t)r_d&mask));
+ mask = 0-(PTR_SIZE_INT)(*u.f)(c_d,r_d,_nist_p_384[0],BN_NIST_384_TOP);
+ mask &= 0-(PTR_SIZE_INT)carry;
+ res = (BN_ULONG *)(((PTR_SIZE_INT)c_d&~mask) |
+ ((PTR_SIZE_INT)r_d&mask));
nist_cp_bn(r_d, res, BN_NIST_384_TOP);
r->top = BN_NIST_384_TOP;
bn_correct_top(r);
@@ -781,7 +1046,7 @@ int BN_nist_mod_521(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
BN_ULONG *r_d, *a_d = a->d,
t_d[BN_NIST_521_TOP],
val,tmp,*res;
- size_t mask;
+ PTR_SIZE_INT mask;
static const BIGNUM _bignum_nist_p_521_sqr = {
(BN_ULONG *)_nist_p_521_sqr,
sizeof(_nist_p_521_sqr)/sizeof(_nist_p_521_sqr[0]),
@@ -826,8 +1091,9 @@ int BN_nist_mod_521(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
r_d[i] &= BN_NIST_521_TOP_MASK;
bn_add_words(r_d,r_d,t_d,BN_NIST_521_TOP);
- mask = 0-(size_t)bn_sub_words(t_d,r_d,_nist_p_521,BN_NIST_521_TOP);
- res = (BN_ULONG *)(((size_t)t_d&~mask) | ((size_t)r_d&mask));
+ mask = 0-(PTR_SIZE_INT)bn_sub_words(t_d,r_d,_nist_p_521,BN_NIST_521_TOP);
+ res = (BN_ULONG *)(((PTR_SIZE_INT)t_d&~mask) |
+ ((PTR_SIZE_INT)r_d&mask));
nist_cp_bn(r_d,res,BN_NIST_521_TOP);
r->top = BN_NIST_521_TOP;
bn_correct_top(r);
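[editor's note] Every NIST_INT64 block added above follows the same pattern: the operands are viewed as 32-bit limbs, the limb additions and subtractions prescribed by the FIPS 186-3 fast-reduction formulas are folded into one signed 64-bit accumulator, and shifting the accumulator right by 32 after each limb carries a possibly negative intermediate into the next position; whatever remains at the end becomes the carry that drives the final add/sub of the modulus. Below is a toy sketch of that accumulator pattern on made-up 3-limb data, not a real NIST schedule, and it assumes arithmetic right shift of a negative int64_t, as the patch does.

#include <stdio.h>
#include <stdint.h>

int main(void)
	{
	/* r += s1 - d1, limb by limb, in the style of the NIST_INT64 blocks */
	uint32_t r[3]  = { 0xfffffff0u, 0x00000001u, 0x00000002u };
	uint32_t s1[3] = { 0x00000020u, 0x00000000u, 0x00000000u };
	uint32_t d1[3] = { 0x00000000u, 0x00000003u, 0x00000000u };
	int64_t acc = 0;
	int carry, i;

	for (i = 0; i < 3; i++)
		{
		acc += r[i];
		acc += s1[i];
		acc -= d1[i];
		r[i] = (uint32_t)acc;  /* keep the low 32 bits */
		acc >>= 32;            /* assumed arithmetic shift: carry may be negative */
		}
	carry = (int)acc;          /* would feed the final modulus adjustment */

	printf("r = %08x %08x %08x, carry = %d\n", r[2], r[1], r[0], carry);
	return 0;
	}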
diff --git a/crypto/bn/bn_opt.c b/crypto/bn/bn_opt.c
deleted file mode 100644
index 21cbb38f622b..000000000000
--- a/crypto/bn/bn_opt.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/* crypto/bn/bn_opt.c */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to. The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code. The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * "This product includes cryptographic software written by
- * Eric Young (eay@cryptsoft.com)"
- * The word 'cryptographic' can be left out if the rouines from the library
- * being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- * the apps directory (application code) you must include an acknowledgement:
- * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed. i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef BN_DEBUG
-# undef NDEBUG /* avoid conflicting definitions */
-# define NDEBUG
-#endif
-
-#include <assert.h>
-#include <limits.h>
-#include <stdio.h>
-#include "cryptlib.h"
-#include "bn_lcl.h"
-
-char *BN_options(void)
- {
- static int init=0;
- static char data[16];
-
- if (!init)
- {
- init++;
-#ifdef BN_LLONG
- BIO_snprintf(data,sizeof data,"bn(%d,%d)",
- (int)sizeof(BN_ULLONG)*8,(int)sizeof(BN_ULONG)*8);
-#else
- BIO_snprintf(data,sizeof data,"bn(%d,%d)",
- (int)sizeof(BN_ULONG)*8,(int)sizeof(BN_ULONG)*8);
-#endif
- }
- return(data);
- }
diff --git a/crypto/bn/bn_print.c b/crypto/bn/bn_print.c
index 810dde34e15c..1743b6a7e212 100644
--- a/crypto/bn/bn_print.c
+++ b/crypto/bn/bn_print.c
@@ -294,6 +294,27 @@ err:
return(0);
}
+int BN_asc2bn(BIGNUM **bn, const char *a)
+ {
+ const char *p = a;
+ if (*p == '-')
+ p++;
+
+ if (p[0] == '0' && (p[1] == 'X' || p[1] == 'x'))
+ {
+ if (!BN_hex2bn(bn, p + 2))
+ return 0;
+ }
+ else
+ {
+ if (!BN_dec2bn(bn, p))
+ return 0;
+ }
+ if (*a == '-')
+ (*bn)->neg = 1;
+ return 1;
+ }
+
#ifndef OPENSSL_NO_BIO
#ifndef OPENSSL_NO_FP_API
int BN_print_fp(FILE *fp, const BIGNUM *a)
@@ -336,3 +357,22 @@ end:
return(ret);
}
#endif
+
+char *BN_options(void)
+ {
+ static int init=0;
+ static char data[16];
+
+ if (!init)
+ {
+ init++;
+#ifdef BN_LLONG
+ BIO_snprintf(data,sizeof data,"bn(%d,%d)",
+ (int)sizeof(BN_ULLONG)*8,(int)sizeof(BN_ULONG)*8);
+#else
+ BIO_snprintf(data,sizeof data,"bn(%d,%d)",
+ (int)sizeof(BN_ULONG)*8,(int)sizeof(BN_ULONG)*8);
+#endif
+ }
+ return(data);
+ }
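[editor's note] BN_asc2bn(), added above, is a convenience parser: a leading "0x"/"0X" routes the string to BN_hex2bn(), anything else goes to BN_dec2bn(), and a leading '-' is re-applied afterwards. A small usage sketch (error handling trimmed; build against libcrypto):

#include <stdio.h>
#include <openssl/bn.h>

int main(void)
	{
	BIGNUM *x = NULL, *y = NULL;
	char *dec, *hex;

	if (!BN_asc2bn(&x, "-0x1fffffffffffff") || !BN_asc2bn(&y, "255"))
		return 1;

	dec = BN_bn2dec(x);   /* "-9007199254740991" */
	hex = BN_bn2hex(y);   /* "FF" */
	printf("x = %s, y = 0x%s\n", dec, hex);

	OPENSSL_free(dec);
	OPENSSL_free(hex);
	BN_free(x);
	BN_free(y);
	return 0;
	}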
diff --git a/crypto/bn/bn_shift.c b/crypto/bn/bn_shift.c
index c4d301afc467..a6fca2c424f0 100644
--- a/crypto/bn/bn_shift.c
+++ b/crypto/bn/bn_shift.c
@@ -99,7 +99,7 @@ int BN_lshift1(BIGNUM *r, const BIGNUM *a)
int BN_rshift1(BIGNUM *r, const BIGNUM *a)
{
BN_ULONG *ap,*rp,t,c;
- int i;
+ int i,j;
bn_check_top(r);
bn_check_top(a);
@@ -109,22 +109,25 @@ int BN_rshift1(BIGNUM *r, const BIGNUM *a)
BN_zero(r);
return(1);
}
+ i = a->top;
+ ap= a->d;
+ j = i-(ap[i-1]==1);
if (a != r)
{
- if (bn_wexpand(r,a->top) == NULL) return(0);
- r->top=a->top;
+ if (bn_wexpand(r,j) == NULL) return(0);
r->neg=a->neg;
}
- ap=a->d;
rp=r->d;
- c=0;
- for (i=a->top-1; i>=0; i--)
+ t=ap[--i];
+ c=(t&1)?BN_TBIT:0;
+ if (t>>=1) rp[i]=t;
+ while (i>0)
{
- t=ap[i];
+ t=ap[--i];
rp[i]=((t>>1)&BN_MASK2)|c;
c=(t&1)?BN_TBIT:0;
}
- bn_correct_top(r);
+ r->top=j;
bn_check_top(r);
return(1);
}
@@ -182,10 +185,11 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n)
BN_zero(r);
return(1);
}
+ i = (BN_num_bits(a)-n+(BN_BITS2-1))/BN_BITS2;
if (r != a)
{
r->neg=a->neg;
- if (bn_wexpand(r,a->top-nw+1) == NULL) return(0);
+ if (bn_wexpand(r,i) == NULL) return(0);
}
else
{
@@ -196,7 +200,7 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n)
f= &(a->d[nw]);
t=r->d;
j=a->top-nw;
- r->top=j;
+ r->top=i;
if (rb == 0)
{
@@ -212,9 +216,8 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n)
l= *(f++);
*(t++) =(tmp|(l<<lb))&BN_MASK2;
}
- *(t++) =(l>>rb)&BN_MASK2;
+ if ((l = (l>>rb)&BN_MASK2)) *(t) = l;
}
- bn_correct_top(r);
bn_check_top(r);
return(1);
}
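[editor's note] The BN_rshift1()/BN_rshift() changes above compute the exact word count of the result up front (for a one-bit shift, j = i - (ap[i-1]==1)) and skip storing a top word that shifts to zero, rather than writing it out and trimming with bn_correct_top() afterwards. The sketch below shows the same word-level loop in isolation on a plain array instead of a BIGNUM; it is illustrative only and assumes a normalized input whose top word is non-zero.

#include <stdio.h>
#include <stdint.h>

#define WBITS 32
#define TBIT  ((uint32_t)1 << (WBITS - 1))

/* r = a >> 1 for an n-word little-endian array; returns the word count of
 * the result, which is n-1 exactly when the top word of a is 1 */
static int rshift1_words(uint32_t *r, const uint32_t *a, int n)
	{
	int top = n - (a[n - 1] == 1);   /* a top word of 1 shifts to 0 and is dropped */
	uint32_t t, c;
	int i = n;

	t = a[--i];
	c = (t & 1) ? TBIT : 0;
	if ((t >>= 1) != 0)              /* only store the high word if it survives */
		r[i] = t;
	while (i > 0)
		{
		t = a[--i];
		r[i] = (t >> 1) | c;
		c = (t & 1) ? TBIT : 0;
		}
	return top;
	}

int main(void)
	{
	uint32_t a[2] = { 0x00000003u, 0x00000001u };   /* 0x1_00000003 */
	uint32_t r[2] = { 0, 0 };
	int top = rshift1_words(r, a, 2);
	printf("top=%d, r[0]=%08x\n", top, r[0]);        /* top=1, r[0]=80000001 */
	return 0;
	}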
diff --git a/crypto/bn/bntest.c b/crypto/bn/bntest.c
index d41daac5fedb..06f5954acc39 100644
--- a/crypto/bn/bntest.c
+++ b/crypto/bn/bntest.c
@@ -262,7 +262,7 @@ int main(int argc, char *argv[])
message(out,"BN_mod_sqrt");
if (!test_sqrt(out,ctx)) goto err;
(void)BIO_flush(out);
-
+#ifndef OPENSSL_NO_EC2M
message(out,"BN_GF2m_add");
if (!test_gf2m_add(out)) goto err;
(void)BIO_flush(out);
@@ -298,7 +298,7 @@ int main(int argc, char *argv[])
message(out,"BN_GF2m_mod_solve_quad");
if (!test_gf2m_mod_solve_quad(out,ctx)) goto err;
(void)BIO_flush(out);
-
+#endif
BN_CTX_free(ctx);
BIO_free(out);
@@ -486,7 +486,7 @@ static void print_word(BIO *bp,BN_ULONG w)
return;
}
#endif
- BIO_printf(bp,"%lX",w);
+ BIO_printf(bp,BN_HEX_FMT1,w);
}
int test_div_word(BIO *bp)
@@ -732,6 +732,8 @@ int test_mont(BIO *bp, BN_CTX *ctx)
BN_init(&n);
mont=BN_MONT_CTX_new();
+ if (mont == NULL)
+ return 0;
BN_bntest_rand(&a,100,0,0); /**/
BN_bntest_rand(&b,100,0,0); /**/
@@ -1059,7 +1061,7 @@ int test_exp(BIO *bp, BN_CTX *ctx)
BN_free(one);
return(1);
}
-
+#ifndef OPENSSL_NO_EC2M
int test_gf2m_add(BIO *bp)
{
BIGNUM a,b,c;
@@ -1116,8 +1118,8 @@ int test_gf2m_mod(BIO *bp)
{
BIGNUM *a,*b[2],*c,*d,*e;
int i, j, ret = 0;
- unsigned int p0[] = {163,7,6,3,0};
- unsigned int p1[] = {193,15,0};
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
@@ -1174,8 +1176,8 @@ int test_gf2m_mod_mul(BIO *bp,BN_CTX *ctx)
{
BIGNUM *a,*b[2],*c,*d,*e,*f,*g,*h;
int i, j, ret = 0;
- unsigned int p0[] = {163,7,6,3,0};
- unsigned int p1[] = {193,15,0};
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
@@ -1245,8 +1247,8 @@ int test_gf2m_mod_sqr(BIO *bp,BN_CTX *ctx)
{
BIGNUM *a,*b[2],*c,*d;
int i, j, ret = 0;
- unsigned int p0[] = {163,7,6,3,0};
- unsigned int p1[] = {193,15,0};
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
@@ -1304,8 +1306,8 @@ int test_gf2m_mod_inv(BIO *bp,BN_CTX *ctx)
{
BIGNUM *a,*b[2],*c,*d;
int i, j, ret = 0;
- unsigned int p0[] = {163,7,6,3,0};
- unsigned int p1[] = {193,15,0};
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
@@ -1359,8 +1361,8 @@ int test_gf2m_mod_div(BIO *bp,BN_CTX *ctx)
{
BIGNUM *a,*b[2],*c,*d,*e,*f;
int i, j, ret = 0;
- unsigned int p0[] = {163,7,6,3,0};
- unsigned int p1[] = {193,15,0};
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
@@ -1422,8 +1424,8 @@ int test_gf2m_mod_exp(BIO *bp,BN_CTX *ctx)
{
BIGNUM *a,*b[2],*c,*d,*e,*f;
int i, j, ret = 0;
- unsigned int p0[] = {163,7,6,3,0};
- unsigned int p1[] = {193,15,0};
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
@@ -1493,8 +1495,8 @@ int test_gf2m_mod_sqrt(BIO *bp,BN_CTX *ctx)
{
BIGNUM *a,*b[2],*c,*d,*e,*f;
int i, j, ret = 0;
- unsigned int p0[] = {163,7,6,3,0};
- unsigned int p1[] = {193,15,0};
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
@@ -1552,8 +1554,8 @@ int test_gf2m_mod_solve_quad(BIO *bp,BN_CTX *ctx)
{
BIGNUM *a,*b[2],*c,*d,*e;
int i, j, s = 0, t, ret = 0;
- unsigned int p0[] = {163,7,6,3,0};
- unsigned int p1[] = {193,15,0};
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
@@ -1634,7 +1636,7 @@ int test_gf2m_mod_solve_quad(BIO *bp,BN_CTX *ctx)
BN_free(e);
return ret;
}
-
+#endif
static int genprime_cb(int p, int n, BN_GENCB *arg)
{
char c='*';
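[editor's note] The bntest.c changes above switch the reduction polynomials from unsigned arrays to int arrays terminated by -1, which appears to be the convention the BN_GF2m_arr2poly()/BN_GF2m_mod_arr() helpers expect in this tree, and fence all GF(2^m) tests behind OPENSSL_NO_EC2M. A hedged usage sketch of the -1-terminated form, assuming the 1.0.1-era BN_GF2m_* API (build against libcrypto):

#include <stdio.h>
#include <openssl/bn.h>

#ifndef OPENSSL_NO_EC2M
int main(void)
	{
	/* x^163 + x^7 + x^6 + x^3 + 1: exponents in decreasing order, -1 terminated */
	static const int p0[] = { 163, 7, 6, 3, 0, -1 };
	BIGNUM *poly = BN_new();
	char *hex;

	if (poly == NULL || !BN_GF2m_arr2poly(p0, poly))
		return 1;

	hex = BN_bn2hex(poly);
	printf("reduction polynomial = 0x%s (%d bits)\n", hex, BN_num_bits(poly));

	OPENSSL_free(hex);
	BN_free(poly);
	return 0;
	}
#else
int main(void) { printf("GF(2^m) support compiled out\n"); return 0; }
#endif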
diff --git a/crypto/bn/exptest.c b/crypto/bn/exptest.c
index f598a07cf5c9..074a8e882a8e 100644
--- a/crypto/bn/exptest.c
+++ b/crypto/bn/exptest.c
@@ -163,7 +163,7 @@ int main(int argc, char *argv[])
{
if (BN_cmp(r_simple,r_mont) != 0)
printf("\nsimple and mont results differ\n");
- if (BN_cmp(r_simple,r_mont) != 0)
+ if (BN_cmp(r_simple,r_mont_const) != 0)
printf("\nsimple and mont const time results differ\n");
if (BN_cmp(r_simple,r_recp) != 0)
printf("\nsimple and recp results differ\n");
@@ -187,7 +187,7 @@ int main(int argc, char *argv[])
BN_free(b);
BN_free(m);
BN_CTX_free(ctx);
- ERR_remove_state(0);
+ ERR_remove_thread_state(NULL);
CRYPTO_mem_leaks(out);
BIO_free(out);
printf(" done\n");
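[editor's note] exptest.c now calls ERR_remove_thread_state(NULL) instead of the deprecated ERR_remove_state(0); passing NULL releases the error queue of the calling thread. A minimal sketch of the per-thread cleanup pattern this enables (OpenSSL 1.0.x API; purely illustrative):

#include <openssl/err.h>

/* call at the end of each thread that used libcrypto, so its
 * per-thread error queue is not leaked */
static void thread_cleanup(void)
	{
	ERR_remove_thread_state(NULL);   /* NULL means "the current thread" */
	}

int main(void)
	{
	ERR_clear_error();               /* use the error queue ... */
	thread_cleanup();                /* ... and release it on the way out */
	return 0;
	}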