author     Jung-uk Kim <jkim@FreeBSD.org>  2015-12-03 17:22:58 +0000
committer  Jung-uk Kim <jkim@FreeBSD.org>  2015-12-03 17:22:58 +0000
commit     737d7e8d3945c206c037e139055821aa0c64bb8e (patch)
tree       b0284af4e4144e27eb9f39e88c53868060774b16 /crypto
parent     e9fcefce9bb70f20c272a996443928c5f6ab8cd8 (diff)
download   src-737d7e8d3945c206c037e139055821aa0c64bb8e.tar.gz
           src-737d7e8d3945c206c037e139055821aa0c64bb8e.zip
Import OpenSSL 1.0.2e. (tag: vendor/openssl/1.0.2e)
Notes:
    svn path=/vendor-crypto/openssl/dist/; revision=291707
    svn path=/vendor-crypto/openssl/1.0.2e/; revision=291708; tag=vendor/openssl/1.0.2e
Diffstat (limited to 'crypto')
-rwxr-xr-x  crypto/aes/asm/aes-586.pl | 6
-rwxr-xr-x  crypto/aes/asm/aesni-mb-x86_64.pl | 2
-rwxr-xr-x  crypto/aes/asm/aesni-sha1-x86_64.pl | 2
-rwxr-xr-x  crypto/aes/asm/aesni-sha256-x86_64.pl | 9
-rwxr-xr-x  crypto/aes/asm/aesni-x86.pl | 2
-rwxr-xr-x  crypto/aes/asm/vpaes-ppc.pl | 198
-rw-r--r--  crypto/asn1/asn1_par.c | 10
-rw-r--r--  crypto/asn1/d2i_pr.c | 15
-rw-r--r--  crypto/asn1/tasn_dec.c | 11
-rw-r--r--  crypto/asn1/x_bignum.c | 5
-rw-r--r--  crypto/asn1/x_pubkey.c | 5
-rw-r--r--  crypto/asn1/x_x509.c | 9
-rw-r--r--  crypto/asn1/x_x509a.c | 7
-rw-r--r--  crypto/bio/b_dump.c | 1
-rw-r--r--  crypto/bio/bss_file.c | 13
-rwxr-xr-x  crypto/bn/asm/armv4-gf2m.pl | 10
-rw-r--r--  crypto/bn/asm/ia64.S | 4
-rwxr-xr-x  crypto/bn/asm/ppc64-mont.pl | 174
-rwxr-xr-x  crypto/bn/asm/rsaz-x86_64.pl | 2
-rwxr-xr-x  crypto/bn/asm/s390x-gf2m.pl | 6
-rwxr-xr-x  crypto/bn/asm/s390x.S | 109
-rwxr-xr-x  crypto/bn/asm/x86-gf2m.pl | 16
-rw-r--r--  crypto/bn/asm/x86_64-gcc.c | 2
-rwxr-xr-x  crypto/bn/asm/x86_64-gf2m.pl | 16
-rwxr-xr-x  crypto/bn/asm/x86_64-mont.pl | 5
-rwxr-xr-x  crypto/bn/asm/x86_64-mont5.pl | 27
-rw-r--r--  crypto/bn/bn_exp.c | 7
-rw-r--r--  crypto/bn/bn_gcd.c | 2
-rw-r--r--  crypto/bn/bn_gf2m.c | 11
-rw-r--r--  crypto/bn/bn_mont.c | 9
-rw-r--r--  crypto/bn/bn_recp.c | 4
-rw-r--r--  crypto/bn/bn_x931p.c | 7
-rw-r--r--  crypto/bn/bntest.c | 74
-rw-r--r--  crypto/bn/rsaz_exp.h | 68
-rw-r--r--  crypto/buffer/buf_str.c | 21
-rw-r--r--  crypto/buffer/buffer.h | 6
-rw-r--r--  crypto/cms/cms_enc.c | 2
-rw-r--r--  crypto/cms/cms_pwri.c | 3
-rw-r--r--  crypto/cms/cms_sd.c | 2
-rw-r--r--  crypto/cms/cms_smime.c | 2
-rw-r--r--  crypto/comp/c_zlib.c | 3
-rw-r--r--  crypto/conf/conf_def.c | 3
-rw-r--r--  crypto/conf/conf_sap.c | 1
-rw-r--r--  crypto/cryptlib.c | 26
-rw-r--r--  crypto/dh/dh.h | 2
-rw-r--r--  crypto/dh/dhtest.c | 4
-rw-r--r--  crypto/dsa/dsa_ameth.c | 1
-rw-r--r--  crypto/dsa/dsa_gen.c | 19
-rw-r--r--  crypto/ec/Makefile | 2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-x86_64.pl | 2
-rw-r--r--  crypto/ec/ec.h | 2
-rw-r--r--  crypto/ec/ec_asn1.c | 8
-rw-r--r--  crypto/ec/ec_key.c | 12
-rw-r--r--  crypto/ecdsa/ecdsa.h | 2
-rw-r--r--  crypto/ecdsa/ecs_lib.c | 2
-rw-r--r--  crypto/engine/eng_cryptodev.c | 7
-rw-r--r--  crypto/engine/eng_list.c | 1
-rw-r--r--  crypto/evp/e_aes_cbc_hmac_sha256.c | 13
-rw-r--r--  crypto/evp/e_des3.c | 2
-rw-r--r--  crypto/evp/encode.c | 198
-rw-r--r--  crypto/evp/evp_key.c | 6
-rw-r--r--  crypto/evp/evp_lib.c | 36
-rw-r--r--  crypto/evp/evp_pbe.c | 16
-rw-r--r--  crypto/evp/p_lib.c | 2
-rw-r--r--  crypto/evp/pmeth_gn.c | 9
-rw-r--r--  crypto/hmac/hm_ameth.c | 9
-rw-r--r--  crypto/jpake/jpake.c | 4
-rw-r--r--  crypto/mem_clr.c | 4
-rwxr-xr-x  crypto/modes/asm/aesni-gcm-x86_64.pl | 2
-rwxr-xr-x  crypto/modes/asm/ghash-armv4.pl | 13
-rwxr-xr-x  crypto/modes/asm/ghash-sparcv9.pl | 18
-rwxr-xr-x  crypto/modes/asm/ghash-x86.pl | 2
-rwxr-xr-x  crypto/modes/asm/ghash-x86_64.pl | 10
-rwxr-xr-x  crypto/modes/asm/ghashp8-ppc.pl | 12
-rwxr-xr-x  crypto/modes/asm/ghashv8-armx.pl | 22
-rw-r--r--  crypto/modes/wrap128.c | 4
-rwxr-xr-x  crypto/ocsp/ocsp_lib.c | 6
-rw-r--r--  crypto/ocsp/ocsp_prn.c | 3
-rw-r--r--  crypto/opensslconf.h | 2
-rw-r--r--  crypto/opensslconf.h.in | 2
-rw-r--r--  crypto/opensslv.h | 6
-rw-r--r--  crypto/pem/pem_info.c | 6
-rw-r--r--  crypto/pem/pvkfmt.c | 10
-rwxr-xr-x  crypto/perlasm/ppc-xlate.pl | 20
-rw-r--r--  crypto/pkcs12/p12_add.c | 27
-rw-r--r--  crypto/pkcs12/p12_crpt.c | 3
-rw-r--r--  crypto/pkcs12/p12_mutl.c | 4
-rw-r--r--  crypto/pkcs7/pk7_doit.c | 3
-rw-r--r--  crypto/pkcs7/pk7_smime.c | 25
-rw-r--r--  crypto/ppccap.c | 2
-rwxr-xr-x  crypto/rc4/asm/rc4-x86_64.pl | 2
-rw-r--r--  crypto/rsa/rsa_ameth.c | 2
-rw-r--r--  crypto/rsa/rsa_gen.c | 4
-rw-r--r--  crypto/rsa/rsa_sign.c | 11
-rw-r--r--  crypto/rsa/rsa_test.c | 32
-rw-r--r--  crypto/sha/asm/sha1-586.pl | 4
-rwxr-xr-x  crypto/sha/asm/sha1-mb-x86_64.pl | 2
-rwxr-xr-x  crypto/sha/asm/sha1-x86_64.pl | 2
-rwxr-xr-x  crypto/sha/asm/sha256-586.pl | 2
-rwxr-xr-x  crypto/sha/asm/sha256-mb-x86_64.pl | 2
-rwxr-xr-x  crypto/sha/asm/sha512-586.pl | 2
-rwxr-xr-x  crypto/sha/asm/sha512-parisc.pl | 2
-rwxr-xr-x  crypto/sha/asm/sha512-x86_64.pl | 2
-rw-r--r--  crypto/sparccpuid.S | 2
-rw-r--r--  crypto/sparcv9cap.c | 47
-rw-r--r--  crypto/srp/srp_vfy.c | 34
-rw-r--r--  crypto/ts/ts_rsp_verify.c | 2
-rwxr-xr-x  crypto/whrlpool/asm/wp-mmx.pl | 2
-rw-r--r--  crypto/x509/x509_lu.c | 2
-rw-r--r--  crypto/x509/x509_vfy.c | 11
-rw-r--r--  crypto/x509/x509_vpm.c | 15
-rw-r--r--  crypto/x509v3/v3_cpols.c | 4
-rw-r--r--  crypto/x509v3/v3_ncons.c | 2
-rw-r--r--  crypto/x509v3/v3_pci.c | 2
-rw-r--r--  crypto/x509v3/v3_pcia.c | 2
-rw-r--r--  crypto/x509v3/v3_purp.c | 19
-rw-r--r--  crypto/x509v3/v3_scts.c | 4
-rw-r--r--  crypto/x509v3/v3_utl.c | 10
118 files changed, 1098 insertions, 611 deletions
diff --git a/crypto/aes/asm/aes-586.pl b/crypto/aes/asm/aes-586.pl
index 451d0e0ed1e2..60286ecb9645 100755
--- a/crypto/aes/asm/aes-586.pl
+++ b/crypto/aes/asm/aes-586.pl
@@ -45,7 +45,7 @@
# the undertaken effort was that it appeared that in tight IA-32
# register window little-endian flavor could achieve slightly higher
# Instruction Level Parallelism, and it indeed resulted in up to 15%
-# better performance on most recent µ-archs...
+# better performance on most recent µ-archs...
#
# Third version adds AES_cbc_encrypt implementation, which resulted in
# up to 40% performance imrovement of CBC benchmark results. 40% was
@@ -224,7 +224,7 @@ sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
$speed_limit=512; # chunks smaller than $speed_limit are
# processed with compact routine in CBC mode
$small_footprint=1; # $small_footprint=1 code is ~5% slower [on
-			# recent µ-archs], but ~5 times smaller!
+			# recent µ-archs], but ~5 times smaller!
# I favor compact code to minimize cache
# contention and in hope to "collect" 5% back
# in real-life applications...
@@ -565,7 +565,7 @@ sub enctransform()
# Performance is not actually extraordinary in comparison to pure
# x86 code. In particular encrypt performance is virtually the same.
# Decrypt performance on the other hand is 15-20% better on newer
-# µ-archs [but we're thankful for *any* improvement here], and ~50%
+# µ-archs [but we're thankful for *any* improvement here], and ~50%
# better on PIII:-) And additionally on the pros side this code
# eliminates redundant references to stack and thus relieves/
# minimizes the pressure on the memory bus.
diff --git a/crypto/aes/asm/aesni-mb-x86_64.pl b/crypto/aes/asm/aesni-mb-x86_64.pl
index 33b1aed3c0b4..5a100fa8983b 100755
--- a/crypto/aes/asm/aesni-mb-x86_64.pl
+++ b/crypto/aes/asm/aesni-mb-x86_64.pl
@@ -63,7 +63,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$avx = ($1>=10) + ($1>=11);
}
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
}
diff --git a/crypto/aes/asm/aesni-sha1-x86_64.pl b/crypto/aes/asm/aesni-sha1-x86_64.pl
index 97992adca7c3..c803cdebc112 100755
--- a/crypto/aes/asm/aesni-sha1-x86_64.pl
+++ b/crypto/aes/asm/aesni-sha1-x86_64.pl
@@ -94,7 +94,7 @@ $avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
$avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
`ml64 2>&1` =~ /Version ([0-9]+)\./ &&
$1>=10);
-$avx=1 if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/ && $2>=3.0);
+$avx=1 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/ && $2>=3.0);
$shaext=1; ### set to zero if compiling for 1.0.1
diff --git a/crypto/aes/asm/aesni-sha256-x86_64.pl b/crypto/aes/asm/aesni-sha256-x86_64.pl
index 19b0433b3b1b..bfe29268c781 100755
--- a/crypto/aes/asm/aesni-sha256-x86_64.pl
+++ b/crypto/aes/asm/aesni-sha256-x86_64.pl
@@ -59,7 +59,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$avx = ($1>=10) + ($1>=12);
}
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
}
@@ -139,11 +139,8 @@ $code.=<<___ if ($avx>1);
je ${func}_avx2
___
$code.=<<___;
- and \$`1<<30`,%eax # mask "Intel CPU" bit
- and \$`1<<28|1<<9`,%r10d # mask AVX+SSSE3 bits
- or %eax,%r10d
- cmp \$`1<<28|1<<9|1<<30`,%r10d
- je ${func}_avx
+ and \$`1<<28`,%r10d # check for AVX
+ jnz ${func}_avx
ud2
___
}
diff --git a/crypto/aes/asm/aesni-x86.pl b/crypto/aes/asm/aesni-x86.pl
index f67df8cf13da..9b2e37aafb1a 100755
--- a/crypto/aes/asm/aesni-x86.pl
+++ b/crypto/aes/asm/aesni-x86.pl
@@ -88,7 +88,7 @@ $inout3="xmm5"; $in1="xmm5";
$inout4="xmm6"; $in0="xmm6";
$inout5="xmm7"; $ivec="xmm7";
-# AESNI extenstion
+# AESNI extension
sub aeskeygenassist
{ my($dst,$src,$imm)=@_;
if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
diff --git a/crypto/aes/asm/vpaes-ppc.pl b/crypto/aes/asm/vpaes-ppc.pl
index 7fda60ed9e4d..1759ae9dcff2 100755
--- a/crypto/aes/asm/vpaes-ppc.pl
+++ b/crypto/aes/asm/vpaes-ppc.pl
@@ -337,24 +337,27 @@ Lenc_entry:
addi $inp, $inp, 15 # 15 is not a typo
?lvsr $outperm, 0, $out
?lvsl $keyperm, 0, $key # prepare for unaligned access
- vnor $outmask, v7, v7 # 0xff..ff
lvx $inptail, 0, $inp # redundant in aligned case
- ?vperm $outmask, v7, $outmask, $outperm
- lvx $outhead, 0, $out
?vperm v0, v0, $inptail, $inpperm
bl _vpaes_encrypt_core
+ andi. r8, $out, 15
+ li r9, 16
+ beq Lenc_out_aligned
+
vperm v0, v0, v0, $outperm # rotate right/left
- vsel v1, $outhead, v0, $outmask
- vmr $outhead, v0
- stvx v1, 0, $out
- addi $out, $out, 15 # 15 is not a typo
- ########
+ mtctr r9
+Lenc_out_unaligned:
+ stvebx v0, 0, $out
+ addi $out, $out, 1
+ bdnz Lenc_out_unaligned
+ b Lenc_done
- lvx v1, 0, $out # redundant in aligned case
- vsel v1, $outhead, v1, $outmask
- stvx v1, 0, $out
+.align 4
+Lenc_out_aligned:
+ stvx v0, 0, $out
+Lenc_done:
li r10,`15+6*$SIZE_T`
li r11,`31+6*$SIZE_T`
@@ -566,24 +569,27 @@ Ldec_entry:
addi $inp, $inp, 15 # 15 is not a typo
?lvsr $outperm, 0, $out
?lvsl $keyperm, 0, $key
- vnor $outmask, v7, v7 # 0xff..ff
lvx $inptail, 0, $inp # redundant in aligned case
- ?vperm $outmask, v7, $outmask, $outperm
- lvx $outhead, 0, $out
?vperm v0, v0, $inptail, $inpperm
bl _vpaes_decrypt_core
+ andi. r8, $out, 15
+ li r9, 16
+ beq Ldec_out_aligned
+
vperm v0, v0, v0, $outperm # rotate right/left
- vsel v1, $outhead, v0, $outmask
- vmr $outhead, v0
- stvx v1, 0, $out
- addi $out, $out, 15 # 15 is not a typo
- ########
+ mtctr r9
+Ldec_out_unaligned:
+ stvebx v0, 0, $out
+ addi $out, $out, 1
+ bdnz Ldec_out_unaligned
+ b Ldec_done
- lvx v1, 0, $out # redundant in aligned case
- vsel v1, $outhead, v1, $outmask
- stvx v1, 0, $out
+.align 4
+Ldec_out_aligned:
+ stvx v0, 0, $out
+Ldec_done:
li r10,`15+6*$SIZE_T`
li r11,`31+6*$SIZE_T`
@@ -658,11 +664,11 @@ Ldec_entry:
$PUSH r0, `$FRAME+$SIZE_T*2+$LRSAVE`($sp)
and r30, r5, r9 # copy length&-16
+ andi. r9, $out, 15 # is $out aligned?
mr r5, r6 # copy pointer to key
mr r31, r7 # copy pointer to iv
- blt Lcbc_abort
- cmpwi r8, 0 # test direction
li r6, -1
+ mcrf cr1, cr0 # put aside $out alignment flag
mr r7, r12 # copy vrsave
mtspr 256, r6 # preserve all AltiVec registers
@@ -672,6 +678,7 @@ Ldec_entry:
lvx v25, r9, r31
?vperm v24, v24, v25, $inpperm
+ cmpwi r8, 0 # test direction
neg r8, $inp # prepare for unaligned access
vxor v7, v7, v7
?lvsl $keyperm, 0, $key
@@ -681,13 +688,37 @@ Ldec_entry:
lvx $inptail, 0, $inp
?vperm $outmask, v7, $outmask, $outperm
addi $inp, $inp, 15 # 15 is not a typo
- lvx $outhead, 0, $out
beq Lcbc_decrypt
bl _vpaes_encrypt_preheat
li r0, 16
+ beq cr1, Lcbc_enc_loop # $out is aligned
+
+ vmr v0, $inptail
+ lvx $inptail, 0, $inp
+ addi $inp, $inp, 16
+ ?vperm v0, v0, $inptail, $inpperm
+ vxor v0, v0, v24 # ^= iv
+
+ bl _vpaes_encrypt_core
+
+ andi. r8, $out, 15
+ vmr v24, v0 # put aside iv
+ sub r9, $out, r8
+ vperm $outhead, v0, v0, $outperm # rotate right/left
+
+Lcbc_enc_head:
+ stvebx $outhead, r8, r9
+ cmpwi r8, 15
+ addi r8, r8, 1
+ bne Lcbc_enc_head
+
+ sub. r30, r30, r0 # len -= 16
+ addi $out, $out, 16
+ beq Lcbc_unaligned_done
+
Lcbc_enc_loop:
vmr v0, $inptail
lvx $inptail, 0, $inp
@@ -713,6 +744,32 @@ Lcbc_decrypt:
bl _vpaes_decrypt_preheat
li r0, 16
+ beq cr1, Lcbc_dec_loop # $out is aligned
+
+ vmr v0, $inptail
+ lvx $inptail, 0, $inp
+ addi $inp, $inp, 16
+ ?vperm v0, v0, $inptail, $inpperm
+ vmr v25, v0 # put aside input
+
+ bl _vpaes_decrypt_core
+
+ andi. r8, $out, 15
+ vxor v0, v0, v24 # ^= iv
+ vmr v24, v25
+ sub r9, $out, r8
+ vperm $outhead, v0, v0, $outperm # rotate right/left
+
+Lcbc_dec_head:
+ stvebx $outhead, r8, r9
+ cmpwi r8, 15
+ addi r8, r8, 1
+ bne Lcbc_dec_head
+
+ sub. r30, r30, r0 # len -= 16
+ addi $out, $out, 16
+ beq Lcbc_unaligned_done
+
Lcbc_dec_loop:
vmr v0, $inptail
lvx $inptail, 0, $inp
@@ -733,23 +790,29 @@ Lcbc_dec_loop:
bne Lcbc_dec_loop
Lcbc_done:
- addi $out, $out, -1
- lvx v1, 0, $out # redundant in aligned case
- vsel v1, $outhead, v1, $outmask
- stvx v1, 0, $out
-
+ beq cr1, Lcbc_write_iv # $out is aligned
+
+Lcbc_unaligned_done:
+ andi. r8, $out, 15
+ sub $out, $out, r8
+ li r9, 0
+Lcbc_tail:
+ stvebx $outhead, r9, $out
+ addi r9, r9, 1
+ cmpw r9, r8
+ bne Lcbc_tail
+
+Lcbc_write_iv:
neg r8, r31 # write [potentially unaligned] iv
+ li r10, 4
?lvsl $outperm, 0, r8
- li r6, 15
- vnor $outmask, v7, v7 # 0xff..ff
- ?vperm $outmask, v7, $outmask, $outperm
- lvx $outhead, 0, r31
+ li r11, 8
+ li r12, 12
vperm v24, v24, v24, $outperm # rotate right/left
- vsel v0, $outhead, v24, $outmask
- lvx v1, r6, r31
- stvx v0, 0, r31
- vsel v1, v24, v1, $outmask
- stvx v1, r6, r31
+ stvewx v24, 0, r31 # ivp is at least 32-bit aligned
+ stvewx v24, r10, r31
+ stvewx v24, r11, r31
+ stvewx v24, r12, r31
mtspr 256, r7 # restore vrsave
li r10,`15+6*$SIZE_T`
@@ -872,18 +935,21 @@ _vpaes_schedule_core:
# encrypting, output zeroth round key after transform
li r8, 0x30 # mov \$0x30,%r8d
- addi r10, r12, 0x80 # lea .Lk_sr(%rip),%r10
+ li r9, 4
+ li r10, 8
+ li r11, 12
?lvsr $outperm, 0, $out # prepare for unaligned access
vnor $outmask, v9, v9 # 0xff..ff
- lvx $outhead, 0, $out
?vperm $outmask, v9, $outmask, $outperm
#stvx v0, 0, $out # vmovdqu %xmm0, (%rdx)
- vperm v1, v0, v0, $outperm # rotate right/left
- vsel v2, $outhead, v1, $outmask
- vmr $outhead, v1
- stvx v2, 0, $out
+ vperm $outhead, v0, v0, $outperm # rotate right/left
+ stvewx $outhead, 0, $out # some are superfluous
+ stvewx $outhead, r9, $out
+ stvewx $outhead, r10, $out
+ addi r10, r12, 0x80 # lea .Lk_sr(%rip),%r10
+ stvewx $outhead, r11, $out
b Lschedule_go
Lschedule_am_decrypting:
@@ -893,20 +959,24 @@ Lschedule_am_decrypting:
addi r10, r12, 0x80 # lea .Lk_sr(%rip),%r10
# decrypting, output zeroth round key after shiftrows
lvx v1, r8, r10 # vmovdqa (%r8,%r10), %xmm1
+ li r9, 4
+ li r10, 8
+ li r11, 12
vperm v4, v3, v3, v1 # vpshufb %xmm1, %xmm3, %xmm3
neg r0, $out # prepare for unaligned access
?lvsl $outperm, 0, r0
- addi $out, $out, 15 # 15 is not typo
vnor $outmask, v9, v9 # 0xff..ff
- lvx $outhead, 0, $out
?vperm $outmask, $outmask, v9, $outperm
#stvx v4, 0, $out # vmovdqu %xmm3, (%rdx)
- vperm v4, v4, v4, $outperm # rotate right/left
- vsel v2, $outhead, v4, $outmask
- vmr $outhead, v4
- stvx v2, 0, $out
+ vperm $outhead, v4, v4, $outperm # rotate right/left
+ stvewx $outhead, 0, $out # some are superfluous
+ stvewx $outhead, r9, $out
+ stvewx $outhead, r10, $out
+ addi r10, r12, 0x80 # lea .Lk_sr(%rip),%r10
+ stvewx $outhead, r11, $out
+ addi $out, $out, 15 # 15 is not typo
xori r8, r8, 0x30 # xor \$0x30, %r8
Lschedule_go:
@@ -1038,14 +1108,15 @@ Lschedule_mangle_last:
#stvx v0, r0, $out # vmovdqu %xmm0, (%rdx) # save last key
vperm v0, v0, v0, $outperm # rotate right/left
+ li r10, 4
vsel v2, $outhead, v0, $outmask
- vmr $outhead, v0
+ li r11, 8
stvx v2, 0, $out
-
- addi $out, $out, 15 # 15 is not typo
- lvx v1, 0, $out # redundant in aligned case
- vsel v1, $outhead, v1, $outmask
- stvx v1, 0, $out
+ li r12, 12
+ stvewx v0, 0, $out # some (or all) are redundant
+ stvewx v0, r10, $out
+ stvewx v0, r11, $out
+ stvewx v0, r12, $out
b Lschedule_mangle_done
.align 4
@@ -1057,15 +1128,18 @@ Lschedule_mangle_last_dec:
bl _vpaes_schedule_transform # output transform
#stvx v0, r0, $out # vmovdqu %xmm0, (%rdx) # save last key
+ addi r9, $out, -15 # -15 is not typo
vperm v0, v0, v0, $outperm # rotate right/left
+ li r10, 4
vsel v2, $outhead, v0, $outmask
- vmr $outhead, v0
+ li r11, 8
stvx v2, 0, $out
+ li r12, 12
+ stvewx v0, 0, r9 # some (or all) are redundant
+ stvewx v0, r10, r9
+ stvewx v0, r11, r9
+ stvewx v0, r12, r9
- addi $out, $out, -15 # -15 is not typo
- lvx v1, 0, $out # redundant in aligned case
- vsel v1, $outhead, v1, $outmask
- stvx v1, 0, $out
Lschedule_mangle_done:
mtlr r7
diff --git a/crypto/asn1/asn1_par.c b/crypto/asn1/asn1_par.c
index a5d2da10bb72..0ca985a2be1e 100644
--- a/crypto/asn1/asn1_par.c
+++ b/crypto/asn1/asn1_par.c
@@ -62,6 +62,10 @@
#include <openssl/objects.h>
#include <openssl/asn1.h>
+#ifndef ASN1_PARSE_MAXDEPTH
+#define ASN1_PARSE_MAXDEPTH 128
+#endif
+
static int asn1_print_info(BIO *bp, int tag, int xclass, int constructed,
int indent);
static int asn1_parse2(BIO *bp, const unsigned char **pp, long length,
@@ -128,6 +132,12 @@ static int asn1_parse2(BIO *bp, const unsigned char **pp, long length,
#else
dump_indent = 6; /* Because we know BIO_dump_indent() */
#endif
+
+ if (depth > ASN1_PARSE_MAXDEPTH) {
+ BIO_puts(bp, "BAD RECURSION DEPTH\n");
+ return 0;
+ }
+
p = *pp;
tot = p + length;
op = p - 1;
diff --git a/crypto/asn1/d2i_pr.c b/crypto/asn1/d2i_pr.c
index c96da091d39c..d21829af192f 100644
--- a/crypto/asn1/d2i_pr.c
+++ b/crypto/asn1/d2i_pr.c
@@ -72,6 +72,7 @@ EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **a, const unsigned char **pp,
long length)
{
EVP_PKEY *ret;
+ const unsigned char *p = *pp;
if ((a == NULL) || (*a == NULL)) {
if ((ret = EVP_PKEY_new()) == NULL) {
@@ -94,21 +95,23 @@ EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **a, const unsigned char **pp,
}
if (!ret->ameth->old_priv_decode ||
- !ret->ameth->old_priv_decode(ret, pp, length)) {
+ !ret->ameth->old_priv_decode(ret, &p, length)) {
if (ret->ameth->priv_decode) {
PKCS8_PRIV_KEY_INFO *p8 = NULL;
- p8 = d2i_PKCS8_PRIV_KEY_INFO(NULL, pp, length);
+ p8 = d2i_PKCS8_PRIV_KEY_INFO(NULL, &p, length);
if (!p8)
goto err;
EVP_PKEY_free(ret);
ret = EVP_PKCS82PKEY(p8);
PKCS8_PRIV_KEY_INFO_free(p8);
-
+ if (ret == NULL)
+ goto err;
} else {
ASN1err(ASN1_F_D2I_PRIVATEKEY, ERR_R_ASN1_LIB);
goto err;
}
}
+ *pp = p;
if (a != NULL)
(*a) = ret;
return (ret);
@@ -136,6 +139,7 @@ EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **a, const unsigned char **pp,
* input is surrounded by an ASN1 SEQUENCE.
*/
inkey = d2i_ASN1_SEQUENCE_ANY(NULL, &p, length);
+ p = *pp;
/*
* Since we only need to discern "traditional format" RSA and DSA keys we
* can just count the elements.
@@ -146,7 +150,7 @@ EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **a, const unsigned char **pp,
keytype = EVP_PKEY_EC;
else if (sk_ASN1_TYPE_num(inkey) == 3) { /* This seems to be PKCS8, not
* traditional format */
- PKCS8_PRIV_KEY_INFO *p8 = d2i_PKCS8_PRIV_KEY_INFO(NULL, pp, length);
+ PKCS8_PRIV_KEY_INFO *p8 = d2i_PKCS8_PRIV_KEY_INFO(NULL, &p, length);
EVP_PKEY *ret;
sk_ASN1_TYPE_pop_free(inkey, ASN1_TYPE_free);
@@ -157,6 +161,9 @@ EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **a, const unsigned char **pp,
}
ret = EVP_PKCS82PKEY(p8);
PKCS8_PRIV_KEY_INFO_free(p8);
+ if (ret == NULL)
+ return NULL;
+ *pp = p;
if (a) {
*a = ret;
}
diff --git a/crypto/asn1/tasn_dec.c b/crypto/asn1/tasn_dec.c
index 7fd336a40226..9256049d1588 100644
--- a/crypto/asn1/tasn_dec.c
+++ b/crypto/asn1/tasn_dec.c
@@ -180,6 +180,8 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
int otag;
int ret = 0;
ASN1_VALUE **pchptr, *ptmpval;
+ int combine = aclass & ASN1_TFLG_COMBINE;
+ aclass &= ~ASN1_TFLG_COMBINE;
if (!pval)
return 0;
if (aux && aux->asn1_cb)
@@ -350,9 +352,9 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
}
asn1_set_choice_selector(pval, i, it);
- *in = p;
if (asn1_cb && !asn1_cb(ASN1_OP_D2I_POST, pval, it, NULL))
goto auxerr;
+ *in = p;
return 1;
case ASN1_ITYPE_NDEF_SEQUENCE:
@@ -489,9 +491,9 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
/* Save encoding */
if (!asn1_enc_save(pval, *in, p - *in, it))
goto auxerr;
- *in = p;
if (asn1_cb && !asn1_cb(ASN1_OP_D2I_POST, pval, it, NULL))
goto auxerr;
+ *in = p;
return 1;
default:
@@ -500,7 +502,8 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
auxerr:
ASN1err(ASN1_F_ASN1_ITEM_EX_D2I, ASN1_R_AUX_ERROR);
err:
- ASN1_item_ex_free(pval, it);
+ if (combine == 0)
+ ASN1_item_ex_free(pval, it);
if (errtt)
ERR_add_error_data(4, "Field=", errtt->field_name,
", Type=", it->sname);
@@ -689,7 +692,7 @@ static int asn1_template_noexp_d2i(ASN1_VALUE **val,
} else {
/* Nothing special */
ret = ASN1_item_ex_d2i(val, &p, len, ASN1_ITEM_ptr(tt->item),
- -1, 0, opt, ctx);
+ -1, tt->flags & ASN1_TFLG_COMBINE, opt, ctx);
if (!ret) {
ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_NESTED_ASN1_ERROR);
goto err;
diff --git a/crypto/asn1/x_bignum.c b/crypto/asn1/x_bignum.c
index a5a403c26e10..eaf046639d6a 100644
--- a/crypto/asn1/x_bignum.c
+++ b/crypto/asn1/x_bignum.c
@@ -141,8 +141,9 @@ static int bn_c2i(ASN1_VALUE **pval, const unsigned char *cont, int len,
int utype, char *free_cont, const ASN1_ITEM *it)
{
BIGNUM *bn;
- if (!*pval)
- bn_new(pval, it);
+
+ if (*pval == NULL && !bn_new(pval, it))
+ return 0;
bn = (BIGNUM *)*pval;
if (!BN_bin2bn(cont, len, bn)) {
bn_free(pval, it);
diff --git a/crypto/asn1/x_pubkey.c b/crypto/asn1/x_pubkey.c
index 4b682018c2de..6c57a7971c9d 100644
--- a/crypto/asn1/x_pubkey.c
+++ b/crypto/asn1/x_pubkey.c
@@ -188,13 +188,16 @@ EVP_PKEY *d2i_PUBKEY(EVP_PKEY **a, const unsigned char **pp, long length)
{
X509_PUBKEY *xpk;
EVP_PKEY *pktmp;
- xpk = d2i_X509_PUBKEY(NULL, pp, length);
+ const unsigned char *q;
+ q = *pp;
+ xpk = d2i_X509_PUBKEY(NULL, &q, length);
if (!xpk)
return NULL;
pktmp = X509_PUBKEY_get(xpk);
X509_PUBKEY_free(xpk);
if (!pktmp)
return NULL;
+ *pp = q;
if (a) {
EVP_PKEY_free(*a);
*a = pktmp;
diff --git a/crypto/asn1/x_x509.c b/crypto/asn1/x_x509.c
index 5f266a26b4c2..e2cac836943d 100644
--- a/crypto/asn1/x_x509.c
+++ b/crypto/asn1/x_x509.c
@@ -180,16 +180,15 @@ X509 *d2i_X509_AUX(X509 **a, const unsigned char **pp, long length)
if (!a || *a == NULL) {
freeret = 1;
}
- ret = d2i_X509(a, pp, length);
+ ret = d2i_X509(a, &q, length);
/* If certificate unreadable then forget it */
if (!ret)
return NULL;
/* update length */
- length -= *pp - q;
- if (!length)
- return ret;
- if (!d2i_X509_CERT_AUX(&ret->aux, pp, length))
+ length -= q - *pp;
+ if (length > 0 && !d2i_X509_CERT_AUX(&ret->aux, &q, length))
goto err;
+ *pp = q;
return ret;
err:
if (freeret) {
diff --git a/crypto/asn1/x_x509a.c b/crypto/asn1/x_x509a.c
index 76bbc1370ff7..ad93592a714a 100644
--- a/crypto/asn1/x_x509a.c
+++ b/crypto/asn1/x_x509a.c
@@ -163,10 +163,13 @@ int X509_add1_reject_object(X509 *x, ASN1_OBJECT *obj)
if (!(objtmp = OBJ_dup(obj)))
return 0;
if (!(aux = aux_get(x)))
- return 0;
+ goto err;
if (!aux->reject && !(aux->reject = sk_ASN1_OBJECT_new_null()))
- return 0;
+ goto err;
return sk_ASN1_OBJECT_push(aux->reject, objtmp);
+ err:
+ ASN1_OBJECT_free(objtmp);
+ return 0;
}
void X509_trust_clear(X509 *x)
diff --git a/crypto/bio/b_dump.c b/crypto/bio/b_dump.c
index ed8e521449a4..ccf0e287c4e8 100644
--- a/crypto/bio/b_dump.c
+++ b/crypto/bio/b_dump.c
@@ -104,7 +104,6 @@ int BIO_dump_indent_cb(int (*cb) (const void *data, size_t len, void *u),
if ((rows * dump_width) < len)
rows++;
for (i = 0; i < rows; i++) {
- buf[0] = '\0'; /* start with empty string */
BUF_strlcpy(buf, str, sizeof buf);
BIO_snprintf(tmp, sizeof tmp, "%04x - ", i * dump_width);
BUF_strlcat(buf, tmp, sizeof buf);
diff --git a/crypto/bio/bss_file.c b/crypto/bio/bss_file.c
index d7f15b0699c9..bfba93e62bbd 100644
--- a/crypto/bio/bss_file.c
+++ b/crypto/bio/bss_file.c
@@ -115,9 +115,8 @@ static BIO_METHOD methods_filep = {
NULL,
};
-BIO *BIO_new_file(const char *filename, const char *mode)
+static FILE *file_fopen(const char *filename, const char *mode)
{
- BIO *ret;
FILE *file = NULL;
# if defined(_WIN32) && defined(CP_UTF8)
@@ -164,6 +163,14 @@ BIO *BIO_new_file(const char *filename, const char *mode)
# else
file = fopen(filename, mode);
# endif
+ return (file);
+}
+
+BIO *BIO_new_file(const char *filename, const char *mode)
+{
+ BIO *ret;
+ FILE *file = file_fopen(filename, mode);
+
if (file == NULL) {
SYSerr(SYS_F_FOPEN, get_last_sys_error());
ERR_add_error_data(5, "fopen('", filename, "','", mode, "')");
@@ -386,7 +393,7 @@ static long MS_CALLBACK file_ctrl(BIO *b, int cmd, long num, void *ptr)
else
strcat(p, "t");
# endif
- fp = fopen(ptr, p);
+ fp = file_fopen(ptr, p);
if (fp == NULL) {
SYSerr(SYS_F_FOPEN, get_last_sys_error());
ERR_add_error_data(5, "fopen('", ptr, "','", p, "')");
diff --git a/crypto/bn/asm/armv4-gf2m.pl b/crypto/bn/asm/armv4-gf2m.pl
index 8f529c95cf05..72381a77240c 100755
--- a/crypto/bn/asm/armv4-gf2m.pl
+++ b/crypto/bn/asm/armv4-gf2m.pl
@@ -27,7 +27,7 @@
# referred below, which improves ECDH and ECDSA verify benchmarks
# by 18-40%.
#
-# CÃ¢mara, D.; GouvÃªa, C. P. L.; LÃ³pez, J. & Dahab, R.: Fast Software
+# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf
@@ -136,7 +136,7 @@ ___
################
# void bn_GF2m_mul_2x2(BN_ULONG *r,
# BN_ULONG a1,BN_ULONG a0,
-#	BN_ULONG b1,BN_ULONG b0);	# r[3..0]=a1a0Â·b1b0
+#	BN_ULONG b1,BN_ULONG b0);	# r[3..0]=a1a0·b1b0
{
$code.=<<___;
.global bn_GF2m_mul_2x2
@@ -159,7 +159,7 @@ $code.=<<___;
mov $mask,#7<<2
sub sp,sp,#32 @ allocate tab[8]
-	bl	mul_1x1_ialu		@ a1Â·b1
+	bl	mul_1x1_ialu		@ a1·b1
str $lo,[$ret,#8]
str $hi,[$ret,#12]
@@ -169,13 +169,13 @@ $code.=<<___;
eor r2,r2,$a
eor $b,$b,r3
eor $a,$a,r2
-	bl	mul_1x1_ialu		@ a0Â·b0
+	bl	mul_1x1_ialu		@ a0·b0
str $lo,[$ret]
str $hi,[$ret,#4]
eor $a,$a,r2
eor $b,$b,r3
-	bl	mul_1x1_ialu		@ (a1+a0)Â·(b1+b0)
+	bl	mul_1x1_ialu		@ (a1+a0)·(b1+b0)
___
@r=map("r$_",(6..9));
$code.=<<___;
diff --git a/crypto/bn/asm/ia64.S b/crypto/bn/asm/ia64.S
index 951abc53ea5b..a9a42abfc302 100644
--- a/crypto/bn/asm/ia64.S
+++ b/crypto/bn/asm/ia64.S
@@ -422,7 +422,7 @@ bn_mul_add_words:
// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on
// Itanium 2. Yes, unlike previous versions it scales:-) Previous
-// version was peforming *all* additions in IALU and was starving
+// version was performing *all* additions in IALU and was starving
// for those even on Itanium 2. In this version one addition is
// moved to FPU and is folded with multiplication. This is at cost
// of propogating the result from previous call to this subroutine
@@ -568,7 +568,7 @@ bn_sqr_comba8:
// I've estimated this routine to run in ~120 ticks, but in reality
// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra
// cycles consumed for instructions fetch? Or did I misinterpret some
-// clause in Itanium Âµ-architecture manual? Comments are welcomed and
+// clause in Itanium µ-architecture manual? Comments are welcomed and
// highly appreciated.
//
// On Itanium 2 it takes ~190 ticks. This is because of stalls on
diff --git a/crypto/bn/asm/ppc64-mont.pl b/crypto/bn/asm/ppc64-mont.pl
index 68e3733e3f79..9e3c12d788e5 100755
--- a/crypto/bn/asm/ppc64-mont.pl
+++ b/crypto/bn/asm/ppc64-mont.pl
@@ -94,6 +94,8 @@ if ($flavour =~ /32/) {
$POP= "ld";
} else { die "nonsense $flavour"; }
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? 4 : 0;
+
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
@@ -294,12 +296,12 @@ $code.=<<___ if ($SIZE_T==8);
extrdi $t0,$a0,32,32 ; lwz $t0,4($ap)
extrdi $t1,$a0,32,0 ; lwz $t1,0($ap)
- lwz $t2,12($ap) ; load a[1] as 32-bit word pair
- lwz $t3,8($ap)
- lwz $t4,4($np) ; load n[0] as 32-bit word pair
- lwz $t5,0($np)
- lwz $t6,12($np) ; load n[1] as 32-bit word pair
- lwz $t7,8($np)
+ lwz $t2,`12^$LITTLE_ENDIAN`($ap) ; load a[1] as 32-bit word pair
+ lwz $t3,`8^$LITTLE_ENDIAN`($ap)
+ lwz $t4,`4^$LITTLE_ENDIAN`($np) ; load n[0] as 32-bit word pair
+ lwz $t5,`0^$LITTLE_ENDIAN`($np)
+ lwz $t6,`12^$LITTLE_ENDIAN`($np) ; load n[1] as 32-bit word pair
+ lwz $t7,`8^$LITTLE_ENDIAN`($np)
___
$code.=<<___ if ($SIZE_T==4);
lwz $a0,0($ap) ; pull ap[0,1] value
@@ -463,14 +465,14 @@ $code.=<<___;
L1st:
___
$code.=<<___ if ($SIZE_T==8);
- lwz $t0,4($ap) ; load a[j] as 32-bit word pair
- lwz $t1,0($ap)
- lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
- lwz $t3,8($ap)
- lwz $t4,4($np) ; load n[j] as 32-bit word pair
- lwz $t5,0($np)
- lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
- lwz $t7,8($np)
+ lwz $t0,`4^$LITTLE_ENDIAN`($ap) ; load a[j] as 32-bit word pair
+ lwz $t1,`0^$LITTLE_ENDIAN`($ap)
+ lwz $t2,`12^$LITTLE_ENDIAN`($ap) ; load a[j+1] as 32-bit word pair
+ lwz $t3,`8^$LITTLE_ENDIAN`($ap)
+ lwz $t4,`4^$LITTLE_ENDIAN`($np) ; load n[j] as 32-bit word pair
+ lwz $t5,`0^$LITTLE_ENDIAN`($np)
+ lwz $t6,`12^$LITTLE_ENDIAN`($np) ; load n[j+1] as 32-bit word pair
+ lwz $t7,`8^$LITTLE_ENDIAN`($np)
___
$code.=<<___ if ($SIZE_T==4);
lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
@@ -505,14 +507,14 @@ $code.=<<___;
___
} else {
$code.=<<___;
- lwz $t1,`$FRAME+0`($sp)
- lwz $t0,`$FRAME+4`($sp)
- lwz $t3,`$FRAME+8`($sp)
- lwz $t2,`$FRAME+12`($sp)
- lwz $t5,`$FRAME+16`($sp)
- lwz $t4,`$FRAME+20`($sp)
- lwz $t7,`$FRAME+24`($sp)
- lwz $t6,`$FRAME+28`($sp)
+ lwz $t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
+ lwz $t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
+ lwz $t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
+ lwz $t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
+ lwz $t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
+ lwz $t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
___
}
$code.=<<___;
@@ -651,8 +653,8 @@ $code.=<<___;
fmadd $T1a,$N1,$na,$T1a
fmadd $T1b,$N1,$nb,$T1b
- lwz $t3,`$FRAME+32`($sp) ; permuted $t1
- lwz $t2,`$FRAME+36`($sp) ; permuted $t0
+ lwz $t3,`$FRAME+32^$LITTLE_ENDIAN`($sp) ; permuted $t1
+ lwz $t2,`$FRAME+36^$LITTLE_ENDIAN`($sp) ; permuted $t0
addc $t4,$t4,$carry
adde $t5,$t5,$c1
srwi $carry,$t4,16
@@ -673,8 +675,8 @@ $code.=<<___;
fmadd $T1a,$N0,$nc,$T1a
fmadd $T1b,$N0,$nd,$T1b
- lwz $t7,`$FRAME+40`($sp) ; permuted $t3
- lwz $t6,`$FRAME+44`($sp) ; permuted $t2
+ lwz $t7,`$FRAME+40^$LITTLE_ENDIAN`($sp) ; permuted $t3
+ lwz $t6,`$FRAME+44^$LITTLE_ENDIAN`($sp) ; permuted $t2
addc $t2,$t2,$carry
adde $t3,$t3,$c1
srwi $carry,$t2,16
@@ -686,8 +688,8 @@ $code.=<<___;
insrwi $carry,$t3,16,0
fmadd $T3a,$N2,$nc,$T3a
fmadd $T3b,$N2,$nd,$T3b
- lwz $t1,`$FRAME+48`($sp) ; permuted $t5
- lwz $t0,`$FRAME+52`($sp) ; permuted $t4
+ lwz $t1,`$FRAME+48^$LITTLE_ENDIAN`($sp) ; permuted $t5
+ lwz $t0,`$FRAME+52^$LITTLE_ENDIAN`($sp) ; permuted $t4
addc $t6,$t6,$carry
adde $t7,$t7,$c1
srwi $carry,$t6,16
@@ -699,8 +701,8 @@ $code.=<<___;
fctid $T0a,$T0a
fctid $T0b,$T0b
- lwz $t5,`$FRAME+56`($sp) ; permuted $t7
- lwz $t4,`$FRAME+60`($sp) ; permuted $t6
+ lwz $t5,`$FRAME+56^$LITTLE_ENDIAN`($sp) ; permuted $t7
+ lwz $t4,`$FRAME+60^$LITTLE_ENDIAN`($sp) ; permuted $t6
addc $t0,$t0,$carry
adde $t1,$t1,$c1
srwi $carry,$t0,16
@@ -787,14 +789,14 @@ $code.=<<___;
___
} else {
$code.=<<___;
- lwz $t1,`$FRAME+0`($sp)
- lwz $t0,`$FRAME+4`($sp)
- lwz $t3,`$FRAME+8`($sp)
- lwz $t2,`$FRAME+12`($sp)
- lwz $t5,`$FRAME+16`($sp)
- lwz $t4,`$FRAME+20`($sp)
- lwz $t7,`$FRAME+24`($sp)
- lwz $t6,`$FRAME+28`($sp)
+ lwz $t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
+ lwz $t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
+ lwz $t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
+ lwz $t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
+ lwz $t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
+ lwz $t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
stfd $dota,`$FRAME+64`($sp)
stfd $dotb,`$FRAME+72`($sp)
@@ -823,14 +825,14 @@ $code.=<<___;
stw $t0,12($tp) ; tp[j-1]
stw $t4,8($tp)
- lwz $t3,`$FRAME+32`($sp) ; permuted $t1
- lwz $t2,`$FRAME+36`($sp) ; permuted $t0
- lwz $t7,`$FRAME+40`($sp) ; permuted $t3
- lwz $t6,`$FRAME+44`($sp) ; permuted $t2
- lwz $t1,`$FRAME+48`($sp) ; permuted $t5
- lwz $t0,`$FRAME+52`($sp) ; permuted $t4
- lwz $t5,`$FRAME+56`($sp) ; permuted $t7
- lwz $t4,`$FRAME+60`($sp) ; permuted $t6
+ lwz $t3,`$FRAME+32^$LITTLE_ENDIAN`($sp) ; permuted $t1
+ lwz $t2,`$FRAME+36^$LITTLE_ENDIAN`($sp) ; permuted $t0
+ lwz $t7,`$FRAME+40^$LITTLE_ENDIAN`($sp) ; permuted $t3
+ lwz $t6,`$FRAME+44^$LITTLE_ENDIAN`($sp) ; permuted $t2
+ lwz $t1,`$FRAME+48^$LITTLE_ENDIAN`($sp) ; permuted $t5
+ lwz $t0,`$FRAME+52^$LITTLE_ENDIAN`($sp) ; permuted $t4
+ lwz $t5,`$FRAME+56^$LITTLE_ENDIAN`($sp) ; permuted $t7
+ lwz $t4,`$FRAME+60^$LITTLE_ENDIAN`($sp) ; permuted $t6
addc $t2,$t2,$carry
adde $t3,$t3,$c1
@@ -857,10 +859,10 @@ $code.=<<___;
stw $t2,20($tp) ; tp[j]
stwu $t0,16($tp)
- lwz $t7,`$FRAME+64`($sp)
- lwz $t6,`$FRAME+68`($sp)
- lwz $t5,`$FRAME+72`($sp)
- lwz $t4,`$FRAME+76`($sp)
+ lwz $t7,`$FRAME+64^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+68^$LITTLE_ENDIAN`($sp)
+ lwz $t5,`$FRAME+72^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+76^$LITTLE_ENDIAN`($sp)
addc $t6,$t6,$carry
adde $t7,$t7,$c1
@@ -1165,23 +1167,23 @@ ___
$code.=<<___;
fmadd $T1a,$N1,$na,$T1a
fmadd $T1b,$N1,$nb,$T1b
- lwz $t1,`$FRAME+0`($sp)
- lwz $t0,`$FRAME+4`($sp)
+ lwz $t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
+ lwz $t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
fmadd $T2a,$N2,$na,$T2a
fmadd $T2b,$N2,$nb,$T2b
- lwz $t3,`$FRAME+8`($sp)
- lwz $t2,`$FRAME+12`($sp)
+ lwz $t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
+ lwz $t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
fmadd $T3a,$N3,$na,$T3a
fmadd $T3b,$N3,$nb,$T3b
- lwz $t5,`$FRAME+16`($sp)
- lwz $t4,`$FRAME+20`($sp)
+ lwz $t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
addc $t0,$t0,$carry
adde $t1,$t1,$c1
srwi $carry,$t0,16
fmadd $T0a,$N0,$na,$T0a
fmadd $T0b,$N0,$nb,$T0b
- lwz $t7,`$FRAME+24`($sp)
- lwz $t6,`$FRAME+28`($sp)
+ lwz $t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
srwi $c1,$t1,16
insrwi $carry,$t1,16,0
@@ -1218,8 +1220,8 @@ $code.=<<___;
fctid $T1a,$T1a
addc $t0,$t0,$t2
adde $t4,$t4,$t3
- lwz $t3,`$FRAME+32`($sp) ; permuted $t1
- lwz $t2,`$FRAME+36`($sp) ; permuted $t0
+ lwz $t3,`$FRAME+32^$LITTLE_ENDIAN`($sp) ; permuted $t1
+ lwz $t2,`$FRAME+36^$LITTLE_ENDIAN`($sp) ; permuted $t0
fctid $T1b,$T1b
addze $carry,$carry
addze $c1,$c1
@@ -1229,19 +1231,19 @@ $code.=<<___;
addc $t2,$t2,$carry
adde $t3,$t3,$c1
srwi $carry,$t2,16
- lwz $t7,`$FRAME+40`($sp) ; permuted $t3
- lwz $t6,`$FRAME+44`($sp) ; permuted $t2
+ lwz $t7,`$FRAME+40^$LITTLE_ENDIAN`($sp) ; permuted $t3
+ lwz $t6,`$FRAME+44^$LITTLE_ENDIAN`($sp) ; permuted $t2
fctid $T2b,$T2b
srwi $c1,$t3,16
insrwi $carry,$t3,16,0
- lwz $t1,`$FRAME+48`($sp) ; permuted $t5
- lwz $t0,`$FRAME+52`($sp) ; permuted $t4
+ lwz $t1,`$FRAME+48^$LITTLE_ENDIAN`($sp) ; permuted $t5
+ lwz $t0,`$FRAME+52^$LITTLE_ENDIAN`($sp) ; permuted $t4
fctid $T3a,$T3a
addc $t6,$t6,$carry
adde $t7,$t7,$c1
srwi $carry,$t6,16
- lwz $t5,`$FRAME+56`($sp) ; permuted $t7
- lwz $t4,`$FRAME+60`($sp) ; permuted $t6
+ lwz $t5,`$FRAME+56^$LITTLE_ENDIAN`($sp) ; permuted $t7
+ lwz $t4,`$FRAME+60^$LITTLE_ENDIAN`($sp) ; permuted $t6
fctid $T3b,$T3b
insrwi $t2,$t6,16,0 ; 64..95 bits
@@ -1354,14 +1356,14 @@ $code.=<<___;
___
} else {
$code.=<<___;
- lwz $t1,`$FRAME+0`($sp)
- lwz $t0,`$FRAME+4`($sp)
- lwz $t3,`$FRAME+8`($sp)
- lwz $t2,`$FRAME+12`($sp)
- lwz $t5,`$FRAME+16`($sp)
- lwz $t4,`$FRAME+20`($sp)
- lwz $t7,`$FRAME+24`($sp)
- lwz $t6,`$FRAME+28`($sp)
+ lwz $t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
+ lwz $t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
+ lwz $t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
+ lwz $t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
+ lwz $t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
+ lwz $t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
stfd $dota,`$FRAME+64`($sp)
stfd $dotb,`$FRAME+72`($sp)
@@ -1397,14 +1399,14 @@ $code.=<<___;
stw $t0,4($tp) ; tp[j-1]
stw $t4,0($tp)
- lwz $t3,`$FRAME+32`($sp) ; permuted $t1
- lwz $t2,`$FRAME+36`($sp) ; permuted $t0
- lwz $t7,`$FRAME+40`($sp) ; permuted $t3
- lwz $t6,`$FRAME+44`($sp) ; permuted $t2
- lwz $t1,`$FRAME+48`($sp) ; permuted $t5
- lwz $t0,`$FRAME+52`($sp) ; permuted $t4
- lwz $t5,`$FRAME+56`($sp) ; permuted $t7
- lwz $t4,`$FRAME+60`($sp) ; permuted $t6
+ lwz $t3,`$FRAME+32^$LITTLE_ENDIAN`($sp) ; permuted $t1
+ lwz $t2,`$FRAME+36^$LITTLE_ENDIAN`($sp) ; permuted $t0
+ lwz $t7,`$FRAME+40^$LITTLE_ENDIAN`($sp) ; permuted $t3
+ lwz $t6,`$FRAME+44^$LITTLE_ENDIAN`($sp) ; permuted $t2
+ lwz $t1,`$FRAME+48^$LITTLE_ENDIAN`($sp) ; permuted $t5
+ lwz $t0,`$FRAME+52^$LITTLE_ENDIAN`($sp) ; permuted $t4
+ lwz $t5,`$FRAME+56^$LITTLE_ENDIAN`($sp) ; permuted $t7
+ lwz $t4,`$FRAME+60^$LITTLE_ENDIAN`($sp) ; permuted $t6
addc $t2,$t2,$carry
adde $t3,$t3,$c1
@@ -1433,12 +1435,12 @@ $code.=<<___;
addc $t2,$t2,$t6
adde $t0,$t0,$t7
- lwz $t7,`$FRAME+64`($sp)
- lwz $t6,`$FRAME+68`($sp)
+ lwz $t7,`$FRAME+64^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+68^$LITTLE_ENDIAN`($sp)
addze $carry,$carry
addze $c1,$c1
- lwz $t5,`$FRAME+72`($sp)
- lwz $t4,`$FRAME+76`($sp)
+ lwz $t5,`$FRAME+72^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+76^$LITTLE_ENDIAN`($sp)
addc $t6,$t6,$carry
adde $t7,$t7,$c1
diff --git a/crypto/bn/asm/rsaz-x86_64.pl b/crypto/bn/asm/rsaz-x86_64.pl
index 3bd45dbac01d..12b571c282dc 100755
--- a/crypto/bn/asm/rsaz-x86_64.pl
+++ b/crypto/bn/asm/rsaz-x86_64.pl
@@ -113,7 +113,7 @@ if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$addx = ($1>=12);
}
-if (!$addx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9])\.([0-9]+)/) {
+if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9])\.([0-9]+)/) {
my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
$addx = ($ver>=3.03);
}
diff --git a/crypto/bn/asm/s390x-gf2m.pl b/crypto/bn/asm/s390x-gf2m.pl
index cd9f13eca292..9d18d40e7784 100755
--- a/crypto/bn/asm/s390x-gf2m.pl
+++ b/crypto/bn/asm/s390x-gf2m.pl
@@ -172,19 +172,19 @@ ___
if ($SIZE_T==8) {
my @r=map("%r$_",(6..9));
$code.=<<___;
-	bras	$ra,_mul_1x1		# a1Â·b1
+	bras	$ra,_mul_1x1		# a1·b1
stmg $lo,$hi,16($rp)
lg $a,`$stdframe+128+4*$SIZE_T`($sp)
lg $b,`$stdframe+128+6*$SIZE_T`($sp)
-	bras	$ra,_mul_1x1		# a0Â·b0
+	bras	$ra,_mul_1x1		# a0·b0
stmg $lo,$hi,0($rp)
lg $a,`$stdframe+128+3*$SIZE_T`($sp)
lg $b,`$stdframe+128+5*$SIZE_T`($sp)
xg $a,`$stdframe+128+4*$SIZE_T`($sp)
xg $b,`$stdframe+128+6*$SIZE_T`($sp)
-	bras	$ra,_mul_1x1		# (a0+a1)Â·(b0+b1)
+	bras	$ra,_mul_1x1		# (a0+a1)·(b0+b1)
lmg @r[0],@r[3],0($rp)
xgr $lo,$hi
diff --git a/crypto/bn/asm/s390x.S b/crypto/bn/asm/s390x.S
index 43fcb79bc011..f5eebe413a28 100755
--- a/crypto/bn/asm/s390x.S
+++ b/crypto/bn/asm/s390x.S
@@ -18,71 +18,106 @@
.align 4
bn_mul_add_words:
lghi zero,0 // zero = 0
- la %r1,0(%r2) // put rp aside
- lghi %r2,0 // i=0;
+ la %r1,0(%r2) // put rp aside [to give way to]
+ lghi %r2,0 // return value
ltgfr %r4,%r4
bler %r14 // if (len<=0) return 0;
- stmg %r6,%r10,48(%r15)
- lghi %r10,3
- lghi %r8,0 // carry = 0
- nr %r10,%r4 // len%4
+ stmg %r6,%r13,48(%r15)
+ lghi %r2,3
+ lghi %r12,0 // carry = 0
+ slgr %r1,%r3 // rp-=ap
+ nr %r2,%r4 // len%4
sra %r4,2 // cnt=len/4
jz .Loop1_madd // carry is incidentally cleared if branch taken
algr zero,zero // clear carry
-.Loop4_madd:
- lg %r7,0(%r2,%r3) // ap[i]
+ lg %r7,0(%r3) // ap[0]
+ lg %r9,8(%r3) // ap[1]
mlgr %r6,%r5 // *=w
- alcgr %r7,%r8 // +=carry
- alcgr %r6,zero
- alg %r7,0(%r2,%r1) // +=rp[i]
- stg %r7,0(%r2,%r1) // rp[i]=
+ brct %r4,.Loop4_madd
+ j .Loop4_madd_tail
- lg %r9,8(%r2,%r3)
+.Loop4_madd:
mlgr %r8,%r5
+ lg %r11,16(%r3) // ap[i+2]
+ alcgr %r7,%r12 // +=carry
+ alcgr %r6,zero
+ alg %r7,0(%r3,%r1) // +=rp[i]
+ stg %r7,0(%r3,%r1) // rp[i]=
+
+ mlgr %r10,%r5
+ lg %r13,24(%r3)
alcgr %r9,%r6
alcgr %r8,zero
- alg %r9,8(%r2,%r1)
- stg %r9,8(%r2,%r1)
+ alg %r9,8(%r3,%r1)
+ stg %r9,8(%r3,%r1)
+
+ mlgr %r12,%r5
+ lg %r7,32(%r3)
+ alcgr %r11,%r8
+ alcgr %r10,zero
+ alg %r11,16(%r3,%r1)
+ stg %r11,16(%r3,%r1)
- lg %r7,16(%r2,%r3)
mlgr %r6,%r5
- alcgr %r7,%r8
- alcgr %r6,zero
- alg %r7,16(%r2,%r1)
- stg %r7,16(%r2,%r1)
+ lg %r9,40(%r3)
+ alcgr %r13,%r10
+ alcgr %r12,zero
+ alg %r13,24(%r3,%r1)
+ stg %r13,24(%r3,%r1)
- lg %r9,24(%r2,%r3)
+ la %r3,32(%r3) // i+=4
+ brct %r4,.Loop4_madd
+
+.Loop4_madd_tail:
mlgr %r8,%r5
+ lg %r11,16(%r3)
+ alcgr %r7,%r12 // +=carry
+ alcgr %r6,zero
+ alg %r7,0(%r3,%r1) // +=rp[i]
+ stg %r7,0(%r3,%r1) // rp[i]=
+
+ mlgr %r10,%r5
+ lg %r13,24(%r3)
alcgr %r9,%r6
alcgr %r8,zero
- alg %r9,24(%r2,%r1)
- stg %r9,24(%r2,%r1)
+ alg %r9,8(%r3,%r1)
+ stg %r9,8(%r3,%r1)
- la %r2,32(%r2) // i+=4
- brct %r4,.Loop4_madd
+ mlgr %r12,%r5
+ alcgr %r11,%r8
+ alcgr %r10,zero
+ alg %r11,16(%r3,%r1)
+ stg %r11,16(%r3,%r1)
- la %r10,1(%r10) // see if len%4 is zero ...
- brct %r10,.Loop1_madd // without touching condition code:-)
+ alcgr %r13,%r10
+ alcgr %r12,zero
+ alg %r13,24(%r3,%r1)
+ stg %r13,24(%r3,%r1)
+
+ la %r3,32(%r3) // i+=4
+
+ la %r2,1(%r2) // see if len%4 is zero ...
+ brct %r2,.Loop1_madd // without touching condition code:-)
.Lend_madd:
- alcgr %r8,zero // collect carry bit
- lgr %r2,%r8
- lmg %r6,%r10,48(%r15)
+ lgr %r2,zero // return value
+ alcgr %r2,%r12 // collect even carry bit
+ lmg %r6,%r13,48(%r15)
br %r14
.Loop1_madd:
- lg %r7,0(%r2,%r3) // ap[i]
+ lg %r7,0(%r3) // ap[i]
mlgr %r6,%r5 // *=w
- alcgr %r7,%r8 // +=carry
+ alcgr %r7,%r12 // +=carry
alcgr %r6,zero
- alg %r7,0(%r2,%r1) // +=rp[i]
- stg %r7,0(%r2,%r1) // rp[i]=
+ alg %r7,0(%r3,%r1) // +=rp[i]
+ stg %r7,0(%r3,%r1) // rp[i]=
- lgr %r8,%r6
- la %r2,8(%r2) // i++
- brct %r10,.Loop1_madd
+ lgr %r12,%r6
+ la %r3,8(%r3) // i++
+ brct %r2,.Loop1_madd
j .Lend_madd
.size bn_mul_add_words,.-bn_mul_add_words
diff --git a/crypto/bn/asm/x86-gf2m.pl b/crypto/bn/asm/x86-gf2m.pl
index 808a1e59691d..b57953027298 100755
--- a/crypto/bn/asm/x86-gf2m.pl
+++ b/crypto/bn/asm/x86-gf2m.pl
@@ -14,7 +14,7 @@
# the time being... Except that it has three code paths: pure integer
# code suitable for any x86 CPU, MMX code suitable for PIII and later
# and PCLMULQDQ suitable for Westmere and later. Improvement varies
-# from one benchmark and Âµ-arch to another. Below are interval values
+# from one benchmark and µ-arch to another. Below are interval values
# for 163- and 571-bit ECDH benchmarks relative to compiler-generated
# code:
#
@@ -226,22 +226,22 @@ if ($sse2) {
&push ("edi");
&mov ($a,&wparam(1));
&mov ($b,&wparam(3));
-	&call	("_mul_1x1_mmx");	# a1Â·b1
+	&call	("_mul_1x1_mmx");	# a1·b1
&movq ("mm7",$R);
&mov ($a,&wparam(2));
&mov ($b,&wparam(4));
-	&call	("_mul_1x1_mmx");	# a0Â·b0
+	&call	("_mul_1x1_mmx");	# a0·b0
&movq ("mm6",$R);
&mov ($a,&wparam(1));
&mov ($b,&wparam(3));
&xor ($a,&wparam(2));
&xor ($b,&wparam(4));
-	&call	("_mul_1x1_mmx");	# (a0+a1)Â·(b0+b1)
+	&call	("_mul_1x1_mmx");	# (a0+a1)·(b0+b1)
&pxor ($R,"mm7");
&mov ($a,&wparam(0));
-	&pxor	($R,"mm6");		# (a0+a1)Â·(b0+b1)-a1Â·b1-a0Â·b0
+	&pxor	($R,"mm6");		# (a0+a1)·(b0+b1)-a1·b1-a0·b0
&movq ($A,$R);
&psllq ($R,32);
@@ -266,13 +266,13 @@ if ($sse2) {
&mov ($a,&wparam(1));
&mov ($b,&wparam(3));
-	&call	("_mul_1x1_ialu");	# a1Â·b1
+	&call	("_mul_1x1_ialu");	# a1·b1
&mov (&DWP(8,"esp"),$lo);
&mov (&DWP(12,"esp"),$hi);
&mov ($a,&wparam(2));
&mov ($b,&wparam(4));
-	&call	("_mul_1x1_ialu");	# a0Â·b0
+	&call	("_mul_1x1_ialu");	# a0·b0
&mov (&DWP(0,"esp"),$lo);
&mov (&DWP(4,"esp"),$hi);
@@ -280,7 +280,7 @@ if ($sse2) {
&mov ($b,&wparam(3));
&xor ($a,&wparam(2));
&xor ($b,&wparam(4));
-	&call	("_mul_1x1_ialu");	# (a0+a1)Â·(b0+b1)
+	&call	("_mul_1x1_ialu");	# (a0+a1)·(b0+b1)
&mov ("ebp",&wparam(0));
@r=("ebx","ecx","edi","esi");
diff --git a/crypto/bn/asm/x86_64-gcc.c b/crypto/bn/asm/x86_64-gcc.c
index d5488866e0cf..d77dc433d405 100644
--- a/crypto/bn/asm/x86_64-gcc.c
+++ b/crypto/bn/asm/x86_64-gcc.c
@@ -65,7 +65,7 @@
# undef mul_add
/*-
- * "m"(a), "+m"(r) is the way to favor DirectPath Âµ-code;
+ * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
* "g"(0) let the compiler to decide where does it
* want to keep the value of zero;
*/
diff --git a/crypto/bn/asm/x86_64-gf2m.pl b/crypto/bn/asm/x86_64-gf2m.pl
index 226c66c35e35..42bbec2fb7ef 100755
--- a/crypto/bn/asm/x86_64-gf2m.pl
+++ b/crypto/bn/asm/x86_64-gf2m.pl
@@ -13,7 +13,7 @@
# in bn_gf2m.c. It's kind of low-hanging mechanical port from C for
# the time being... Except that it has two code paths: code suitable
# for any x86_64 CPU and PCLMULQDQ one suitable for Westmere and
-# later. Improvement varies from one benchmark and Âµ-arch to another.
+# later. Improvement varies from one benchmark and µ-arch to another.
# Vanilla code path is at most 20% faster than compiler-generated code
# [not very impressive], while PCLMULQDQ - whole 85%-160% better on
# 163- and 571-bit ECDH benchmarks on Intel CPUs. Keep in mind that
@@ -184,13 +184,13 @@ ___
$code.=<<___;
movdqa %xmm0,%xmm4
movdqa %xmm1,%xmm5
-	pclmulqdq	\$0,%xmm1,%xmm0	# a1Â·b1
+	pclmulqdq	\$0,%xmm1,%xmm0	# a1·b1
pxor %xmm2,%xmm4
pxor %xmm3,%xmm5
-	pclmulqdq	\$0,%xmm3,%xmm2	# a0Â·b0
-	pclmulqdq	\$0,%xmm5,%xmm4	# (a0+a1)Â·(b0+b1)
+	pclmulqdq	\$0,%xmm3,%xmm2	# a0·b0
+	pclmulqdq	\$0,%xmm5,%xmm4	# (a0+a1)·(b0+b1)
xorps %xmm0,%xmm4
-	xorps		%xmm2,%xmm4	# (a0+a1)Â·(b0+b1)-a0Â·b0-a1Â·b1
+	xorps		%xmm2,%xmm4	# (a0+a1)·(b0+b1)-a0·b0-a1·b1
movdqa %xmm4,%xmm5
pslldq \$8,%xmm4
psrldq \$8,%xmm5
@@ -225,13 +225,13 @@ $code.=<<___;
mov \$0xf,$mask
mov $a1,$a
mov $b1,$b
-	call	_mul_1x1		# a1Â·b1
+	call	_mul_1x1		# a1·b1
mov $lo,16(%rsp)
mov $hi,24(%rsp)
mov 48(%rsp),$a
mov 64(%rsp),$b
-	call	_mul_1x1		# a0Â·b0
+	call	_mul_1x1		# a0·b0
mov $lo,0(%rsp)
mov $hi,8(%rsp)
@@ -239,7 +239,7 @@ $code.=<<___;
mov 56(%rsp),$b
xor 48(%rsp),$a
xor 64(%rsp),$b
-	call	_mul_1x1		# (a0+a1)Â·(b0+b1)
+	call	_mul_1x1		# (a0+a1)·(b0+b1)
___
@r=("%rbx","%rcx","%rdi","%rsi");
$code.=<<___;
diff --git a/crypto/bn/asm/x86_64-mont.pl b/crypto/bn/asm/x86_64-mont.pl
index 2989b58f256e..725833d022e2 100755
--- a/crypto/bn/asm/x86_64-mont.pl
+++ b/crypto/bn/asm/x86_64-mont.pl
@@ -68,6 +68,11 @@ if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$addx = ($1>=12);
}
+if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9])\.([0-9]+)/) {
+ my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
+ $addx = ($ver>=3.03);
+}
+
# int bn_mul_mont(
$rp="%rdi"; # BN_ULONG *rp,
$ap="%rsi"; # const BN_ULONG *ap,
diff --git a/crypto/bn/asm/x86_64-mont5.pl b/crypto/bn/asm/x86_64-mont5.pl
index 820de3d6f627..64e668f140c2 100755
--- a/crypto/bn/asm/x86_64-mont5.pl
+++ b/crypto/bn/asm/x86_64-mont5.pl
@@ -53,6 +53,11 @@ if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$addx = ($1>=12);
}
+if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9])\.([0-9]+)/) {
+ my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
+ $addx = ($ver>=3.03);
+}
+
# int bn_mul_mont_gather5(
$rp="%rdi"; # BN_ULONG *rp,
$ap="%rsi"; # const BN_ULONG *ap,
@@ -1779,6 +1784,15 @@ sqr8x_reduction:
.align 32
.L8x_tail_done:
add (%rdx),%r8 # can this overflow?
+ adc \$0,%r9
+ adc \$0,%r10
+ adc \$0,%r11
+ adc \$0,%r12
+ adc \$0,%r13
+ adc \$0,%r14
+ adc \$0,%r15 # can't overflow, because we
+ # started with "overhung" part
+ # of multiplication
xor %rax,%rax
neg $carry
@@ -3125,6 +3139,15 @@ sqrx8x_reduction:
.align 32
.Lsqrx8x_tail_done:
add 24+8(%rsp),%r8 # can this overflow?
+ adc \$0,%r9
+ adc \$0,%r10
+ adc \$0,%r11
+ adc \$0,%r12
+ adc \$0,%r13
+ adc \$0,%r14
+ adc \$0,%r15 # can't overflow, because we
+ # started with "overhung" part
+ # of multiplication
mov $carry,%rax # xor %rax,%rax
sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
@@ -3168,13 +3191,11 @@ my ($rptr,$nptr)=("%rdx","%rbp");
my @ri=map("%r$_",(10..13));
my @ni=map("%r$_",(14..15));
$code.=<<___;
- xor %rbx,%rbx
+ xor %ebx,%ebx
sub %r15,%rsi # compare top-most words
adc %rbx,%rbx
mov %rcx,%r10 # -$num
- .byte 0x67
or %rbx,%rax
- .byte 0x67
mov %rcx,%r9 # -$num
xor \$1,%rax
sar \$3+2,%rcx # cf=0
diff --git a/crypto/bn/bn_exp.c b/crypto/bn/bn_exp.c
index 24afdd60a227..50cf3231b07b 100644
--- a/crypto/bn/bn_exp.c
+++ b/crypto/bn/bn_exp.c
@@ -662,12 +662,13 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
bn_check_top(p);
bn_check_top(m);
- top = m->top;
-
- if (!(m->d[0] & 1)) {
+ if (!BN_is_odd(m)) {
BNerr(BN_F_BN_MOD_EXP_MONT_CONSTTIME, BN_R_CALLED_WITH_EVEN_MODULUS);
return (0);
}
+
+ top = m->top;
+
bits = BN_num_bits(p);
if (bits == 0) {
ret = BN_one(rr);
diff --git a/crypto/bn/bn_gcd.c b/crypto/bn/bn_gcd.c
index 97c55ab72098..ce59fe701f9d 100644
--- a/crypto/bn/bn_gcd.c
+++ b/crypto/bn/bn_gcd.c
@@ -583,6 +583,7 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
* BN_div_no_branch will be called eventually.
*/
pB = &local_B;
+ local_B.flags = 0;
BN_with_flags(pB, B, BN_FLG_CONSTTIME);
if (!BN_nnmod(B, pB, A, ctx))
goto err;
@@ -610,6 +611,7 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
* BN_div_no_branch will be called eventually.
*/
pA = &local_A;
+ local_A.flags = 0;
BN_with_flags(pA, A, BN_FLG_CONSTTIME);
/* (D, M) := (A/B, A%B) ... */
diff --git a/crypto/bn/bn_gf2m.c b/crypto/bn/bn_gf2m.c
index cfa1c7ce1499..2c61da11093f 100644
--- a/crypto/bn/bn_gf2m.c
+++ b/crypto/bn/bn_gf2m.c
@@ -575,7 +575,7 @@ int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const int p[],
bn_check_top(a);
BN_CTX_start(ctx);
if ((s = BN_CTX_get(ctx)) == NULL)
- return 0;
+ goto err;
if (!bn_wexpand(s, 2 * a->top))
goto err;
@@ -699,18 +699,21 @@ int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
int top = p->top;
BN_ULONG *udp, *bdp, *vdp, *cdp;
- bn_wexpand(u, top);
+ if (!bn_wexpand(u, top))
+ goto err;
udp = u->d;
for (i = u->top; i < top; i++)
udp[i] = 0;
u->top = top;
- bn_wexpand(b, top);
+ if (!bn_wexpand(b, top))
+ goto err;
bdp = b->d;
bdp[0] = 1;
for (i = 1; i < top; i++)
bdp[i] = 0;
b->top = top;
- bn_wexpand(c, top);
+ if (!bn_wexpand(c, top))
+ goto err;
cdp = c->d;
for (i = 0; i < top; i++)
cdp[i] = 0;
diff --git a/crypto/bn/bn_mont.c b/crypto/bn/bn_mont.c
index aadd5db1d8db..be95bd55d020 100644
--- a/crypto/bn/bn_mont.c
+++ b/crypto/bn/bn_mont.c
@@ -361,9 +361,9 @@ void BN_MONT_CTX_free(BN_MONT_CTX *mont)
if (mont == NULL)
return;
- BN_free(&(mont->RR));
- BN_free(&(mont->N));
- BN_free(&(mont->Ni));
+ BN_clear_free(&(mont->RR));
+ BN_clear_free(&(mont->N));
+ BN_clear_free(&(mont->Ni));
if (mont->flags & BN_FLG_MALLOCED)
OPENSSL_free(mont);
}
@@ -373,6 +373,9 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx)
int ret = 0;
BIGNUM *Ri, *R;
+ if (BN_is_zero(mod))
+ return 0;
+
BN_CTX_start(ctx);
if ((Ri = BN_CTX_get(ctx)) == NULL)
goto err;
diff --git a/crypto/bn/bn_recp.c b/crypto/bn/bn_recp.c
index 6826f93b3882..7497ac624d94 100644
--- a/crypto/bn/bn_recp.c
+++ b/crypto/bn/bn_recp.c
@@ -152,8 +152,10 @@ int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,
if (BN_ucmp(m, &(recp->N)) < 0) {
BN_zero(d);
- if (!BN_copy(r, m))
+ if (!BN_copy(r, m)) {
+ BN_CTX_end(ctx);
return 0;
+ }
BN_CTX_end(ctx);
return (1);
}
diff --git a/crypto/bn/bn_x931p.c b/crypto/bn/bn_x931p.c
index 6d76b1284e10..efa48bdf8772 100644
--- a/crypto/bn/bn_x931p.c
+++ b/crypto/bn/bn_x931p.c
@@ -213,14 +213,14 @@ int BN_X931_generate_Xpq(BIGNUM *Xp, BIGNUM *Xq, int nbits, BN_CTX *ctx)
* exceeded.
*/
if (!BN_rand(Xp, nbits, 1, 0))
- return 0;
+ goto err;
BN_CTX_start(ctx);
t = BN_CTX_get(ctx);
for (i = 0; i < 1000; i++) {
if (!BN_rand(Xq, nbits, 1, 0))
- return 0;
+ goto err;
/* Check that |Xp - Xq| > 2^(nbits - 100) */
BN_sub(t, Xp, Xq);
if (BN_num_bits(t) > (nbits - 100))
@@ -234,6 +234,9 @@ int BN_X931_generate_Xpq(BIGNUM *Xp, BIGNUM *Xq, int nbits, BN_CTX *ctx)
return 0;
+ err:
+ BN_CTX_end(ctx);
+ return 0;
}
/*
diff --git a/crypto/bn/bntest.c b/crypto/bn/bntest.c
index 470d5dabf1ec..1e35988022bb 100644
--- a/crypto/bn/bntest.c
+++ b/crypto/bn/bntest.c
@@ -441,6 +441,14 @@ int test_div(BIO *bp, BN_CTX *ctx)
BN_init(&d);
BN_init(&e);
+ BN_one(&a);
+ BN_zero(&b);
+
+ if (BN_div(&d, &c, &a, &b, ctx)) {
+ fprintf(stderr, "Division by zero succeeded!\n");
+ return 0;
+ }
+
for (i = 0; i < num0 + num1; i++) {
if (i < num1) {
BN_bntest_rand(&a, 400, 0, 0);
@@ -516,9 +524,9 @@ int test_div_word(BIO *bp)
do {
BN_bntest_rand(&a, 512, -1, 0);
BN_bntest_rand(&b, BN_BITS2, -1, 0);
- s = b.d[0];
- } while (!s);
+ } while (BN_is_zero(&b));
+ s = b.d[0];
BN_copy(&b, &a);
r = BN_div_word(&b, s);
@@ -781,6 +789,18 @@ int test_mont(BIO *bp, BN_CTX *ctx)
if (mont == NULL)
return 0;
+ BN_zero(&n);
+ if (BN_MONT_CTX_set(mont, &n, ctx)) {
+ fprintf(stderr, "BN_MONT_CTX_set succeeded for zero modulus!\n");
+ return 0;
+ }
+
+ BN_set_word(&n, 16);
+ if (BN_MONT_CTX_set(mont, &n, ctx)) {
+ fprintf(stderr, "BN_MONT_CTX_set succeeded for even modulus!\n");
+ return 0;
+ }
+
BN_bntest_rand(&a, 100, 0, 0);
BN_bntest_rand(&b, 100, 0, 0);
for (i = 0; i < num2; i++) {
@@ -887,6 +907,14 @@ int test_mod_mul(BIO *bp, BN_CTX *ctx)
d = BN_new();
e = BN_new();
+ BN_one(a);
+ BN_one(b);
+ BN_zero(c);
+ if (BN_mod_mul(e, a, b, c, ctx)) {
+ fprintf(stderr, "BN_mod_mul with zero modulus succeeded!\n");
+ return 0;
+ }
+
for (j = 0; j < 3; j++) {
BN_bntest_rand(c, 1024, 0, 0);
for (i = 0; i < num0; i++) {
@@ -952,6 +980,14 @@ int test_mod_exp(BIO *bp, BN_CTX *ctx)
d = BN_new();
e = BN_new();
+ BN_one(a);
+ BN_one(b);
+ BN_zero(c);
+ if (BN_mod_exp(d, a, b, c, ctx)) {
+ fprintf(stderr, "BN_mod_exp with zero modulus succeeded!\n");
+ return 0;
+ }
+
BN_bntest_rand(c, 30, 0, 1); /* must be odd for montgomery */
for (i = 0; i < num2; i++) {
BN_bntest_rand(a, 20 + i * 5, 0, 0);
@@ -980,6 +1016,24 @@ int test_mod_exp(BIO *bp, BN_CTX *ctx)
return 0;
}
}
+
+ /* Regression test for carry propagation bug in sqr8x_reduction */
+ BN_hex2bn(&a, "050505050505");
+ BN_hex2bn(&b, "02");
+ BN_hex2bn(&c,
+ "4141414141414141414141274141414141414141414141414141414141414141"
+ "4141414141414141414141414141414141414141414141414141414141414141"
+ "4141414141414141414141800000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000001");
+ BN_mod_exp(d, a, b, c, ctx);
+ BN_mul(e, a, a, ctx);
+ if (BN_cmp(d, e)) {
+ fprintf(stderr, "BN_mod_exp and BN_mul produce different results!\n");
+ return 0;
+ }
+
BN_free(a);
BN_free(b);
BN_free(c);
@@ -999,6 +1053,22 @@ int test_mod_exp_mont_consttime(BIO *bp, BN_CTX *ctx)
d = BN_new();
e = BN_new();
+ BN_one(a);
+ BN_one(b);
+ BN_zero(c);
+ if (BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) {
+ fprintf(stderr, "BN_mod_exp_mont_consttime with zero modulus "
+ "succeeded\n");
+ return 0;
+ }
+
+ BN_set_word(c, 16);
+ if (BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) {
+ fprintf(stderr, "BN_mod_exp_mont_consttime with even modulus "
+ "succeeded\n");
+ return 0;
+ }
+
BN_bntest_rand(c, 30, 0, 1); /* must be odd for montgomery */
for (i = 0; i < num2; i++) {
BN_bntest_rand(a, 20 + i * 5, 0, 0);
diff --git a/crypto/bn/rsaz_exp.h b/crypto/bn/rsaz_exp.h
index 33361de99572..229e181f67b5 100644
--- a/crypto/bn/rsaz_exp.h
+++ b/crypto/bn/rsaz_exp.h
@@ -1,32 +1,44 @@
-/******************************************************************************
-* Copyright(c) 2012, Intel Corp.
-* Developers and authors:
-* Shay Gueron (1, 2), and Vlad Krasnov (1)
-* (1) Intel Corporation, Israel Development Center, Haifa, Israel
-* (2) University of Haifa, Israel
+/*****************************************************************************
+* *
+* Copyright (c) 2012, Intel Corporation *
+* *
+* All rights reserved. *
+* *
+* Redistribution and use in source and binary forms, with or without *
+* modification, are permitted provided that the following conditions are *
+* met: *
+* *
+* * Redistributions of source code must retain the above copyright *
+* notice, this list of conditions and the following disclaimer. *
+* *
+* * Redistributions in binary form must reproduce the above copyright *
+* notice, this list of conditions and the following disclaimer in the *
+* documentation and/or other materials provided with the *
+* distribution. *
+* *
+* * Neither the name of the Intel Corporation nor the names of its *
+* contributors may be used to endorse or promote products derived from *
+* this software without specific prior written permission. *
+* *
+* *
+* THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY *
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR *
+* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR *
+* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, *
+* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR *
+* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
+* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
+* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *
+* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
+* *
******************************************************************************
-* LICENSE:
-* This submission to OpenSSL is to be made available under the OpenSSL
-* license, and only to the OpenSSL project, in order to allow integration
-* into the publicly distributed code.
-* The use of this code, or portions of this code, or concepts embedded in
-* this code, or modification of this code and/or algorithm(s) in it, or the
-* use of this code for any other purpose than stated above, requires special
-* licensing.
-******************************************************************************
-* DISCLAIMER:
-* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS AND THE COPYRIGHT OWNERS
-* ``AS IS''. ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS OR THE COPYRIGHT
-* OWNERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
-* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+* Developers and authors: *
+* Shay Gueron (1, 2), and Vlad Krasnov (1) *
+* (1) Intel Corporation, Israel Development Center, Haifa, Israel *
+* (2) University of Haifa, Israel *
+*****************************************************************************/
#ifndef RSAZ_EXP_H
# define RSAZ_EXP_H
diff --git a/crypto/buffer/buf_str.c b/crypto/buffer/buf_str.c
index ebc5ab4646ce..fa0d608e76bb 100644
--- a/crypto/buffer/buf_str.c
+++ b/crypto/buffer/buf_str.c
@@ -58,6 +58,7 @@
#include <stdio.h>
#include "cryptlib.h"
+#include <limits.h>
#include <openssl/buffer.h>
size_t BUF_strnlen(const char *str, size_t maxlen)
@@ -72,7 +73,7 @@ size_t BUF_strnlen(const char *str, size_t maxlen)
char *BUF_strdup(const char *str)
{
if (str == NULL)
- return (NULL);
+ return NULL;
return BUF_strndup(str, strlen(str));
}
@@ -81,16 +82,22 @@ char *BUF_strndup(const char *str, size_t siz)
char *ret;
if (str == NULL)
- return (NULL);
+ return NULL;
siz = BUF_strnlen(str, siz);
+ if (siz >= INT_MAX)
+ return NULL;
+
ret = OPENSSL_malloc(siz + 1);
if (ret == NULL) {
BUFerr(BUF_F_BUF_STRNDUP, ERR_R_MALLOC_FAILURE);
- return (NULL);
+ return NULL;
}
- BUF_strlcpy(ret, str, siz + 1);
+
+ memcpy(ret, str, siz);
+ ret[siz] = '\0';
+
return (ret);
}
@@ -98,13 +105,13 @@ void *BUF_memdup(const void *data, size_t siz)
{
void *ret;
- if (data == NULL)
- return (NULL);
+ if (data == NULL || siz >= INT_MAX)
+ return NULL;
ret = OPENSSL_malloc(siz);
if (ret == NULL) {
BUFerr(BUF_F_BUF_MEMDUP, ERR_R_MALLOC_FAILURE);
- return (NULL);
+ return NULL;
}
return memcpy(ret, data, siz);
}
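
The BUF_strndup() rewrite above has two parts: a length cap below INT_MAX,
since many OpenSSL APIs still take int lengths, and a switch from
BUF_strlcpy() to memcpy() so that no more than |siz| bytes of |str| are ever
read. A sketch of the same pattern in isolation (strndup_capped is a
hypothetical name):

    #include <limits.h>
    #include <stdlib.h>
    #include <string.h>

    static char *strndup_capped(const char *str, size_t siz)
    {
        size_t len;
        char *ret;

        if (str == NULL)
            return NULL;
        for (len = 0; len < siz && str[len] != '\0'; len++)
            ;                           /* bounded scan, like BUF_strnlen() */
        if (len >= INT_MAX)
            return NULL;                /* avoid int truncation downstream */
        if ((ret = malloc(len + 1)) == NULL)
            return NULL;
        memcpy(ret, str, len);          /* reads exactly len <= siz bytes */
        ret[len] = '\0';
        return ret;
    }
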
diff --git a/crypto/buffer/buffer.h b/crypto/buffer/buffer.h
index c343dd772f1e..efd240a5f91e 100644
--- a/crypto/buffer/buffer.h
+++ b/crypto/buffer/buffer.h
@@ -86,7 +86,13 @@ int BUF_MEM_grow(BUF_MEM *str, size_t len);
int BUF_MEM_grow_clean(BUF_MEM *str, size_t len);
size_t BUF_strnlen(const char *str, size_t maxlen);
char *BUF_strdup(const char *str);
+
+/*
+ * Like strndup, but in addition, explicitly guarantees to never read past the
+ * first |siz| bytes of |str|.
+ */
char *BUF_strndup(const char *str, size_t siz);
+
void *BUF_memdup(const void *data, size_t siz);
void BUF_reverse(unsigned char *out, const unsigned char *in, size_t siz);
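
The guarantee documented above matters when the source is a raw,
not-necessarily-NUL-terminated field. A caller-side sketch
(field_to_cstring is a hypothetical helper):

    #include <openssl/buffer.h>

    /* Safe even if field[0..fieldlen) contains no NUL terminator. */
    static char *field_to_cstring(const unsigned char *field, size_t fieldlen)
    {
        return BUF_strndup((const char *)field, fieldlen);
    }
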
diff --git a/crypto/cms/cms_enc.c b/crypto/cms/cms_enc.c
index 85ae928a496f..b14b4b68b5c9 100644
--- a/crypto/cms/cms_enc.c
+++ b/crypto/cms/cms_enc.c
@@ -195,7 +195,7 @@ BIO *cms_EncryptedContent_init_bio(CMS_EncryptedContentInfo *ec)
ok = 1;
err:
- if (ec->key && !keep_key) {
+ if (ec->key && (!keep_key || !ok)) {
OPENSSL_cleanse(ec->key, ec->keylen);
OPENSSL_free(ec->key);
ec->key = NULL;
diff --git a/crypto/cms/cms_pwri.c b/crypto/cms/cms_pwri.c
index a8322dcdf1a6..b91c01691fec 100644
--- a/crypto/cms/cms_pwri.c
+++ b/crypto/cms/cms_pwri.c
@@ -121,6 +121,9 @@ CMS_RecipientInfo *CMS_add0_recipient_password(CMS_ContentInfo *cms,
/* Setup algorithm identifier for cipher */
encalg = X509_ALGOR_new();
+ if (encalg == NULL) {
+ goto merr;
+ }
EVP_CIPHER_CTX_init(&ctx);
if (EVP_EncryptInit_ex(&ctx, kekciph, NULL, NULL, NULL) <= 0) {
diff --git a/crypto/cms/cms_sd.c b/crypto/cms/cms_sd.c
index 721ffd5afb85..a41aca8e1277 100644
--- a/crypto/cms/cms_sd.c
+++ b/crypto/cms/cms_sd.c
@@ -857,6 +857,8 @@ int CMS_SignerInfo_verify_content(CMS_SignerInfo *si, BIO *chain)
} else {
const EVP_MD *md = EVP_MD_CTX_md(&mctx);
pkctx = EVP_PKEY_CTX_new(si->pkey, NULL);
+ if (pkctx == NULL)
+ goto err;
if (EVP_PKEY_verify_init(pkctx) <= 0)
goto err;
if (EVP_PKEY_CTX_set_signature_md(pkctx, md) <= 0)
diff --git a/crypto/cms/cms_smime.c b/crypto/cms/cms_smime.c
index 5522a376acb6..07e3472e1079 100644
--- a/crypto/cms/cms_smime.c
+++ b/crypto/cms/cms_smime.c
@@ -754,7 +754,7 @@ int CMS_final(CMS_ContentInfo *cms, BIO *data, BIO *dcont, unsigned int flags)
BIO *cmsbio;
int ret = 0;
if (!(cmsbio = CMS_dataInit(cms, dcont))) {
- CMSerr(CMS_F_CMS_FINAL, ERR_R_MALLOC_FAILURE);
+ CMSerr(CMS_F_CMS_FINAL, CMS_R_CMS_LIB);
return 0;
}
diff --git a/crypto/comp/c_zlib.c b/crypto/comp/c_zlib.c
index 6731af8b0d7b..9c32614d3c70 100644
--- a/crypto/comp/c_zlib.c
+++ b/crypto/comp/c_zlib.c
@@ -404,8 +404,9 @@ COMP_METHOD *COMP_zlib(void)
void COMP_zlib_cleanup(void)
{
#ifdef ZLIB_SHARED
- if (zlib_dso)
+ if (zlib_dso != NULL)
DSO_free(zlib_dso);
+ zlib_dso = NULL;
#endif
}
diff --git a/crypto/conf/conf_def.c b/crypto/conf/conf_def.c
index faca9aeb571b..68c77cec7d8b 100644
--- a/crypto/conf/conf_def.c
+++ b/crypto/conf/conf_def.c
@@ -225,12 +225,11 @@ static int def_load_bio(CONF *conf, BIO *in, long *line)
goto err;
}
- section = (char *)OPENSSL_malloc(10);
+ section = BUF_strdup("default");
if (section == NULL) {
CONFerr(CONF_F_DEF_LOAD_BIO, ERR_R_MALLOC_FAILURE);
goto err;
}
- BUF_strlcpy(section, "default", 10);
if (_CONF_new_data(conf) == 0) {
CONFerr(CONF_F_DEF_LOAD_BIO, ERR_R_MALLOC_FAILURE);
diff --git a/crypto/conf/conf_sap.c b/crypto/conf/conf_sap.c
index 544fe9738719..c042cf222dc7 100644
--- a/crypto/conf/conf_sap.c
+++ b/crypto/conf/conf_sap.c
@@ -90,6 +90,7 @@ void OPENSSL_config(const char *config_name)
CONF_modules_load_file(NULL, config_name,
CONF_MFLAGS_DEFAULT_SECTION |
CONF_MFLAGS_IGNORE_MISSING_FILE);
+ openssl_configured = 1;
}
void OPENSSL_no_config()
diff --git a/crypto/cryptlib.c b/crypto/cryptlib.c
index ca0e3ccc0c7a..c9f674ba8e62 100644
--- a/crypto/cryptlib.c
+++ b/crypto/cryptlib.c
@@ -953,13 +953,29 @@ void OPENSSL_showfatal(const char *fmta, ...)
# if defined(_WIN32_WINNT) && _WIN32_WINNT>=0x0333
/* this -------------v--- guards NT-specific calls */
if (check_winnt() && OPENSSL_isservice() > 0) {
- HANDLE h = RegisterEventSource(0, _T("OPENSSL"));
- const TCHAR *pmsg = buf;
- ReportEvent(h, EVENTLOG_ERROR_TYPE, 0, 0, 0, 1, 0, &pmsg, 0);
- DeregisterEventSource(h);
+ HANDLE hEventLog = RegisterEventSource(NULL, _T("OpenSSL"));
+
+ if (hEventLog != NULL) {
+ const TCHAR *pmsg = buf;
+
+ if (!ReportEvent(hEventLog, EVENTLOG_ERROR_TYPE, 0, 0, NULL,
+ 1, 0, &pmsg, NULL)) {
+#if defined(DEBUG)
+ /*
+ * We are in a situation where we tried to report a critical
+ * error and this failed for some reason. As a last resort,
+ * in debug builds, send output to the debugger or any other
+ * tool like DebugView which can monitor the output.
+ */
+ OutputDebugString(pmsg);
+#endif
+ }
+
+ (void)DeregisterEventSource(hEventLog);
+ }
} else
# endif
- MessageBox(NULL, buf, _T("OpenSSL: FATAL"), MB_OK | MB_ICONSTOP);
+ MessageBox(NULL, buf, _T("OpenSSL: FATAL"), MB_OK | MB_ICONERROR);
}
#else
void OPENSSL_showfatal(const char *fmta, ...)
diff --git a/crypto/dh/dh.h b/crypto/dh/dh.h
index 0502f1a9cc14..b17767328183 100644
--- a/crypto/dh/dh.h
+++ b/crypto/dh/dh.h
@@ -142,7 +142,7 @@ struct dh_st {
BIGNUM *p;
BIGNUM *g;
long length; /* optional */
- BIGNUM *pub_key; /* g^x */
+ BIGNUM *pub_key; /* g^x % p */
BIGNUM *priv_key; /* x */
int flags;
BN_MONT_CTX *method_mont_p;
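
The corrected comment reflects how the public key is actually derived:
pub_key is g^x reduced mod p, not the raw power. A sketch of the computation
with the BN API (dh_make_pub is a hypothetical helper):

    #include <openssl/bn.h>

    static int dh_make_pub(BIGNUM *pub, const BIGNUM *g, const BIGNUM *x,
                           const BIGNUM *p, BN_CTX *ctx)
    {
        /* pub = g^x mod p */
        return BN_mod_exp(pub, g, x, p, ctx);
    }
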
diff --git a/crypto/dh/dhtest.c b/crypto/dh/dhtest.c
index c9dd76bc75e1..6fe8ff4c0c49 100644
--- a/crypto/dh/dhtest.c
+++ b/crypto/dh/dhtest.c
@@ -533,9 +533,9 @@ static int run_rfc5114_tests(void)
* Work out shared secrets using both sides and compare with expected
* values.
*/
- if (!DH_compute_key(Z1, dhB->pub_key, dhA))
+ if (DH_compute_key(Z1, dhB->pub_key, dhA) == -1)
goto bad_err;
- if (!DH_compute_key(Z2, dhA->pub_key, dhB))
+ if (DH_compute_key(Z2, dhA->pub_key, dhB) == -1)
goto bad_err;
if (memcmp(Z1, td->Z, td->Z_len))
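
The dhtest.c fix matters because DH_compute_key() returns the secret length
on success and -1 on error, so "!DH_compute_key(...)" misses real failures
(!(-1) is 0) and would treat a hypothetical zero-length result as an error.
A sketch of the correct check (derive_secret is hypothetical; secret must
have room for DH_size(dh) bytes):

    #include <openssl/dh.h>

    static int derive_secret(unsigned char *secret, const BIGNUM *peer_pub,
                             DH *dh)
    {
        int len = DH_compute_key(secret, peer_pub, dh);

        if (len == -1)
            return 0;                   /* error */
        return len;                     /* number of bytes written */
    }
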
diff --git a/crypto/dsa/dsa_ameth.c b/crypto/dsa/dsa_ameth.c
index 2a5cd71371a7..c40e1777ade1 100644
--- a/crypto/dsa/dsa_ameth.c
+++ b/crypto/dsa/dsa_ameth.c
@@ -318,6 +318,7 @@ static int dsa_priv_encode(PKCS8_PRIV_KEY_INFO *p8, const EVP_PKEY *pkey)
dplen = i2d_ASN1_INTEGER(prkey, &dp);
ASN1_STRING_clear_free(prkey);
+ prkey = NULL;
if (!PKCS8_pkey_set0(p8, OBJ_nid2obj(NID_dsa), 0,
V_ASN1_SEQUENCE, params, dp, dplen))
diff --git a/crypto/dsa/dsa_gen.c b/crypto/dsa/dsa_gen.c
index 5a328aaab5b4..15f3bb4f3f39 100644
--- a/crypto/dsa/dsa_gen.c
+++ b/crypto/dsa/dsa_gen.c
@@ -114,16 +114,8 @@ int DSA_generate_parameters_ex(DSA *ret, int bits,
}
# endif
else {
- const EVP_MD *evpmd;
- size_t qbits = bits >= 2048 ? 256 : 160;
-
- if (bits >= 2048) {
- qbits = 256;
- evpmd = EVP_sha256();
- } else {
- qbits = 160;
- evpmd = EVP_sha1();
- }
+ const EVP_MD *evpmd = bits >= 2048 ? EVP_sha256() : EVP_sha1();
+ size_t qbits = EVP_MD_size(evpmd) * 8;
return dsa_builtin_paramgen(ret, bits, qbits, evpmd,
seed_in, seed_len, NULL, counter_ret,
@@ -176,13 +168,14 @@ int dsa_builtin_paramgen(DSA *ret, size_t bits, size_t qbits,
if (seed_in != NULL)
memcpy(seed, seed_in, seed_len);
- if ((ctx = BN_CTX_new()) == NULL)
+ if ((mont = BN_MONT_CTX_new()) == NULL)
goto err;
- if ((mont = BN_MONT_CTX_new()) == NULL)
+ if ((ctx = BN_CTX_new()) == NULL)
goto err;
BN_CTX_start(ctx);
+
r0 = BN_CTX_get(ctx);
g = BN_CTX_get(ctx);
W = BN_CTX_get(ctx);
@@ -203,7 +196,7 @@ int dsa_builtin_paramgen(DSA *ret, size_t bits, size_t qbits,
if (!BN_GENCB_call(cb, 0, m++))
goto err;
- if (!seed_len) {
+ if (!seed_len || !seed_in) {
if (RAND_pseudo_bytes(seed, qsize) < 0)
goto err;
seed_is_random = 1;
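
The paramgen rewrite above derives q's bit length from the digest instead of
hard-coding it in two places, which keeps the two from drifting apart. The
same selection in isolation (dsa_qbits_for is a hypothetical helper):

    #include <openssl/evp.h>

    static size_t dsa_qbits_for(int bits, const EVP_MD **md)
    {
        *md = bits >= 2048 ? EVP_sha256() : EVP_sha1();
        return EVP_MD_size(*md) * 8;    /* 256 for SHA-256, 160 for SHA-1 */
    }
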
diff --git a/crypto/ec/Makefile b/crypto/ec/Makefile
index 359ef4e40fd4..89491454a441 100644
--- a/crypto/ec/Makefile
+++ b/crypto/ec/Makefile
@@ -89,7 +89,7 @@ dclean:
mv -f Makefile.new $(MAKEFILE)
clean:
- rm -f *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff
+ rm -f *.s *.o */*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff
# DO NOT DELETE THIS LINE -- make depend depends on it.
diff --git a/crypto/ec/asm/ecp_nistz256-x86_64.pl b/crypto/ec/asm/ecp_nistz256-x86_64.pl
index 84379fce1cb9..648c969be621 100755
--- a/crypto/ec/asm/ecp_nistz256-x86_64.pl
+++ b/crypto/ec/asm/ecp_nistz256-x86_64.pl
@@ -81,7 +81,7 @@ if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$addx = ($1>=12);
}
-if (!$addx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9])\.([0-9]+)/) {
+if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9])\.([0-9]+)/) {
my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
$avx = ($ver>=3.0) + ($ver>=3.01);
$addx = ($ver>=3.03);
diff --git a/crypto/ec/ec.h b/crypto/ec/ec.h
index 6d3178f609f4..81e6faf6c5c5 100644
--- a/crypto/ec/ec.h
+++ b/crypto/ec/ec.h
@@ -106,7 +106,7 @@ typedef enum {
/** the point is encoded as z||x, where the octet z specifies
* which solution of the quadratic equation y is */
POINT_CONVERSION_COMPRESSED = 2,
- /** the point is encoded as z||x||y, where z is the octet 0x02 */
+ /** the point is encoded as z||x||y, where z is the octet 0x04 */
POINT_CONVERSION_UNCOMPRESSED = 4,
/** the point is encoded as z||x||y, where the octet z specifies
* which solution of the quadratic equation y is */
diff --git a/crypto/ec/ec_asn1.c b/crypto/ec/ec_asn1.c
index 4ad8494981bf..33abf61f4441 100644
--- a/crypto/ec/ec_asn1.c
+++ b/crypto/ec/ec_asn1.c
@@ -970,8 +970,9 @@ EC_GROUP *d2i_ECPKParameters(EC_GROUP **a, const unsigned char **in, long len)
{
EC_GROUP *group = NULL;
ECPKPARAMETERS *params = NULL;
+ const unsigned char *p = *in;
- if ((params = d2i_ECPKPARAMETERS(NULL, in, len)) == NULL) {
+ if ((params = d2i_ECPKPARAMETERS(NULL, &p, len)) == NULL) {
ECerr(EC_F_D2I_ECPKPARAMETERS, EC_R_D2I_ECPKPARAMETERS_FAILURE);
ECPKPARAMETERS_free(params);
return NULL;
@@ -989,6 +990,7 @@ EC_GROUP *d2i_ECPKParameters(EC_GROUP **a, const unsigned char **in, long len)
*a = group;
ECPKPARAMETERS_free(params);
+ *in = p;
return (group);
}
@@ -1016,8 +1018,9 @@ EC_KEY *d2i_ECPrivateKey(EC_KEY **a, const unsigned char **in, long len)
int ok = 0;
EC_KEY *ret = NULL;
EC_PRIVATEKEY *priv_key = NULL;
+ const unsigned char *p = *in;
- if ((priv_key = d2i_EC_PRIVATEKEY(NULL, in, len)) == NULL) {
+ if ((priv_key = d2i_EC_PRIVATEKEY(NULL, &p, len)) == NULL) {
ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_EC_LIB);
return NULL;
}
@@ -1096,6 +1099,7 @@ EC_KEY *d2i_ECPrivateKey(EC_KEY **a, const unsigned char **in, long len)
if (a)
*a = ret;
+ *in = p;
ok = 1;
err:
if (!ok) {
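
Both d2i fixes above follow the same rule: parse through a local pointer and
advance the caller's pointer only once the whole decode has succeeded, so
*in is left untouched on failure. The pattern on its own, using d2i_X509()
as a stand-in (the wrapper name is hypothetical):

    #include <openssl/x509.h>

    static X509 *d2i_X509_keep_in_on_error(X509 **a, const unsigned char **in,
                                           long len)
    {
        const unsigned char *p = *in;
        X509 *ret = d2i_X509(a, &p, len);

        if (ret != NULL)
            *in = p;                    /* commit the advance only on success */
        return ret;
    }
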
diff --git a/crypto/ec/ec_key.c b/crypto/ec/ec_key.c
index 55ce3fe9beb2..c784b6fd30a3 100644
--- a/crypto/ec/ec_key.c
+++ b/crypto/ec/ec_key.c
@@ -366,7 +366,10 @@ int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x,
BN_CTX *ctx = NULL;
BIGNUM *tx, *ty;
EC_POINT *point = NULL;
- int ok = 0, tmp_nid, is_char_two = 0;
+ int ok = 0;
+#ifndef OPENSSL_NO_EC2M
+ int tmp_nid, is_char_two = 0;
+#endif
if (!key || !key->group || !x || !y) {
ECerr(EC_F_EC_KEY_SET_PUBLIC_KEY_AFFINE_COORDINATES,
@@ -382,14 +385,15 @@ int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x,
if (!point)
goto err;
+ tx = BN_CTX_get(ctx);
+ ty = BN_CTX_get(ctx);
+
+#ifndef OPENSSL_NO_EC2M
tmp_nid = EC_METHOD_get_field_type(EC_GROUP_method_of(key->group));
if (tmp_nid == NID_X9_62_characteristic_two_field)
is_char_two = 1;
- tx = BN_CTX_get(ctx);
- ty = BN_CTX_get(ctx);
-#ifndef OPENSSL_NO_EC2M
if (is_char_two) {
if (!EC_POINT_set_affine_coordinates_GF2m(key->group, point,
x, y, ctx))
diff --git a/crypto/ecdsa/ecdsa.h b/crypto/ecdsa/ecdsa.h
index c4016ac3e19b..a6f0930f829c 100644
--- a/crypto/ecdsa/ecdsa.h
+++ b/crypto/ecdsa/ecdsa.h
@@ -233,7 +233,7 @@ void *ECDSA_get_ex_data(EC_KEY *d, int idx);
* \return pointer to a ECDSA_METHOD structure or NULL if an error occurred
*/
-ECDSA_METHOD *ECDSA_METHOD_new(ECDSA_METHOD *ecdsa_method);
+ECDSA_METHOD *ECDSA_METHOD_new(const ECDSA_METHOD *ecdsa_method);
/** frees a ECDSA_METHOD structure
* \param ecdsa_method pointer to the ECDSA_METHOD structure
diff --git a/crypto/ecdsa/ecs_lib.c b/crypto/ecdsa/ecs_lib.c
index 1c0231031850..8dc1dda46259 100644
--- a/crypto/ecdsa/ecs_lib.c
+++ b/crypto/ecdsa/ecs_lib.c
@@ -276,7 +276,7 @@ void *ECDSA_get_ex_data(EC_KEY *d, int idx)
return (CRYPTO_get_ex_data(&ecdsa->ex_data, idx));
}
-ECDSA_METHOD *ECDSA_METHOD_new(ECDSA_METHOD *ecdsa_meth)
+ECDSA_METHOD *ECDSA_METHOD_new(const ECDSA_METHOD *ecdsa_meth)
{
ECDSA_METHOD *ret;
diff --git a/crypto/engine/eng_cryptodev.c b/crypto/engine/eng_cryptodev.c
index 926d95c0d7fc..8fb9c3373dd6 100644
--- a/crypto/engine/eng_cryptodev.c
+++ b/crypto/engine/eng_cryptodev.c
@@ -1292,15 +1292,18 @@ static DSA_SIG *cryptodev_dsa_do_sign(const unsigned char *dgst, int dlen,
if (cryptodev_asym(&kop, BN_num_bytes(dsa->q), r,
BN_num_bytes(dsa->q), s) == 0) {
dsaret = DSA_SIG_new();
+ if (dsaret == NULL)
+ goto err;
dsaret->r = r;
dsaret->s = s;
+ r = s = NULL;
} else {
const DSA_METHOD *meth = DSA_OpenSSL();
- BN_free(r);
- BN_free(s);
dsaret = (meth->dsa_do_sign) (dgst, dlen, dsa);
}
err:
+ BN_free(r);
+ BN_free(s);
kop.crk_param[0].crp_p = NULL;
zapparams(&kop);
return (dsaret);
diff --git a/crypto/engine/eng_list.c b/crypto/engine/eng_list.c
index 3384e3182893..83c95d56f466 100644
--- a/crypto/engine/eng_list.c
+++ b/crypto/engine/eng_list.c
@@ -260,6 +260,7 @@ int ENGINE_add(ENGINE *e)
}
if ((e->id == NULL) || (e->name == NULL)) {
ENGINEerr(ENGINE_F_ENGINE_ADD, ENGINE_R_ID_OR_NAME_MISSING);
+ return 0;
}
CRYPTO_w_lock(CRYPTO_LOCK_ENGINE);
if (!engine_list_add(e)) {
diff --git a/crypto/evp/e_aes_cbc_hmac_sha256.c b/crypto/evp/e_aes_cbc_hmac_sha256.c
index b1c586e6fd96..37800213c764 100644
--- a/crypto/evp/e_aes_cbc_hmac_sha256.c
+++ b/crypto/evp/e_aes_cbc_hmac_sha256.c
@@ -498,7 +498,18 @@ static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx,
iv = AES_BLOCK_SIZE;
# if defined(STITCHED_CALL)
+ /*
+     * The assembly stitch handles AVX-capable processors, but its
+     * performance is ~40% worse on AMD Jaguar, for unknown reasons.
+     * That processor supports AVX but not the AMD-specific XOP
+     * extension, which can be used to identify it and avoid the
+     * stitch invocation. So once we establish that the current CPU
+     * supports AVX, we additionally require that it is either an
+     * XOP-capable (Bulldozer-based) or a GenuineIntel processor.
+ */
if (OPENSSL_ia32cap_P[1] & (1 << (60 - 32)) && /* AVX? */
+ ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32))) /* XOP? */
+ | (OPENSSL_ia32cap_P[0] & (1<<30))) && /* "Intel CPU"? */
plen > (sha_off + iv) &&
(blocks = (plen - (sha_off + iv)) / SHA256_CBLOCK)) {
SHA256_Update(&key->md, in + iv, sha_off);
@@ -816,8 +827,6 @@ static int aesni_cbc_hmac_sha256_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
if (arg != EVP_AEAD_TLS1_AAD_LEN)
return -1;
- len = p[arg - 2] << 8 | p[arg - 1];
-
if (ctx->encrypt) {
key->payload_length = len;
if ((key->aux.tls_ver =
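
The gate added above reads OpenSSL's internal CPUID capability vector. A
sketch of the same test in isolation; OPENSSL_ia32cap_P and its exact bit
layout are internal details, assumed here for illustration only:

    /* Hypothetical sketch: bit 60 = AVX, bit 43 = XOP, bit 30 = GenuineIntel */
    extern unsigned int OPENSSL_ia32cap_P[4];

    static int stitch_ok(void)
    {
        return (OPENSSL_ia32cap_P[1] & (1 << (60 - 32)))      /* AVX? */
            && ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32)))     /* XOP? */
                || (OPENSSL_ia32cap_P[0] & (1 << 30)));       /* "Intel CPU"? */
    }
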
diff --git a/crypto/evp/e_des3.c b/crypto/evp/e_des3.c
index 96f272eb8046..bf6c1d2d3d39 100644
--- a/crypto/evp/e_des3.c
+++ b/crypto/evp/e_des3.c
@@ -289,7 +289,7 @@ static int des_ede_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
# endif
# ifdef EVP_CHECK_DES_KEY
if (DES_set_key_checked(&deskey[0], &dat->ks1)
- ! !DES_set_key_checked(&deskey[1], &dat->ks2))
+ || DES_set_key_checked(&deskey[1], &dat->ks2))
return 0;
# else
DES_set_key_unchecked(&deskey[0], &dat->ks1);
diff --git a/crypto/evp/encode.c b/crypto/evp/encode.c
index c361d1f01269..c6abc4ae8e47 100644
--- a/crypto/evp/encode.c
+++ b/crypto/evp/encode.c
@@ -60,9 +60,9 @@
#include "cryptlib.h"
#include <openssl/evp.h>
+static unsigned char conv_ascii2bin(unsigned char a);
#ifndef CHARSET_EBCDIC
# define conv_bin2ascii(a) (data_bin2ascii[(a)&0x3f])
-# define conv_ascii2bin(a) (data_ascii2bin[(a)&0x7f])
#else
/*
* We assume that PEM encoded files are EBCDIC files (i.e., printable text
@@ -71,7 +71,6 @@
* as the underlying textstring data_bin2ascii[] is already EBCDIC)
*/
# define conv_bin2ascii(a) (data_bin2ascii[(a)&0x3f])
-# define conv_ascii2bin(a) (data_ascii2bin[os_toascii[a]&0x7f])
#endif
/*-
@@ -103,6 +102,7 @@ abcdefghijklmnopqrstuvwxyz0123456789+/";
#define B64_WS 0xE0
#define B64_ERROR 0xFF
#define B64_NOT_BASE64(a) (((a)|0x13) == 0xF3)
+#define B64_BASE64(a) !B64_NOT_BASE64(a)
static const unsigned char data_ascii2bin[128] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -123,6 +123,23 @@ static const unsigned char data_ascii2bin[128] = {
0x31, 0x32, 0x33, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
};
+#ifndef CHARSET_EBCDIC
+static unsigned char conv_ascii2bin(unsigned char a)
+{
+ if (a & 0x80)
+ return B64_ERROR;
+ return data_ascii2bin[a];
+}
+#else
+static unsigned char conv_ascii2bin(unsigned char a)
+{
+ a = os_toascii[a];
+ if (a & 0x80)
+ return B64_ERROR;
+ return data_ascii2bin[a];
+}
+#endif
+
void EVP_EncodeInit(EVP_ENCODE_CTX *ctx)
{
ctx->length = 48;
@@ -218,8 +235,9 @@ int EVP_EncodeBlock(unsigned char *t, const unsigned char *f, int dlen)
void EVP_DecodeInit(EVP_ENCODE_CTX *ctx)
{
- ctx->length = 30;
+ /* Only ctx->num is used during decoding. */
ctx->num = 0;
+ ctx->length = 0;
ctx->line_num = 0;
ctx->expect_nl = 0;
}
@@ -228,139 +246,123 @@ void EVP_DecodeInit(EVP_ENCODE_CTX *ctx)
* -1 for error
* 0 for last line
* 1 for full line
+ *
+ * Note: even though EVP_DecodeUpdate attempts to detect and report end of
+ * content, the context doesn't currently remember it and will accept more data
+ * in the next call. Therefore, the caller is responsible for checking and
+ * rejecting a 0 return value in the middle of content.
+ *
+ * Note: even though EVP_DecodeUpdate has historically tried to detect end of
+ * content based on line length, this has never worked properly. Therefore,
+ * we now return 0 when one of the following is true:
+ * - Padding or B64_EOF was detected and the last block is complete.
+ * - The input has zero length.
+ * -1 is returned if:
+ * - Invalid characters are detected.
+ * - There is extra trailing padding, or data after padding.
+ * - B64_EOF is detected after an incomplete base64 block.
*/
int EVP_DecodeUpdate(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl,
const unsigned char *in, int inl)
{
- int seof = -1, eof = 0, rv = -1, ret = 0, i, v, tmp, n, ln, exp_nl;
+ int seof = 0, eof = 0, rv = -1, ret = 0, i, v, tmp, n, decoded_len;
unsigned char *d;
n = ctx->num;
d = ctx->enc_data;
- ln = ctx->line_num;
- exp_nl = ctx->expect_nl;
- /* last line of input. */
- if ((inl == 0) || ((n == 0) && (conv_ascii2bin(in[0]) == B64_EOF))) {
+ if (n > 0 && d[n - 1] == '=') {
+ eof++;
+ if (n > 1 && d[n - 2] == '=')
+ eof++;
+ }
+
+ /* Legacy behaviour: an empty input chunk signals end of input. */
+ if (inl == 0) {
rv = 0;
goto end;
}
- /* We parse the input data */
for (i = 0; i < inl; i++) {
- /* If the current line is > 80 characters, scream a lot */
- if (ln >= 80) {
- rv = -1;
- goto end;
- }
-
- /* Get char and put it into the buffer */
tmp = *(in++);
v = conv_ascii2bin(tmp);
- /* only save the good data :-) */
- if (!B64_NOT_BASE64(v)) {
- OPENSSL_assert(n < (int)sizeof(ctx->enc_data));
- d[n++] = tmp;
- ln++;
- } else if (v == B64_ERROR) {
+ if (v == B64_ERROR) {
rv = -1;
goto end;
}
- /*
- * have we seen a '=' which is 'definitly' the last input line. seof
- * will point to the character that holds it. and eof will hold how
- * many characters to chop off.
- */
if (tmp == '=') {
- if (seof == -1)
- seof = n;
eof++;
+ } else if (eof > 0 && B64_BASE64(v)) {
+ /* More data after padding. */
+ rv = -1;
+ goto end;
}
- if (v == B64_CR) {
- ln = 0;
- if (exp_nl)
- continue;
+ if (eof > 2) {
+ rv = -1;
+ goto end;
}
- /* eoln */
- if (v == B64_EOLN) {
- ln = 0;
- if (exp_nl) {
- exp_nl = 0;
- continue;
- }
- }
- exp_nl = 0;
-
- /*
- * If we are at the end of input and it looks like a line, process
- * it.
- */
- if (((i + 1) == inl) && (((n & 3) == 0) || eof)) {
- v = B64_EOF;
- /*
- * In case things were given us in really small records (so two
- * '=' were given in separate updates), eof may contain the
- * incorrect number of ending bytes to skip, so let's redo the
- * count
- */
- eof = 0;
- if (d[n - 1] == '=')
- eof++;
- if (d[n - 2] == '=')
- eof++;
- /* There will never be more than two '=' */
+ if (v == B64_EOF) {
+ seof = 1;
+ goto tail;
}
- if ((v == B64_EOF && (n & 3) == 0) || (n >= 64)) {
- /*
- * This is needed to work correctly on 64 byte input lines. We
- * process the line and then need to accept the '\n'
- */
- if ((v != B64_EOF) && (n >= 64))
- exp_nl = 1;
- if (n > 0) {
- v = EVP_DecodeBlock(out, d, n);
- n = 0;
- if (v < 0) {
- rv = 0;
- goto end;
- }
- if (eof > v) {
- rv = -1;
- goto end;
- }
- ret += (v - eof);
- } else {
- eof = 1;
- v = 0;
+ /* Only save valid base64 characters. */
+ if (B64_BASE64(v)) {
+ if (n >= 64) {
+ /*
+ * We increment n once per loop, and empty the buffer as soon as
+ * we reach 64 characters, so this can only happen if someone's
+ * manually messed with the ctx. Refuse to write any more data.
+ */
+ rv = -1;
+ goto end;
}
+ OPENSSL_assert(n < (int)sizeof(ctx->enc_data));
+ d[n++] = tmp;
+ }
- /*
- * This is the case where we have had a short but valid input
- * line
- */
- if ((v < ctx->length) && eof) {
- rv = 0;
+ if (n == 64) {
+ decoded_len = EVP_DecodeBlock(out, d, n);
+ n = 0;
+ if (decoded_len < 0 || eof > decoded_len) {
+ rv = -1;
goto end;
- } else
- ctx->length = v;
+ }
+ ret += decoded_len - eof;
+ out += decoded_len - eof;
+ }
+ }
- if (seof >= 0) {
- rv = 0;
+ /*
+ * Legacy behaviour: if the current line is a full base64-block (i.e., has
+ * 0 mod 4 base64 characters), it is processed immediately. We keep this
+ * behaviour as applications may not be calling EVP_DecodeFinal properly.
+ */
+tail:
+ if (n > 0) {
+ if ((n & 3) == 0) {
+ decoded_len = EVP_DecodeBlock(out, d, n);
+ n = 0;
+ if (decoded_len < 0 || eof > decoded_len) {
+ rv = -1;
goto end;
}
- out += v;
+ ret += (decoded_len - eof);
+ } else if (seof) {
+ /* EOF in the middle of a base64 block. */
+ rv = -1;
+ goto end;
}
}
- rv = 1;
- end:
+
+ rv = seof || (n == 0 && eof) ? 0 : 1;
+end:
+    /* Legacy behaviour: arguably this should be zeroed on error. */
*outl = ret;
ctx->num = n;
- ctx->line_num = ln;
- ctx->expect_nl = exp_nl;
return (rv);
}
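
Under the reworked decoder, callers should treat a negative return from
EVP_DecodeUpdate() as a hard error and remember that 0 signals end of
content. A usage sketch under those assumptions (b64_decode is hypothetical;
out must hold at least 3 * (strlen(in) / 4) bytes):

    #include <string.h>
    #include <openssl/evp.h>

    static int b64_decode(unsigned char *out, int *outlen, const char *in)
    {
        EVP_ENCODE_CTX ctx;
        int len = 0, tail = 0, rv;

        EVP_DecodeInit(&ctx);
        rv = EVP_DecodeUpdate(&ctx, out, &len,
                              (const unsigned char *)in, (int)strlen(in));
        if (rv < 0)
            return 0;                   /* bad character or bad padding */
        if (EVP_DecodeFinal(&ctx, out + len, &tail) < 0)
            return 0;
        *outlen = len + tail;
        return 1;
    }
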
diff --git a/crypto/evp/evp_key.c b/crypto/evp/evp_key.c
index 71fa627b20d3..5be9e336f9e7 100644
--- a/crypto/evp/evp_key.c
+++ b/crypto/evp/evp_key.c
@@ -104,6 +104,8 @@ int EVP_read_pw_string_min(char *buf, int min, int len, const char *prompt,
if ((prompt == NULL) && (prompt_string[0] != '\0'))
prompt = prompt_string;
ui = UI_new();
+ if (ui == NULL)
+ return -1;
UI_add_input_string(ui, prompt, 0, buf, min,
(len >= BUFSIZ) ? BUFSIZ - 1 : len);
if (verify)
@@ -137,7 +139,7 @@ int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md,
EVP_MD_CTX_init(&c);
for (;;) {
if (!EVP_DigestInit_ex(&c, md, NULL))
- return 0;
+ goto err;
if (addmd++)
if (!EVP_DigestUpdate(&c, &(md_buf[0]), mds))
goto err;
@@ -188,6 +190,6 @@ int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md,
rv = type->key_len;
err:
EVP_MD_CTX_cleanup(&c);
- OPENSSL_cleanse(&(md_buf[0]), EVP_MAX_MD_SIZE);
+ OPENSSL_cleanse(md_buf, sizeof(md_buf));
return rv;
}
diff --git a/crypto/evp/evp_lib.c b/crypto/evp/evp_lib.c
index a53a27ca0c92..7e0bab90d49a 100644
--- a/crypto/evp/evp_lib.c
+++ b/crypto/evp/evp_lib.c
@@ -72,11 +72,22 @@ int EVP_CIPHER_param_to_asn1(EVP_CIPHER_CTX *c, ASN1_TYPE *type)
if (c->cipher->set_asn1_parameters != NULL)
ret = c->cipher->set_asn1_parameters(c, type);
else if (c->cipher->flags & EVP_CIPH_FLAG_DEFAULT_ASN1) {
- if (EVP_CIPHER_CTX_mode(c) == EVP_CIPH_WRAP_MODE) {
- ASN1_TYPE_set(type, V_ASN1_NULL, NULL);
+ switch (EVP_CIPHER_CTX_mode(c)) {
+ case EVP_CIPH_WRAP_MODE:
+ if (EVP_CIPHER_CTX_nid(c) == NID_id_smime_alg_CMS3DESwrap)
+ ASN1_TYPE_set(type, V_ASN1_NULL, NULL);
ret = 1;
- } else
+ break;
+
+ case EVP_CIPH_GCM_MODE:
+ case EVP_CIPH_CCM_MODE:
+ case EVP_CIPH_XTS_MODE:
+ ret = -1;
+ break;
+
+ default:
ret = EVP_CIPHER_set_asn1_iv(c, type);
+ }
} else
ret = -1;
return (ret);
@@ -89,9 +100,22 @@ int EVP_CIPHER_asn1_to_param(EVP_CIPHER_CTX *c, ASN1_TYPE *type)
if (c->cipher->get_asn1_parameters != NULL)
ret = c->cipher->get_asn1_parameters(c, type);
else if (c->cipher->flags & EVP_CIPH_FLAG_DEFAULT_ASN1) {
- if (EVP_CIPHER_CTX_mode(c) == EVP_CIPH_WRAP_MODE)
- return 1;
- ret = EVP_CIPHER_get_asn1_iv(c, type);
+ switch (EVP_CIPHER_CTX_mode(c)) {
+
+ case EVP_CIPH_WRAP_MODE:
+ ret = 1;
+ break;
+
+ case EVP_CIPH_GCM_MODE:
+ case EVP_CIPH_CCM_MODE:
+ case EVP_CIPH_XTS_MODE:
+ ret = -1;
+ break;
+
+ default:
+ ret = EVP_CIPHER_get_asn1_iv(c, type);
+ break;
+ }
} else
ret = -1;
return (ret);
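
With the switch statements above, GCM, CCM and XTS make both default ASN.1
handlers return -1, since their parameters are not a plain IV. Callers
should treat only a positive return as success; a minimal sketch:

    #include <openssl/evp.h>

    static int params_to_asn1_checked(EVP_CIPHER_CTX *c, ASN1_TYPE *t)
    {
        /* -1 now also means "mode has no default parameter encoding" */
        return EVP_CIPHER_param_to_asn1(c, t) > 0;
    }
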
diff --git a/crypto/evp/evp_pbe.c b/crypto/evp/evp_pbe.c
index e3fa95db9289..7934c95fad0c 100644
--- a/crypto/evp/evp_pbe.c
+++ b/crypto/evp/evp_pbe.c
@@ -228,12 +228,16 @@ int EVP_PBE_alg_add_type(int pbe_type, int pbe_nid, int cipher_nid,
int md_nid, EVP_PBE_KEYGEN *keygen)
{
EVP_PBE_CTL *pbe_tmp;
- if (!pbe_algs)
+
+ if (pbe_algs == NULL) {
pbe_algs = sk_EVP_PBE_CTL_new(pbe_cmp);
- if (!(pbe_tmp = (EVP_PBE_CTL *)OPENSSL_malloc(sizeof(EVP_PBE_CTL)))) {
- EVPerr(EVP_F_EVP_PBE_ALG_ADD_TYPE, ERR_R_MALLOC_FAILURE);
- return 0;
+ if (pbe_algs == NULL)
+ goto err;
}
+
+ if ((pbe_tmp = OPENSSL_malloc(sizeof(*pbe_tmp))) == NULL)
+ goto err;
+
pbe_tmp->pbe_type = pbe_type;
pbe_tmp->pbe_nid = pbe_nid;
pbe_tmp->cipher_nid = cipher_nid;
@@ -242,6 +246,10 @@ int EVP_PBE_alg_add_type(int pbe_type, int pbe_nid, int cipher_nid,
sk_EVP_PBE_CTL_push(pbe_algs, pbe_tmp);
return 1;
+
+ err:
+ EVPerr(EVP_F_EVP_PBE_ALG_ADD_TYPE, ERR_R_MALLOC_FAILURE);
+ return 0;
}
int EVP_PBE_alg_add(int nid, const EVP_CIPHER *cipher, const EVP_MD *md,
diff --git a/crypto/evp/p_lib.c b/crypto/evp/p_lib.c
index 1171d3086d0b..c0171244d5d0 100644
--- a/crypto/evp/p_lib.c
+++ b/crypto/evp/p_lib.c
@@ -253,7 +253,7 @@ int EVP_PKEY_set_type_str(EVP_PKEY *pkey, const char *str, int len)
int EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key)
{
- if (!EVP_PKEY_set_type(pkey, type))
+ if (pkey == NULL || !EVP_PKEY_set_type(pkey, type))
return 0;
pkey->pkey.ptr = key;
return (key != NULL);
diff --git a/crypto/evp/pmeth_gn.c b/crypto/evp/pmeth_gn.c
index 59f81342e94d..6435f1b632cf 100644
--- a/crypto/evp/pmeth_gn.c
+++ b/crypto/evp/pmeth_gn.c
@@ -96,12 +96,17 @@ int EVP_PKEY_paramgen(EVP_PKEY_CTX *ctx, EVP_PKEY **ppkey)
return -1;
}
- if (!ppkey)
+ if (ppkey == NULL)
return -1;
- if (!*ppkey)
+ if (*ppkey == NULL)
*ppkey = EVP_PKEY_new();
+ if (*ppkey == NULL) {
+ EVPerr(EVP_F_EVP_PKEY_PARAMGEN, ERR_R_MALLOC_FAILURE);
+ return -1;
+ }
+
ret = ctx->pmeth->paramgen(ctx, *ppkey);
if (ret <= 0) {
EVP_PKEY_free(*ppkey);
diff --git a/crypto/hmac/hm_ameth.c b/crypto/hmac/hm_ameth.c
index 29b2b5dffcf7..944c6c857b17 100644
--- a/crypto/hmac/hm_ameth.c
+++ b/crypto/hmac/hm_ameth.c
@@ -108,9 +108,14 @@ static int old_hmac_decode(EVP_PKEY *pkey,
ASN1_OCTET_STRING *os;
os = ASN1_OCTET_STRING_new();
if (!os || !ASN1_OCTET_STRING_set(os, *pder, derlen))
- return 0;
- EVP_PKEY_assign(pkey, EVP_PKEY_HMAC, os);
+ goto err;
+ if (!EVP_PKEY_assign(pkey, EVP_PKEY_HMAC, os))
+ goto err;
return 1;
+
+ err:
+ ASN1_OCTET_STRING_free(os);
+ return 0;
}
static int old_hmac_encode(const EVP_PKEY *pkey, unsigned char **pder)
diff --git a/crypto/jpake/jpake.c b/crypto/jpake/jpake.c
index 8c38727e20fd..ebc09755756d 100644
--- a/crypto/jpake/jpake.c
+++ b/crypto/jpake/jpake.c
@@ -219,6 +219,9 @@ static int verify_zkp(const JPAKE_STEP_PART *p, const BIGNUM *zkpg,
BIGNUM *t3 = BN_new();
int ret = 0;
+ if (h == NULL || t1 == NULL || t2 == NULL || t3 == NULL)
+ goto end;
+
zkp_hash(h, zkpg, p, ctx->p.peer_name);
/* t1 = g^b */
@@ -234,6 +237,7 @@ static int verify_zkp(const JPAKE_STEP_PART *p, const BIGNUM *zkpg,
else
JPAKEerr(JPAKE_F_VERIFY_ZKP, JPAKE_R_ZKP_VERIFY_FAILED);
+end:
/* cleanup */
BN_free(t3);
BN_free(t2);
diff --git a/crypto/mem_clr.c b/crypto/mem_clr.c
index 3df1f3928d06..1a06636d0ce8 100644
--- a/crypto/mem_clr.c
+++ b/crypto/mem_clr.c
@@ -66,6 +66,10 @@ void OPENSSL_cleanse(void *ptr, size_t len)
{
unsigned char *p = ptr;
size_t loop = len, ctr = cleanse_ctr;
+
+ if (ptr == NULL)
+ return;
+
while (loop--) {
*(p++) = (unsigned char)ctr;
ctr += (17 + ((size_t)p & 0xF));
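
The OPENSSL_cleanse() guard above makes wiping a NULL pointer a no-op, which
simplifies error paths that cleanse whatever was allocated so far. The same
contract sketched with a volatile pointer (the real implementation uses a
moving counter to defeat dead-store elimination instead):

    #include <stddef.h>

    static void cleanse(void *ptr, size_t len)
    {
        volatile unsigned char *p = ptr;

        if (ptr == NULL)
            return;                     /* wiping nothing is fine */
        while (len--)
            *p++ = 0;
    }
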
diff --git a/crypto/modes/asm/aesni-gcm-x86_64.pl b/crypto/modes/asm/aesni-gcm-x86_64.pl
index 7e4e04ea2530..4be25571ea28 100755
--- a/crypto/modes/asm/aesni-gcm-x86_64.pl
+++ b/crypto/modes/asm/aesni-gcm-x86_64.pl
@@ -56,7 +56,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$avx = ($1>=10) + ($1>=11);
}
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
}
diff --git a/crypto/modes/asm/ghash-armv4.pl b/crypto/modes/asm/ghash-armv4.pl
index 77fbf34465db..8ccc963ef297 100755
--- a/crypto/modes/asm/ghash-armv4.pl
+++ b/crypto/modes/asm/ghash-armv4.pl
@@ -45,7 +45,7 @@
# processes one byte in 8.45 cycles, A9 - in 10.2, Snapdragon S4 -
# in 9.33.
#
-# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
+# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf
@@ -126,6 +126,11 @@ $code=<<___;
.text
.code 32
+#ifdef __clang__
+#define ldrplb ldrbpl
+#define ldrneb ldrbne
+#endif
+
.type rem_4bit,%object
.align 5
rem_4bit:
@@ -432,12 +437,12 @@ gcm_ghash_neon:
veor $IN,$Xl @ inp^=Xi
.Lgmult_neon:
___
- &clmul64x64 ($Xl,$Hlo,"$IN#lo"); # H.lo·Xi.lo
+ &clmul64x64 ($Xl,$Hlo,"$IN#lo"); # H.lo·Xi.lo
$code.=<<___;
veor $IN#lo,$IN#lo,$IN#hi @ Karatsuba pre-processing
___
- &clmul64x64 ($Xm,$Hhl,"$IN#lo"); # (H.lo+H.hi)·(Xi.lo+Xi.hi)
- &clmul64x64 ($Xh,$Hhi,"$IN#hi"); # H.hi·Xi.hi
+ &clmul64x64 ($Xm,$Hhl,"$IN#lo"); # (H.lo+H.hi)·(Xi.lo+Xi.hi)
+ &clmul64x64 ($Xh,$Hhi,"$IN#hi"); # H.hi·Xi.hi
$code.=<<___;
veor $Xm,$Xm,$Xl @ Karatsuba post-processing
veor $Xm,$Xm,$Xh
diff --git a/crypto/modes/asm/ghash-sparcv9.pl b/crypto/modes/asm/ghash-sparcv9.pl
index 0365e0f1ff42..5bc28702019a 100755
--- a/crypto/modes/asm/ghash-sparcv9.pl
+++ b/crypto/modes/asm/ghash-sparcv9.pl
@@ -379,7 +379,7 @@ gcm_init_vis3:
or $V,%lo(0xA0406080),$V
or %l0,%lo(0x20C0E000),%l0
sllx $V,32,$V
- or %l0,$V,$V ! (0xE0·i)&0xff=0xA040608020C0E000
+ or %l0,$V,$V ! (0xE0·i)&0xff=0xA040608020C0E000
stx $V,[%i0+16]
ret
@@ -399,7 +399,7 @@ gcm_gmult_vis3:
mov 0xE1,%l7
sllx %l7,57,$xE1 ! 57 is not a typo
- ldx [$Htable+16],$V ! (0xE0·i)&0xff=0xA040608020C0E000
+ ldx [$Htable+16],$V ! (0xE0·i)&0xff=0xA040608020C0E000
xor $Hhi,$Hlo,$Hhl ! Karatsuba pre-processing
xmulx $Xlo,$Hlo,$C0
@@ -411,9 +411,9 @@ gcm_gmult_vis3:
xmulx $Xhi,$Hhi,$Xhi
sll $C0,3,$sqr
- srlx $V,$sqr,$sqr ! ·0xE0 [implicit &(7<<3)]
+ srlx $V,$sqr,$sqr ! ·0xE0 [implicit &(7<<3)]
xor $C0,$sqr,$sqr
- sllx $sqr,57,$sqr ! ($C0·0xE1)<<1<<56 [implicit &0x7f]
+ sllx $sqr,57,$sqr ! ($C0·0xE1)<<1<<56 [implicit &0x7f]
xor $C0,$C1,$C1 ! Karatsuba post-processing
xor $Xlo,$C2,$C2
@@ -423,7 +423,7 @@ gcm_gmult_vis3:
xor $Xhi,$C2,$C2
xor $Xhi,$C1,$C1
- xmulxhi $C0,$xE1,$Xlo ! ·0xE1<<1<<56
+ xmulxhi $C0,$xE1,$Xlo ! ·0xE1<<1<<56
xor $C0,$C2,$C2
xmulx $C1,$xE1,$C0
xor $C1,$C3,$C3
@@ -453,7 +453,7 @@ gcm_ghash_vis3:
mov 0xE1,%l7
sllx %l7,57,$xE1 ! 57 is not a typo
- ldx [$Htable+16],$V ! (0xE0·i)&0xff=0xA040608020C0E000
+ ldx [$Htable+16],$V ! (0xE0·i)&0xff=0xA040608020C0E000
and $inp,7,$shl
andn $inp,7,$inp
@@ -490,9 +490,9 @@ gcm_ghash_vis3:
xmulx $Xhi,$Hhi,$Xhi
sll $C0,3,$sqr
- srlx $V,$sqr,$sqr ! ·0xE0 [implicit &(7<<3)]
+ srlx $V,$sqr,$sqr ! ·0xE0 [implicit &(7<<3)]
xor $C0,$sqr,$sqr
- sllx $sqr,57,$sqr ! ($C0·0xE1)<<1<<56 [implicit &0x7f]
+ sllx $sqr,57,$sqr ! ($C0·0xE1)<<1<<56 [implicit &0x7f]
xor $C0,$C1,$C1 ! Karatsuba post-processing
xor $Xlo,$C2,$C2
@@ -502,7 +502,7 @@ gcm_ghash_vis3:
xor $Xhi,$C2,$C2
xor $Xhi,$C1,$C1
- xmulxhi $C0,$xE1,$Xlo ! ·0xE1<<1<<56
+ xmulxhi $C0,$xE1,$Xlo ! ·0xE1<<1<<56
xor $C0,$C2,$C2
xmulx $C1,$xE1,$C0
xor $C1,$C3,$C3
diff --git a/crypto/modes/asm/ghash-x86.pl b/crypto/modes/asm/ghash-x86.pl
index 23a5527b30af..0269169fa743 100755
--- a/crypto/modes/asm/ghash-x86.pl
+++ b/crypto/modes/asm/ghash-x86.pl
@@ -358,7 +358,7 @@ $S=12; # shift factor for rem_4bit
# effective address calculation and finally merge of value to Z.hi.
# Reference to rem_4bit is scheduled so late that I had to >>4
# rem_4bit elements. This resulted in 20-45% procent improvement
-# on contemporary µ-archs.
+# on contemporary µ-archs.
{
my $cnt;
my $rem_4bit = "eax";
diff --git a/crypto/modes/asm/ghash-x86_64.pl b/crypto/modes/asm/ghash-x86_64.pl
index 6e656ca13b80..0bcb6d4e028b 100755
--- a/crypto/modes/asm/ghash-x86_64.pl
+++ b/crypto/modes/asm/ghash-x86_64.pl
@@ -105,7 +105,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$avx = ($1>=10) + ($1>=11);
}
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
}
@@ -576,15 +576,15 @@ $code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
# experimental alternative. special thing about is that there
# no dependency between the two multiplications...
mov \$`0xE1<<1`,%eax
- mov \$0xA040608020C0E000,%r10 # ((7..0)·0xE0)&0xff
+ mov \$0xA040608020C0E000,%r10 # ((7..0)·0xE0)&0xff
mov \$0x07,%r11d
movq %rax,$T1
movq %r10,$T2
movq %r11,$T3 # borrow $T3
pand $Xi,$T3
- pshufb $T3,$T2 # ($Xi&7)·0xE0
+ pshufb $T3,$T2 # ($Xi&7)·0xE0
movq %rax,$T3
- pclmulqdq \$0x00,$Xi,$T1 # ·(0xE1<<1)
+ pclmulqdq \$0x00,$Xi,$T1 # ·(0xE1<<1)
pxor $Xi,$T2
pslldq \$15,$T2
paddd $T2,$T2 # <<(64+56+1)
@@ -657,7 +657,7 @@ $code.=<<___;
je .Lskip4x
sub \$0x30,$len
- mov \$0xA040608020C0E000,%rax # ((7..0)·0xE0)&0xff
+ mov \$0xA040608020C0E000,%rax # ((7..0)·0xE0)&0xff
movdqu 0x30($Htbl),$Hkey3
movdqu 0x40($Htbl),$Hkey4
diff --git a/crypto/modes/asm/ghashp8-ppc.pl b/crypto/modes/asm/ghashp8-ppc.pl
index e76a58c343c1..71457cf4fc59 100755
--- a/crypto/modes/asm/ghashp8-ppc.pl
+++ b/crypto/modes/asm/ghashp8-ppc.pl
@@ -118,9 +118,9 @@ $code=<<___;
le?vperm $IN,$IN,$IN,$lemask
vxor $zero,$zero,$zero
- vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
- vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
- vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
vpmsumd $t2,$Xl,$xC2 # 1st phase
@@ -178,11 +178,11 @@ $code=<<___;
.align 5
Loop:
subic $len,$len,16
- vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
subfe. r0,r0,r0 # borrow?-1:0
- vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
and r0,r0,$len
- vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
add $inp,$inp,r0
vpmsumd $t2,$Xl,$xC2 # 1st phase
diff --git a/crypto/modes/asm/ghashv8-armx.pl b/crypto/modes/asm/ghashv8-armx.pl
index 0b9cd7359fba..0886d2180702 100755
--- a/crypto/modes/asm/ghashv8-armx.pl
+++ b/crypto/modes/asm/ghashv8-armx.pl
@@ -135,10 +135,10 @@ gcm_gmult_v8:
#endif
vext.8 $IN,$t1,$t1,#8
- vpmull.p64 $Xl,$H,$IN @ H.lo·Xi.lo
+ vpmull.p64 $Xl,$H,$IN @ H.lo·Xi.lo
veor $t1,$t1,$IN @ Karatsuba pre-processing
- vpmull2.p64 $Xh,$H,$IN @ H.hi·Xi.hi
- vpmull.p64 $Xm,$Hhl,$t1 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
+ vpmull2.p64 $Xh,$H,$IN @ H.hi·Xi.hi
+ vpmull.p64 $Xm,$Hhl,$t1 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
veor $t2,$Xl,$Xh
@@ -226,7 +226,7 @@ $code.=<<___;
#endif
vext.8 $In,$t1,$t1,#8
veor $IN,$IN,$Xl @ I[i]^=Xi
- vpmull.p64 $Xln,$H,$In @ H·Ii+1
+ vpmull.p64 $Xln,$H,$In @ H·Ii+1
veor $t1,$t1,$In @ Karatsuba pre-processing
vpmull2.p64 $Xhn,$H,$In
b .Loop_mod2x_v8
@@ -235,14 +235,14 @@ $code.=<<___;
.Loop_mod2x_v8:
vext.8 $t2,$IN,$IN,#8
subs $len,$len,#32 @ is there more data?
- vpmull.p64 $Xl,$H2,$IN @ H^2.lo·Xi.lo
+ vpmull.p64 $Xl,$H2,$IN @ H^2.lo·Xi.lo
cclr $inc,lo @ is it time to zero $inc?
vpmull.p64 $Xmn,$Hhl,$t1
veor $t2,$t2,$IN @ Karatsuba pre-processing
- vpmull2.p64 $Xh,$H2,$IN @ H^2.hi·Xi.hi
+ vpmull2.p64 $Xh,$H2,$IN @ H^2.hi·Xi.hi
veor $Xl,$Xl,$Xln @ accumulate
- vpmull2.p64 $Xm,$Hhl,$t2 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
+ vpmull2.p64 $Xm,$Hhl,$t2 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
vld1.64 {$t0},[$inp],$inc @ load [rotated] I[i+2]
veor $Xh,$Xh,$Xhn
@@ -267,7 +267,7 @@ $code.=<<___;
vext.8 $In,$t1,$t1,#8
vext.8 $IN,$t0,$t0,#8
veor $Xl,$Xm,$t2
- vpmull.p64 $Xln,$H,$In @ H·Ii+1
+ vpmull.p64 $Xln,$H,$In @ H·Ii+1
veor $IN,$IN,$Xh @ accumulate $IN early
vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
@@ -291,10 +291,10 @@ $code.=<<___;
veor $IN,$IN,$Xl @ inp^=Xi
veor $t1,$t0,$t2 @ $t1 is rotated inp^Xi
- vpmull.p64 $Xl,$H,$IN @ H.lo·Xi.lo
+ vpmull.p64 $Xl,$H,$IN @ H.lo·Xi.lo
veor $t1,$t1,$IN @ Karatsuba pre-processing
- vpmull2.p64 $Xh,$H,$IN @ H.hi·Xi.hi
- vpmull.p64 $Xm,$Hhl,$t1 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
+ vpmull2.p64 $Xh,$H,$IN @ H.hi·Xi.hi
+ vpmull.p64 $Xm,$Hhl,$t1 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
veor $t2,$Xl,$Xh
diff --git a/crypto/modes/wrap128.c b/crypto/modes/wrap128.c
index 4dcaf0326fa8..384978371af2 100644
--- a/crypto/modes/wrap128.c
+++ b/crypto/modes/wrap128.c
@@ -76,7 +76,7 @@ size_t CRYPTO_128_wrap(void *key, const unsigned char *iv,
return 0;
A = B;
t = 1;
- memcpy(out + 8, in, inlen);
+ memmove(out + 8, in, inlen);
if (!iv)
iv = default_iv;
@@ -113,7 +113,7 @@ size_t CRYPTO_128_unwrap(void *key, const unsigned char *iv,
A = B;
t = 6 * (inlen >> 3);
memcpy(A, in, 8);
- memcpy(out, in + 8, inlen);
+ memmove(out, in + 8, inlen);
for (j = 0; j < 6; j++) {
R = out + inlen - 8;
for (i = 0; i < inlen; i += 8, t--, R -= 8) {
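
The memcpy()-to-memmove() change matters because RFC 3394 key wrap is often
done in place: out and in then overlap by design (the payload shifts eight
bytes to make room for the integrity block), and memcpy() on overlapping
buffers is undefined behaviour. A toy illustration of the layout, with no
cryptography (wrap_layout_demo is hypothetical; buf must hold inlen + 8
bytes):

    #include <string.h>

    static void wrap_layout_demo(unsigned char *buf, size_t inlen)
    {
        memmove(buf + 8, buf, inlen);   /* overlapping shift: memmove only */
        memset(buf, 0xA6, 8);           /* RFC 3394 default IV bytes */
    }
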
diff --git a/crypto/ocsp/ocsp_lib.c b/crypto/ocsp/ocsp_lib.c
index 442a5b63d4ba..cabf53933a44 100755
--- a/crypto/ocsp/ocsp_lib.c
+++ b/crypto/ocsp/ocsp_lib.c
@@ -246,12 +246,6 @@ int OCSP_parse_url(const char *url, char **phost, char **pport, char **ppath,
if ((p = strchr(p, ':'))) {
*p = 0;
port = p + 1;
- } else {
- /* Not found: set default port */
- if (*pssl)
- port = "443";
- else
- port = "80";
}
*pport = BUF_strdup(port);
diff --git a/crypto/ocsp/ocsp_prn.c b/crypto/ocsp/ocsp_prn.c
index 1834256af271..47d5f83ef9b0 100644
--- a/crypto/ocsp/ocsp_prn.c
+++ b/crypto/ocsp/ocsp_prn.c
@@ -212,8 +212,7 @@ int OCSP_RESPONSE_print(BIO *bp, OCSP_RESPONSE *o, unsigned long flags)
return 1;
}
- i = ASN1_STRING_length(rb->response);
- if (!(br = OCSP_response_get1_basic(o)))
+ if ((br = OCSP_response_get1_basic(o)) == NULL)
goto err;
rd = br->tbsResponseData;
l = ASN1_INTEGER_get(rd->version);
diff --git a/crypto/opensslconf.h b/crypto/opensslconf.h
index 15487c9f93be..b4d522e68505 100644
--- a/crypto/opensslconf.h
+++ b/crypto/opensslconf.h
@@ -216,7 +216,7 @@ extern "C" {
optimization options. Older Sparc's work better with only UNROLL, but
there's no way to tell at compile time what it is you're running on */
-#if defined( sun ) /* Newer Sparc's */
+#if defined( __sun ) || defined ( sun ) /* Newer Sparc's */
# define DES_PTR
# define DES_RISC1
# define DES_UNROLL
diff --git a/crypto/opensslconf.h.in b/crypto/opensslconf.h.in
index 814309becb6c..7a1c85d6ec9d 100644
--- a/crypto/opensslconf.h.in
+++ b/crypto/opensslconf.h.in
@@ -120,7 +120,7 @@
optimization options. Older Sparc's work better with only UNROLL, but
there's no way to tell at compile time what it is you're running on */
-#if defined( sun ) /* Newer Sparc's */
+#if defined( __sun ) || defined ( sun ) /* Newer Sparc's */
# define DES_PTR
# define DES_RISC1
# define DES_UNROLL
diff --git a/crypto/opensslv.h b/crypto/opensslv.h
index c06b13ac6b0f..abcef15b17d9 100644
--- a/crypto/opensslv.h
+++ b/crypto/opensslv.h
@@ -30,11 +30,11 @@ extern "C" {
* (Prior to 0.9.5a beta1, a different scheme was used: MMNNFFRBB for
* major minor fix final patch/beta)
*/
-# define OPENSSL_VERSION_NUMBER 0x1000204fL
+# define OPENSSL_VERSION_NUMBER 0x1000205fL
# ifdef OPENSSL_FIPS
-# define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2d-fips 9 Jul 2015"
+# define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2e-fips 3 Dec 2015"
# else
-# define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2d 9 Jul 2015"
+# define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2e 3 Dec 2015"
# endif
# define OPENSSL_VERSION_PTEXT " part of " OPENSSL_VERSION_TEXT
diff --git a/crypto/pem/pem_info.c b/crypto/pem/pem_info.c
index 68747d162586..4d736a1d07e5 100644
--- a/crypto/pem/pem_info.c
+++ b/crypto/pem/pem_info.c
@@ -172,6 +172,8 @@ STACK_OF(X509_INFO) *PEM_X509_INFO_read_bio(BIO *bp, STACK_OF(X509_INFO) *sk,
xi->enc_len = 0;
xi->x_pkey = X509_PKEY_new();
+ if (xi->x_pkey == NULL)
+ goto err;
ptype = EVP_PKEY_RSA;
pp = &xi->x_pkey->dec_pkey;
if ((int)strlen(header) > 10) /* assume encrypted */
@@ -193,6 +195,8 @@ STACK_OF(X509_INFO) *PEM_X509_INFO_read_bio(BIO *bp, STACK_OF(X509_INFO) *sk,
xi->enc_len = 0;
xi->x_pkey = X509_PKEY_new();
+ if (xi->x_pkey == NULL)
+ goto err;
ptype = EVP_PKEY_DSA;
pp = &xi->x_pkey->dec_pkey;
if ((int)strlen(header) > 10) /* assume encrypted */
@@ -214,6 +218,8 @@ STACK_OF(X509_INFO) *PEM_X509_INFO_read_bio(BIO *bp, STACK_OF(X509_INFO) *sk,
xi->enc_len = 0;
xi->x_pkey = X509_PKEY_new();
+ if (xi->x_pkey == NULL)
+ goto err;
ptype = EVP_PKEY_EC;
pp = &xi->x_pkey->dec_pkey;
if ((int)strlen(header) > 10) /* assume encrypted */
diff --git a/crypto/pem/pvkfmt.c b/crypto/pem/pvkfmt.c
index ee4b6a8241cc..82d45273ed16 100644
--- a/crypto/pem/pvkfmt.c
+++ b/crypto/pem/pvkfmt.c
@@ -624,13 +624,11 @@ static int do_PVK_header(const unsigned char **in, unsigned int length,
PEMerr(PEM_F_DO_PVK_HEADER, PEM_R_PVK_TOO_SHORT);
return 0;
}
- length -= 20;
} else {
if (length < 24) {
PEMerr(PEM_F_DO_PVK_HEADER, PEM_R_PVK_TOO_SHORT);
return 0;
}
- length -= 24;
pvk_magic = read_ledword(&p);
if (pvk_magic != MS_PVKMAGIC) {
PEMerr(PEM_F_DO_PVK_HEADER, PEM_R_BAD_MAGIC_NUMBER);
@@ -692,23 +690,23 @@ static EVP_PKEY *do_PVK_body(const unsigned char **in,
inlen = PEM_def_callback(psbuf, PEM_BUFSIZE, 0, u);
if (inlen <= 0) {
PEMerr(PEM_F_DO_PVK_BODY, PEM_R_BAD_PASSWORD_READ);
- return NULL;
+ goto err;
}
enctmp = OPENSSL_malloc(keylen + 8);
if (!enctmp) {
PEMerr(PEM_F_DO_PVK_BODY, ERR_R_MALLOC_FAILURE);
- return NULL;
+ goto err;
}
if (!derive_pvk_key(keybuf, p, saltlen,
(unsigned char *)psbuf, inlen))
- return NULL;
+ goto err;
p += saltlen;
/* Copy BLOBHEADER across, decrypt rest */
memcpy(enctmp, p, 8);
p += 8;
if (keylen < 8) {
PEMerr(PEM_F_DO_PVK_BODY, PEM_R_PVK_TOO_SHORT);
- return NULL;
+ goto err;
}
inlen = keylen - 8;
q = enctmp + 8;
diff --git a/crypto/perlasm/ppc-xlate.pl b/crypto/perlasm/ppc-xlate.pl
index f89e81429931..0f46cf06bcb8 100755
--- a/crypto/perlasm/ppc-xlate.pl
+++ b/crypto/perlasm/ppc-xlate.pl
@@ -151,6 +151,26 @@ my $vmr = sub {
" vor $vx,$vy,$vy";
};
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /aix|linux64le/);
+my $mtspr = sub {
+ my ($f,$idx,$ra) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " or $ra,$ra,$ra";
+ } else {
+ " mtspr $idx,$ra";
+ }
+};
+my $mfspr = sub {
+ my ($f,$rd,$idx) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " li $rd,-1";
+ } else {
+ " mfspr $rd,$idx";
+ }
+};
+
# PowerISA 2.06 stuff
sub vsxmem_op {
my ($f, $vrt, $ra, $rb, $op) = @_;
diff --git a/crypto/pkcs12/p12_add.c b/crypto/pkcs12/p12_add.c
index 982805d988de..d9f03a39fd15 100644
--- a/crypto/pkcs12/p12_add.c
+++ b/crypto/pkcs12/p12_add.c
@@ -75,15 +75,19 @@ PKCS12_SAFEBAG *PKCS12_item_pack_safebag(void *obj, const ASN1_ITEM *it,
bag->type = OBJ_nid2obj(nid1);
if (!ASN1_item_pack(obj, it, &bag->value.octet)) {
PKCS12err(PKCS12_F_PKCS12_ITEM_PACK_SAFEBAG, ERR_R_MALLOC_FAILURE);
- return NULL;
+ goto err;
}
if (!(safebag = PKCS12_SAFEBAG_new())) {
PKCS12err(PKCS12_F_PKCS12_ITEM_PACK_SAFEBAG, ERR_R_MALLOC_FAILURE);
- return NULL;
+ goto err;
}
safebag->value.bag = bag;
safebag->type = OBJ_nid2obj(nid2);
return safebag;
+
+ err:
+ PKCS12_BAGS_free(bag);
+ return NULL;
}
/* Turn PKCS8 object into a keybag */
@@ -127,6 +131,7 @@ PKCS12_SAFEBAG *PKCS12_MAKE_SHKEYBAG(int pbe_nid, const char *pass,
PKCS8_encrypt(pbe_nid, pbe_ciph, pass, passlen, salt, saltlen, iter,
p8))) {
PKCS12err(PKCS12_F_PKCS12_MAKE_SHKEYBAG, ERR_R_MALLOC_FAILURE);
+ PKCS12_SAFEBAG_free(bag);
return NULL;
}
@@ -144,14 +149,18 @@ PKCS7 *PKCS12_pack_p7data(STACK_OF(PKCS12_SAFEBAG) *sk)
p7->type = OBJ_nid2obj(NID_pkcs7_data);
if (!(p7->d.data = M_ASN1_OCTET_STRING_new())) {
PKCS12err(PKCS12_F_PKCS12_PACK_P7DATA, ERR_R_MALLOC_FAILURE);
- return NULL;
+ goto err;
}
if (!ASN1_item_pack(sk, ASN1_ITEM_rptr(PKCS12_SAFEBAGS), &p7->d.data)) {
PKCS12err(PKCS12_F_PKCS12_PACK_P7DATA, PKCS12_R_CANT_PACK_STRUCTURE);
- return NULL;
+ goto err;
}
return p7;
+
+ err:
+ PKCS7_free(p7);
+ return NULL;
}
/* Unpack SAFEBAGS from PKCS#7 data ContentInfo */
@@ -181,7 +190,7 @@ PKCS7 *PKCS12_pack_p7encdata(int pbe_nid, const char *pass, int passlen,
if (!PKCS7_set_type(p7, NID_pkcs7_encrypted)) {
PKCS12err(PKCS12_F_PKCS12_PACK_P7ENCDATA,
PKCS12_R_ERROR_SETTING_ENCRYPTED_DATA_TYPE);
- return NULL;
+ goto err;
}
pbe_ciph = EVP_get_cipherbynid(pbe_nid);
@@ -193,7 +202,7 @@ PKCS7 *PKCS12_pack_p7encdata(int pbe_nid, const char *pass, int passlen,
if (!pbe) {
PKCS12err(PKCS12_F_PKCS12_PACK_P7ENCDATA, ERR_R_MALLOC_FAILURE);
- return NULL;
+ goto err;
}
X509_ALGOR_free(p7->d.encrypted->enc_data->algorithm);
p7->d.encrypted->enc_data->algorithm = pbe;
@@ -202,10 +211,14 @@ PKCS7 *PKCS12_pack_p7encdata(int pbe_nid, const char *pass, int passlen,
PKCS12_item_i2d_encrypt(pbe, ASN1_ITEM_rptr(PKCS12_SAFEBAGS), pass,
passlen, bags, 1))) {
PKCS12err(PKCS12_F_PKCS12_PACK_P7ENCDATA, PKCS12_R_ENCRYPT_ERROR);
- return NULL;
+ goto err;
}
return p7;
+
+ err:
+ PKCS7_free(p7);
+ return NULL;
}
STACK_OF(PKCS12_SAFEBAG) *PKCS12_unpack_p7encdata(PKCS7 *p7, const char *pass,
diff --git a/crypto/pkcs12/p12_crpt.c b/crypto/pkcs12/p12_crpt.c
index 3a166e613003..9c2dcab02463 100644
--- a/crypto/pkcs12/p12_crpt.c
+++ b/crypto/pkcs12/p12_crpt.c
@@ -77,6 +77,9 @@ int PKCS12_PBE_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass, int passlen,
const unsigned char *pbuf;
unsigned char key[EVP_MAX_KEY_LENGTH], iv[EVP_MAX_IV_LENGTH];
+ if (cipher == NULL)
+ return 0;
+
/* Extract useful info from parameter */
if (param == NULL || param->type != V_ASN1_SEQUENCE ||
param->value.sequence == NULL) {
diff --git a/crypto/pkcs12/p12_mutl.c b/crypto/pkcs12/p12_mutl.c
index 5ab4bf290e14..a9277827ff2d 100644
--- a/crypto/pkcs12/p12_mutl.c
+++ b/crypto/pkcs12/p12_mutl.c
@@ -173,11 +173,11 @@ int PKCS12_setup_mac(PKCS12 *p12, int iter, unsigned char *salt, int saltlen,
}
if (!saltlen)
saltlen = PKCS12_SALT_LEN;
- p12->mac->salt->length = saltlen;
- if (!(p12->mac->salt->data = OPENSSL_malloc(saltlen))) {
+ if ((p12->mac->salt->data = OPENSSL_malloc(saltlen)) == NULL) {
PKCS12err(PKCS12_F_PKCS12_SETUP_MAC, ERR_R_MALLOC_FAILURE);
return 0;
}
+ p12->mac->salt->length = saltlen;
if (!salt) {
if (RAND_pseudo_bytes(p12->mac->salt->data, saltlen) < 0)
return 0;
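
The reordering above fixes a subtle inconsistency: the salt length was
recorded before the allocation, so a malloc failure left length > 0 with
data == NULL. The general shape of the fix (blob_alloc is a hypothetical
helper):

    #include <stdlib.h>

    struct blob { unsigned char *data; int length; };

    static int blob_alloc(struct blob *b, int len)
    {
        /* Illustrative sketch: set the length only once the buffer exists. */
        if ((b->data = malloc(len)) == NULL)
            return 0;                   /* b->length still describes reality */
        b->length = len;
        return 1;
    }
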
diff --git a/crypto/pkcs7/pk7_doit.c b/crypto/pkcs7/pk7_doit.c
index c8d7db01bd73..946aaa65435b 100644
--- a/crypto/pkcs7/pk7_doit.c
+++ b/crypto/pkcs7/pk7_doit.c
@@ -656,6 +656,8 @@ BIO *PKCS7_dataDecode(PKCS7 *p7, EVP_PKEY *pkey, BIO *in_bio, X509 *pcert)
bio = BIO_new_mem_buf(data_body->data, data_body->length);
else {
bio = BIO_new(BIO_s_mem());
+ if (bio == NULL)
+ goto err;
BIO_set_mem_eof_return(bio, 0);
}
if (bio == NULL)
@@ -1156,7 +1158,6 @@ PKCS7_ISSUER_AND_SERIAL *PKCS7_get_issuer_and_serial(PKCS7 *p7, int idx)
rsk = p7->d.signed_and_enveloped->recipientinfo;
if (rsk == NULL)
return NULL;
- ri = sk_PKCS7_RECIP_INFO_value(rsk, 0);
if (sk_PKCS7_RECIP_INFO_num(rsk) <= idx)
return (NULL);
ri = sk_PKCS7_RECIP_INFO_value(rsk, idx);
diff --git a/crypto/pkcs7/pk7_smime.c b/crypto/pkcs7/pk7_smime.c
index dbd4100c8d02..c4d3724d2a48 100644
--- a/crypto/pkcs7/pk7_smime.c
+++ b/crypto/pkcs7/pk7_smime.c
@@ -256,8 +256,8 @@ int PKCS7_verify(PKCS7 *p7, STACK_OF(X509) *certs, X509_STORE *store,
X509_STORE_CTX cert_ctx;
char buf[4096];
int i, j = 0, k, ret = 0;
- BIO *p7bio;
- BIO *tmpin, *tmpout;
+ BIO *p7bio = NULL;
+ BIO *tmpin = NULL, *tmpout = NULL;
if (!p7) {
PKCS7err(PKCS7_F_PKCS7_VERIFY, PKCS7_R_INVALID_NULL_POINTER);
@@ -274,18 +274,12 @@ int PKCS7_verify(PKCS7 *p7, STACK_OF(X509) *certs, X509_STORE *store,
PKCS7err(PKCS7_F_PKCS7_VERIFY, PKCS7_R_NO_CONTENT);
return 0;
}
-#if 0
- /*
- * NB: this test commented out because some versions of Netscape
- * illegally include zero length content when signing data.
- */
/* Check for data and content: two sets of data */
if (!PKCS7_get_detached(p7) && indata) {
PKCS7err(PKCS7_F_PKCS7_VERIFY, PKCS7_R_CONTENT_AND_DATA_PRESENT);
return 0;
}
-#endif
sinfos = PKCS7_get_signer_info(p7);
@@ -295,7 +289,6 @@ int PKCS7_verify(PKCS7 *p7, STACK_OF(X509) *certs, X509_STORE *store,
}
signers = PKCS7_get0_signers(p7, certs, flags);
-
if (!signers)
return 0;
@@ -308,14 +301,12 @@ int PKCS7_verify(PKCS7 *p7, STACK_OF(X509) *certs, X509_STORE *store,
if (!X509_STORE_CTX_init(&cert_ctx, store, signer,
p7->d.sign->cert)) {
PKCS7err(PKCS7_F_PKCS7_VERIFY, ERR_R_X509_LIB);
- sk_X509_free(signers);
- return 0;
+ goto err;
}
X509_STORE_CTX_set_default(&cert_ctx, "smime_sign");
} else if (!X509_STORE_CTX_init(&cert_ctx, store, signer, NULL)) {
PKCS7err(PKCS7_F_PKCS7_VERIFY, ERR_R_X509_LIB);
- sk_X509_free(signers);
- return 0;
+ goto err;
}
if (!(flags & PKCS7_NOCRL))
X509_STORE_CTX_set0_crls(&cert_ctx, p7->d.sign->crl);
@@ -328,8 +319,7 @@ int PKCS7_verify(PKCS7 *p7, STACK_OF(X509) *certs, X509_STORE *store,
PKCS7_R_CERTIFICATE_VERIFY_ERROR);
ERR_add_error_data(2, "Verify error:",
X509_verify_cert_error_string(j));
- sk_X509_free(signers);
- return 0;
+ goto err;
}
/* Check for revocation status here */
}
@@ -348,7 +338,7 @@ int PKCS7_verify(PKCS7 *p7, STACK_OF(X509) *certs, X509_STORE *store,
tmpin = BIO_new_mem_buf(ptr, len);
if (tmpin == NULL) {
PKCS7err(PKCS7_F_PKCS7_VERIFY, ERR_R_MALLOC_FAILURE);
- return 0;
+ goto err;
}
} else
tmpin = indata;
@@ -398,15 +388,12 @@ int PKCS7_verify(PKCS7 *p7, STACK_OF(X509) *certs, X509_STORE *store,
ret = 1;
err:
-
if (tmpin == indata) {
if (indata)
BIO_pop(p7bio);
}
BIO_free_all(p7bio);
-
sk_X509_free(signers);
-
return ret;
}
diff --git a/crypto/ppccap.c b/crypto/ppccap.c
index 2b7f704cd82a..74af4732b5fa 100644
--- a/crypto/ppccap.c
+++ b/crypto/ppccap.c
@@ -7,7 +7,7 @@
#if defined(__linux) || defined(_AIX)
# include <sys/utsname.h>
#endif
-#include <crypto.h>
+#include <openssl/crypto.h>
#include <openssl/bn.h>
#include "ppc_arch.h"
diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl
index 75750dbf334d..20722d3e7246 100755
--- a/crypto/rc4/asm/rc4-x86_64.pl
+++ b/crypto/rc4/asm/rc4-x86_64.pl
@@ -56,7 +56,7 @@
# achieves respectful 432MBps on 2.8GHz processor now. For reference.
# If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than
# RC4_INT code-path. While if executed on Opteron, it's only 25%
-# slower than the RC4_INT one [meaning that if CPU µ-arch detection
+# slower than the RC4_INT one [meaning that if CPU µ-arch detection
# is not implemented, then this final RC4_CHAR code-path should be
# preferred, as it provides better *all-round* performance].
diff --git a/crypto/rsa/rsa_ameth.c b/crypto/rsa/rsa_ameth.c
index ca3922e6c298..4e0621827cf3 100644
--- a/crypto/rsa/rsa_ameth.c
+++ b/crypto/rsa/rsa_ameth.c
@@ -268,7 +268,7 @@ static X509_ALGOR *rsa_mgf1_decode(X509_ALGOR *alg)
{
const unsigned char *p;
int plen;
- if (alg == NULL)
+ if (alg == NULL || alg->parameter == NULL)
return NULL;
if (OBJ_obj2nid(alg->algorithm) != NID_mgf1)
return NULL;
diff --git a/crypto/rsa/rsa_gen.c b/crypto/rsa/rsa_gen.c
index 2465fbdebf19..7f7dca39fd08 100644
--- a/crypto/rsa/rsa_gen.c
+++ b/crypto/rsa/rsa_gen.c
@@ -69,6 +69,8 @@
#include <openssl/rsa.h>
#ifdef OPENSSL_FIPS
# include <openssl/fips.h>
+extern int FIPS_rsa_x931_generate_key_ex(RSA *rsa, int bits, BIGNUM *e,
+ BN_GENCB *cb);
#endif
static int rsa_builtin_keygen(RSA *rsa, int bits, BIGNUM *e_value,
@@ -94,7 +96,7 @@ int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb)
return rsa->meth->rsa_keygen(rsa, bits, e_value, cb);
#ifdef OPENSSL_FIPS
if (FIPS_mode())
- return FIPS_rsa_generate_key_ex(rsa, bits, e_value, cb);
+ return FIPS_rsa_x931_generate_key_ex(rsa, bits, e_value, cb);
#endif
return rsa_builtin_keygen(rsa, bits, e_value, cb);
}
diff --git a/crypto/rsa/rsa_sign.c b/crypto/rsa/rsa_sign.c
index 19461c6364d4..82ca8324dfbc 100644
--- a/crypto/rsa/rsa_sign.c
+++ b/crypto/rsa/rsa_sign.c
@@ -218,14 +218,13 @@ int int_rsa_verify(int dtype, const unsigned char *m,
memcpy(rm, s + 2, 16);
*prm_len = 16;
ret = 1;
- } else if (memcmp(m, s + 2, 16))
+ } else if (memcmp(m, s + 2, 16)) {
RSAerr(RSA_F_INT_RSA_VERIFY, RSA_R_BAD_SIGNATURE);
- else
+ } else {
ret = 1;
- }
-
- /* Special case: SSL signature */
- if (dtype == NID_md5_sha1) {
+ }
+ } else if (dtype == NID_md5_sha1) {
+ /* Special case: SSL signature */
if ((i != SSL_SIG_LENGTH) || memcmp(s, m, SSL_SIG_LENGTH))
RSAerr(RSA_F_INT_RSA_VERIFY, RSA_R_BAD_SIGNATURE);
else
diff --git a/crypto/rsa/rsa_test.c b/crypto/rsa/rsa_test.c
index e9712953e993..85c7440b8c68 100644
--- a/crypto/rsa/rsa_test.c
+++ b/crypto/rsa/rsa_test.c
@@ -297,22 +297,30 @@ int main(int argc, char *argv[])
} else
printf("OAEP encryption/decryption ok\n");
- /* Try decrypting corrupted ciphertexts */
+ /* Try decrypting corrupted ciphertexts. */
for (n = 0; n < clen; ++n) {
- int b;
- unsigned char saved = ctext[n];
- for (b = 0; b < 256; ++b) {
- if (b == saved)
- continue;
- ctext[n] = b;
- num = RSA_private_decrypt(num, ctext, ptext, key,
+ ctext[n] ^= 1;
+ num = RSA_private_decrypt(clen, ctext, ptext, key,
RSA_PKCS1_OAEP_PADDING);
- if (num > 0) {
- printf("Corrupt data decrypted!\n");
- err = 1;
- }
+ if (num > 0) {
+ printf("Corrupt data decrypted!\n");
+ err = 1;
+ break;
}
+ ctext[n] ^= 1;
}
+
+ /* Test truncated ciphertexts, as well as negative length. */
+ for (n = -1; n < clen; ++n) {
+ num = RSA_private_decrypt(n, ctext, ptext, key,
+ RSA_PKCS1_OAEP_PADDING);
+ if (num > 0) {
+ printf("Truncated data decrypted!\n");
+ err = 1;
+ break;
+ }
+ }
+
next:
RSA_free(key);
}
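
The rewritten test flips a single bit per ciphertext byte instead of cycling through all 256 byte values, and separately probes truncated and negative lengths. The core check as a self-contained sketch (hypothetical harness; ptext is assumed to hold at least RSA_size(key) bytes):

    #include <openssl/rsa.h>

    /* OAEP decryption must fail for every single-bit corruption. */
    static int rejects_bit_flips(RSA *key, unsigned char *ctext, int clen,
                                 unsigned char *ptext)
    {
        int n;

        for (n = 0; n < clen; ++n) {
            ctext[n] ^= 1;                       /* corrupt one bit */
            if (RSA_private_decrypt(clen, ctext, ptext, key,
                                    RSA_PKCS1_OAEP_PADDING) > 0)
                return 0;                        /* corrupt data accepted */
            ctext[n] ^= 1;                       /* restore the byte */
        }
        return 1;
    }
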
diff --git a/crypto/sha/asm/sha1-586.pl b/crypto/sha/asm/sha1-586.pl
index 4895eb3ddf85..e0b5d83b6201 100644
--- a/crypto/sha/asm/sha1-586.pl
+++ b/crypto/sha/asm/sha1-586.pl
@@ -66,9 +66,9 @@
# switch to AVX alone improves performance by as little as 4% in
# comparison to SSSE3 code path. But below result doesn't look like
# 4% improvement... Trouble is that Sandy Bridge decodes 'ro[rl]' as
-# pair of µ-ops, and it's the additional µ-ops, two per round, that
+# pair of µ-ops, and it's the additional µ-ops, two per round, that
# make it run slower than Core2 and Westmere. But 'sh[rl]d' is decoded
-# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with
+# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with
# equivalent 'sh[rl]d' that is responsible for the impressive 5.1
# cycles per processed byte. But 'sh[rl]d' is not something that used
# to be fast, nor does it appear to be fast in upcoming Bulldozer
diff --git a/crypto/sha/asm/sha1-mb-x86_64.pl b/crypto/sha/asm/sha1-mb-x86_64.pl
index a8ee075eaaa0..f856bb888b0e 100755
--- a/crypto/sha/asm/sha1-mb-x86_64.pl
+++ b/crypto/sha/asm/sha1-mb-x86_64.pl
@@ -58,7 +58,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$avx = ($1>=10) + ($1>=11);
}
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
}
diff --git a/crypto/sha/asm/sha1-x86_64.pl b/crypto/sha/asm/sha1-x86_64.pl
index 9bb6b498190f..9a6acc347d33 100755
--- a/crypto/sha/asm/sha1-x86_64.pl
+++ b/crypto/sha/asm/sha1-x86_64.pl
@@ -107,7 +107,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$avx = ($1>=10) + ($1>=11);
}
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([2-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([2-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
}
diff --git a/crypto/sha/asm/sha256-586.pl b/crypto/sha/asm/sha256-586.pl
index 6462e45ba75b..e9077143817c 100755
--- a/crypto/sha/asm/sha256-586.pl
+++ b/crypto/sha/asm/sha256-586.pl
@@ -10,7 +10,7 @@
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler generated code varies from
-# 10% to 40% [see below]. Not very impressive on some µ-archs, but
+# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes the amount of writes.
#
# May 2012.
diff --git a/crypto/sha/asm/sha256-mb-x86_64.pl b/crypto/sha/asm/sha256-mb-x86_64.pl
index adf2ddccd18b..3d37ae31ad3e 100755
--- a/crypto/sha/asm/sha256-mb-x86_64.pl
+++ b/crypto/sha/asm/sha256-mb-x86_64.pl
@@ -59,7 +59,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$avx = ($1>=10) + ($1>=11);
}
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
}
diff --git a/crypto/sha/asm/sha512-586.pl b/crypto/sha/asm/sha512-586.pl
index e96ec00314a4..2f6a202c3765 100755
--- a/crypto/sha/asm/sha512-586.pl
+++ b/crypto/sha/asm/sha512-586.pl
@@ -37,7 +37,7 @@
#
# IALU code-path is optimized for elder Pentiums. On vanilla Pentium
# performance improvement over compiler generated code reaches ~60%,
-# while on PIII - ~35%. On newer µ-archs improvement varies from 15%
+# while on PIII - ~35%. On newer µ-archs improvement varies from 15%
# to 50%, but it's less important as they are expected to execute SSE2
# code-path, which is commonly ~2-3x faster [than compiler generated
# code]. SSE2 code-path is as fast as original sha512-sse2.pl, even
diff --git a/crypto/sha/asm/sha512-parisc.pl b/crypto/sha/asm/sha512-parisc.pl
index fc0e15b3c059..6cad72e25573 100755
--- a/crypto/sha/asm/sha512-parisc.pl
+++ b/crypto/sha/asm/sha512-parisc.pl
@@ -19,7 +19,7 @@
# SHA512 performance is >2.9x better than gcc 3.2 generated code on
# PA-7100LC, PA-RISC 1.1 processor. Then implementation detects if the
# code is executed on PA-RISC 2.0 processor and switches to 64-bit
-# code path delivering adequate peformance even in "blended" 32-bit
+# code path delivering adequate performance even in "blended" 32-bit
# build. Though 64-bit code is not any faster than code generated by
# vendor compiler on PA-8600...
#
diff --git a/crypto/sha/asm/sha512-x86_64.pl b/crypto/sha/asm/sha512-x86_64.pl
index b7b44b441136..58665667f149 100755
--- a/crypto/sha/asm/sha512-x86_64.pl
+++ b/crypto/sha/asm/sha512-x86_64.pl
@@ -124,7 +124,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
$avx = ($1>=10) + ($1>=11);
}
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
}
diff --git a/crypto/sparccpuid.S b/crypto/sparccpuid.S
index eea2006fba18..7b12ec293154 100644
--- a/crypto/sparccpuid.S
+++ b/crypto/sparccpuid.S
@@ -123,7 +123,7 @@ OPENSSL_wipe_cpu:
fmovs %f1,%f3
fmovs %f0,%f2
- add %fp,BIAS,%i0 ! return pointer to caller´s top of stack
+ add %fp,BIAS,%i0 ! return pointer to caller´s top of stack
ret
restore
diff --git a/crypto/sparcv9cap.c b/crypto/sparcv9cap.c
index 8bf2846929b1..a36e46179294 100644
--- a/crypto/sparcv9cap.c
+++ b/crypto/sparcv9cap.c
@@ -237,6 +237,17 @@ static void common_handler(int sig)
siglongjmp(common_jmp, sig);
}
+#if defined(__sun) && defined(__SVR4)
+# if defined(__GNUC__) && __GNUC__>=2
+extern unsigned int getisax(unsigned int vec[], unsigned int sz) __attribute__ ((weak));
+# elif defined(__SUNPRO_C)
+#pragma weak getisax
+extern unsigned int getisax(unsigned int vec[], unsigned int sz);
+# else
+static unsigned int (*getisax) (unsigned int vec[], unsigned int sz) = NULL;
+# endif
+#endif
+
void OPENSSL_cpuid_setup(void)
{
char *e;
@@ -255,6 +266,42 @@ void OPENSSL_cpuid_setup(void)
return;
}
+#if defined(__sun) && defined(__SVR4)
+ if (getisax != NULL) {
+ unsigned int vec[1];
+
+ if (getisax (vec,1)) {
+ if (vec[0]&0x0020) OPENSSL_sparcv9cap_P[0] |= SPARCV9_VIS1;
+ if (vec[0]&0x0040) OPENSSL_sparcv9cap_P[0] |= SPARCV9_VIS2;
+ if (vec[0]&0x0080) OPENSSL_sparcv9cap_P[0] |= SPARCV9_BLK;
+ if (vec[0]&0x0100) OPENSSL_sparcv9cap_P[0] |= SPARCV9_FMADD;
+ if (vec[0]&0x0400) OPENSSL_sparcv9cap_P[0] |= SPARCV9_VIS3;
+
+ /* reconstruct %cfr copy */
+ OPENSSL_sparcv9cap_P[1] = (vec[0]>>17)&0x3ff;
+ OPENSSL_sparcv9cap_P[1] |= (OPENSSL_sparcv9cap_P[1]&CFR_MONTMUL)<<1;
+ if (vec[0]&0x20000000) OPENSSL_sparcv9cap_P[1] |= CFR_CRC32C;
+
+ /* Some heuristics */
+ /* all known VIS2-capable CPUs have unprivileged tick counter */
+ if (OPENSSL_sparcv9cap_P[0]&SPARCV9_VIS2)
+ OPENSSL_sparcv9cap_P[0] &= ~SPARCV9_TICK_PRIVILEGED;
+
+ OPENSSL_sparcv9cap_P[0] |= SPARCV9_PREFER_FPU;
+
+ /* detect UltraSPARC-Tx, see sparccpuid.S for details... */
+ if ((OPENSSL_sparcv9cap_P[0]&SPARCV9_VIS1) &&
+ _sparcv9_vis1_instrument() >= 12)
+ OPENSSL_sparcv9cap_P[0] &= ~(SPARCV9_VIS1 | SPARCV9_PREFER_FPU);
+ }
+
+ if (sizeof(size_t) == 8)
+ OPENSSL_sparcv9cap_P[0] |= SPARCV9_64BIT_STACK;
+
+ return;
+ }
+#endif
+
/* Initial value, fits UltraSPARC-I&II... */
OPENSSL_sparcv9cap_P[0] = SPARCV9_PREFER_FPU | SPARCV9_TICK_PRIVILEGED;
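
The new block reaches Solaris' getisax(2) through a weak reference, so one binary runs on releases that lack the symbol and falls back to the SIGILL-based probing that follows. The general shape of the technique, with a hypothetical feature-query function and GCC syntax:

    /* The pointer compares non-NULL only when the runtime linker
     * actually resolved the weak symbol. */
    extern int query_cpu_features(unsigned int vec[], unsigned int sz)
        __attribute__((weak));

    static void cpuid_setup_sketch(void)
    {
        unsigned int vec[1];

        if (query_cpu_features != NULL && query_cpu_features(vec, 1)) {
            /* decode the capability bits in vec[0] ... */
        } else {
            /* symbol absent: fall back to trap-based detection */
        }
    }
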
diff --git a/crypto/srp/srp_vfy.c b/crypto/srp/srp_vfy.c
index 50f75d7e4c9f..a3f1a8a0a4d5 100644
--- a/crypto/srp/srp_vfy.c
+++ b/crypto/srp/srp_vfy.c
@@ -521,12 +521,12 @@ char *SRP_create_verifier(const char *user, const char *pass, char **salt,
char **verifier, const char *N, const char *g)
{
int len;
- char *result = NULL;
- char *vf;
+ char *result = NULL, *vf = NULL;
BIGNUM *N_bn = NULL, *g_bn = NULL, *s = NULL, *v = NULL;
unsigned char tmp[MAX_LEN];
unsigned char tmp2[MAX_LEN];
char *defgNid = NULL;
+ int vfsize = 0;
if ((user == NULL) ||
(pass == NULL) || (salt == NULL) || (verifier == NULL))
@@ -564,22 +564,23 @@ char *SRP_create_verifier(const char *user, const char *pass, char **salt,
goto err;
BN_bn2bin(v, tmp);
- if (((vf = OPENSSL_malloc(BN_num_bytes(v) * 2)) == NULL))
+ vfsize = BN_num_bytes(v) * 2;
+ if (((vf = OPENSSL_malloc(vfsize)) == NULL))
goto err;
t_tob64(vf, tmp, BN_num_bytes(v));
- *verifier = vf;
if (*salt == NULL) {
char *tmp_salt;
if ((tmp_salt = OPENSSL_malloc(SRP_RANDOM_SALT_LEN * 2)) == NULL) {
- OPENSSL_free(vf);
goto err;
}
t_tob64(tmp_salt, tmp2, SRP_RANDOM_SALT_LEN);
*salt = tmp_salt;
}
+ *verifier = vf;
+ vf = NULL;
result = defgNid;
err:
@@ -587,11 +588,21 @@ char *SRP_create_verifier(const char *user, const char *pass, char **salt,
BN_free(N_bn);
BN_free(g_bn);
}
+ OPENSSL_cleanse(vf, vfsize);
+ OPENSSL_free(vf);
+ BN_clear_free(s);
+ BN_clear_free(v);
return result;
}
/*
- * create a verifier (*salt,*verifier,g and N are BIGNUMs)
+ * create a verifier (*salt,*verifier,g and N are BIGNUMs). If *salt != NULL
+ * then the provided salt will be used. On successful exit *verifier will point
+ * to a newly allocated BIGNUM containing the verifier and (if a salt was not
+ * provided) *salt will be populated with a newly allocated BIGNUM containing a
+ * random salt.
+ * The caller is responsible for freeing the allocated *salt and *verifier
+ * BIGNUMS.
*/
int SRP_create_verifier_BN(const char *user, const char *pass, BIGNUM **salt,
BIGNUM **verifier, BIGNUM *N, BIGNUM *g)
@@ -600,6 +611,7 @@ int SRP_create_verifier_BN(const char *user, const char *pass, BIGNUM **salt,
BIGNUM *x = NULL;
BN_CTX *bn_ctx = BN_CTX_new();
unsigned char tmp2[MAX_LEN];
+ BIGNUM *salttmp = NULL;
if ((user == NULL) ||
(pass == NULL) ||
@@ -614,10 +626,12 @@ int SRP_create_verifier_BN(const char *user, const char *pass, BIGNUM **salt,
if (RAND_pseudo_bytes(tmp2, SRP_RANDOM_SALT_LEN) < 0)
goto err;
- *salt = BN_bin2bn(tmp2, SRP_RANDOM_SALT_LEN, NULL);
+ salttmp = BN_bin2bn(tmp2, SRP_RANDOM_SALT_LEN, NULL);
+ } else {
+ salttmp = *salt;
}
- x = SRP_Calc_x(*salt, user, pass);
+ x = SRP_Calc_x(salttmp, user, pass);
*verifier = BN_new();
if (*verifier == NULL)
@@ -631,9 +645,11 @@ int SRP_create_verifier_BN(const char *user, const char *pass, BIGNUM **salt,
srp_bn_print(*verifier);
result = 1;
+ *salt = salttmp;
err:
-
+ if (*salt != salttmp)
+ BN_clear_free(salttmp);
BN_clear_free(x);
BN_CTX_free(bn_ctx);
return result;
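
Both SRP fixes apply the same ownership rule: the caller's out-parameters are filled only once nothing can fail, and a locally created salt is freed only when it was never published. A caller-side sketch of the BIGNUM variant as now documented (SRP_get_default_gN supplies standard group parameters; the user and password values are placeholders):

    #include <openssl/srp.h>
    #include <openssl/bn.h>

    static int demo_verifier(void)
    {
        BIGNUM *salt = NULL, *verifier = NULL;
        SRP_gN *gN = SRP_get_default_gN("1024");
        int ok;

        if (gN == NULL)
            return 0;
        ok = SRP_create_verifier_BN("alice", "password",
                                    &salt, &verifier, gN->N, gN->g);
        BN_clear_free(salt);        /* caller owns both on success */
        BN_clear_free(verifier);
        return ok;
    }
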
diff --git a/crypto/ts/ts_rsp_verify.c b/crypto/ts/ts_rsp_verify.c
index 3ce765dfa1b6..da8991173ced 100644
--- a/crypto/ts/ts_rsp_verify.c
+++ b/crypto/ts/ts_rsp_verify.c
@@ -522,7 +522,7 @@ static int TS_check_status_info(TS_RESP *response)
if (ASN1_BIT_STRING_get_bit(info->failure_info,
TS_failure_info[i].code)) {
if (!first)
- strcpy(failure_text, ",");
+ strcat(failure_text, ",");
else
first = 0;
strcat(failure_text, TS_failure_info[i].text);
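
The one-character fix above is easy to misread: strcpy restarts at the beginning of the buffer, discarding the failure texts accumulated so far, while strcat appends the separator as intended. In miniature:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[64] = "badAlg";

        strcpy(buf, ",");       /* wrong: buf is now just ","        */
        printf("%s\n", buf);    /* prints: ,                         */

        strcpy(buf, "badAlg");
        strcat(buf, ",");       /* right: appends at the current end */
        printf("%s\n", buf);    /* prints: badAlg,                   */
        return 0;
    }
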
diff --git a/crypto/whrlpool/asm/wp-mmx.pl b/crypto/whrlpool/asm/wp-mmx.pl
index c584e5b92b25..7725951d6b71 100755
--- a/crypto/whrlpool/asm/wp-mmx.pl
+++ b/crypto/whrlpool/asm/wp-mmx.pl
@@ -16,7 +16,7 @@
# table]. I stick to value of 2 for two reasons: 1. smaller table
# minimizes cache trashing and thus mitigates the hazard of side-
# channel leakage similar to AES cache-timing one; 2. performance
-# gap among different µ-archs is smaller.
+# gap among different µ-archs is smaller.
#
# Performance table lists rounded amounts of CPU cycles spent by
# whirlpool_block_mmx routine on single 64 byte input block, i.e.
diff --git a/crypto/x509/x509_lu.c b/crypto/x509/x509_lu.c
index b0d653903ff5..50120a4d70c6 100644
--- a/crypto/x509/x509_lu.c
+++ b/crypto/x509/x509_lu.c
@@ -536,8 +536,6 @@ STACK_OF(X509_CRL) *X509_STORE_get1_crls(X509_STORE_CTX *ctx, X509_NAME *nm)
X509_OBJECT *obj, xobj;
sk = sk_X509_CRL_new_null();
CRYPTO_w_lock(CRYPTO_LOCK_X509_STORE);
- /* Check cache first */
- idx = x509_object_idx_cnt(ctx->ctx->objs, X509_LU_CRL, nm, &cnt);
/*
* Always do lookup to possibly add new CRLs to cache
diff --git a/crypto/x509/x509_vfy.c b/crypto/x509/x509_vfy.c
index a2f1dbefe352..ab94948f0135 100644
--- a/crypto/x509/x509_vfy.c
+++ b/crypto/x509/x509_vfy.c
@@ -249,7 +249,7 @@ int X509_verify_cert(X509_STORE_CTX *ctx)
if (ctx->param->flags & X509_V_FLAG_TRUSTED_FIRST) {
ok = ctx->get_issuer(&xtmp, ctx, x);
if (ok < 0)
- return ok;
+ goto end;
/*
* If successful for now free up cert so it will be picked up
* again later.
@@ -347,14 +347,15 @@ int X509_verify_cert(X509_STORE_CTX *ctx)
ok = ctx->get_issuer(&xtmp, ctx, x);
if (ok < 0)
- return ok;
+ goto end;
if (ok == 0)
break;
x = xtmp;
if (!sk_X509_push(ctx->chain, x)) {
X509_free(xtmp);
X509err(X509_F_X509_VERIFY_CERT, ERR_R_MALLOC_FAILURE);
- return 0;
+ ok = 0;
+ goto end;
}
num++;
}
@@ -752,6 +753,10 @@ static int check_hosts(X509 *x, X509_VERIFY_PARAM_ID *id)
int n = sk_OPENSSL_STRING_num(id->hosts);
char *name;
+ if (id->peername != NULL) {
+ OPENSSL_free(id->peername);
+ id->peername = NULL;
+ }
for (i = 0; i < n; ++i) {
name = sk_OPENSSL_STRING_value(id->hosts, i);
if (X509_check_host(x, name, 0, id->hostflags, &id->peername) > 0)
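
Resetting id->peername first matters because X509_check_host allocates into that pointer on a match; without it, verifying twice against the same X509_VERIFY_PARAM would leak the first result. Caller-side usage, sketched with a placeholder hostname:

    #include <openssl/x509v3.h>

    /* *peername must start out NULL and be freed by the caller. */
    static int matches_host(X509 *cert)
    {
        char *peername = NULL;
        int rv = X509_check_host(cert, "www.example.com", 0, 0, &peername);

        if (rv > 0) {
            /* peername holds the certificate name that matched */
            OPENSSL_free(peername);
        }
        return rv > 0;
    }
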
diff --git a/crypto/x509/x509_vpm.c b/crypto/x509/x509_vpm.c
index 1ea0c69f5743..592a8a5f6a5c 100644
--- a/crypto/x509/x509_vpm.c
+++ b/crypto/x509/x509_vpm.c
@@ -155,6 +155,7 @@ static void x509_verify_param_zero(X509_VERIFY_PARAM *param)
}
if (paramid->peername)
OPENSSL_free(paramid->peername);
+ paramid->peername = NULL;
if (paramid->email) {
OPENSSL_free(paramid->email);
paramid->email = NULL;
@@ -165,7 +166,6 @@ static void x509_verify_param_zero(X509_VERIFY_PARAM *param)
paramid->ip = NULL;
paramid->iplen = 0;
}
-
}
X509_VERIFY_PARAM *X509_VERIFY_PARAM_new(void)
@@ -176,13 +176,20 @@ X509_VERIFY_PARAM *X509_VERIFY_PARAM_new(void)
param = OPENSSL_malloc(sizeof *param);
if (!param)
return NULL;
- paramid = OPENSSL_malloc(sizeof *paramid);
+ memset(param, 0, sizeof(*param));
+
+ paramid = OPENSSL_malloc(sizeof(*paramid));
if (!paramid) {
OPENSSL_free(param);
return NULL;
}
- memset(param, 0, sizeof *param);
- memset(paramid, 0, sizeof *paramid);
+ memset(paramid, 0, sizeof(*paramid));
+ /* Exotic platforms may have non-zero bit representation of NULL */
+ paramid->hosts = NULL;
+ paramid->peername = NULL;
+ paramid->email = NULL;
+ paramid->ip = NULL;
+
param->id = paramid;
x509_verify_param_zero(param);
return param;
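
The reordering zeroes each block immediately after its allocation succeeds, and the explicit stores cover platforms where an all-bits-zero object is not a null pointer, which the C standard permits. The same pattern on a toy structure:

    #include <string.h>
    #include <openssl/crypto.h>

    struct toy_params {
        char *name;             /* pointer members get an explicit NULL */
        unsigned long flags;
    };

    static struct toy_params *toy_params_new(void)
    {
        struct toy_params *p = OPENSSL_malloc(sizeof(*p));

        if (p == NULL)
            return NULL;
        memset(p, 0, sizeof(*p));   /* zero everything first */
        p->name = NULL;             /* portable null, not just zero bits */
        return p;
    }
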
diff --git a/crypto/x509v3/v3_cpols.c b/crypto/x509v3/v3_cpols.c
index 0febc1b3edc1..d97f6226b9ee 100644
--- a/crypto/x509v3/v3_cpols.c
+++ b/crypto/x509v3/v3_cpols.c
@@ -186,6 +186,10 @@ static STACK_OF(POLICYINFO) *r2i_certpol(X509V3_EXT_METHOD *method,
goto err;
}
pol = POLICYINFO_new();
+ if (pol == NULL) {
+ X509V3err(X509V3_F_R2I_CERTPOL, ERR_R_MALLOC_FAILURE);
+ goto err;
+ }
pol->policyid = pobj;
}
if (!sk_POLICYINFO_push(pols, pol)) {
diff --git a/crypto/x509v3/v3_ncons.c b/crypto/x509v3/v3_ncons.c
index b97ed271e3e2..2855269668be 100644
--- a/crypto/x509v3/v3_ncons.c
+++ b/crypto/x509v3/v3_ncons.c
@@ -132,6 +132,8 @@ static void *v2i_NAME_CONSTRAINTS(const X509V3_EXT_METHOD *method,
}
tval.value = val->value;
sub = GENERAL_SUBTREE_new();
+ if (sub == NULL)
+ goto memerr;
if (!v2i_GENERAL_NAME_ex(sub->base, method, ctx, &tval, 1))
goto err;
if (!*ptree)
diff --git a/crypto/x509v3/v3_pci.c b/crypto/x509v3/v3_pci.c
index fe0d8063d1f1..48ac0959cb10 100644
--- a/crypto/x509v3/v3_pci.c
+++ b/crypto/x509v3/v3_pci.c
@@ -3,7 +3,7 @@
* Contributed to the OpenSSL Project 2004 by Richard Levitte
* (richard@levitte.org)
*/
-/* Copyright (c) 2004 Kungliga Tekniska Högskolan
+/* Copyright (c) 2004 Kungliga Tekniska Högskolan
* (Royal Institute of Technology, Stockholm, Sweden).
* All rights reserved.
*
diff --git a/crypto/x509v3/v3_pcia.c b/crypto/x509v3/v3_pcia.c
index 350b39889fcc..43fd362aeda0 100644
--- a/crypto/x509v3/v3_pcia.c
+++ b/crypto/x509v3/v3_pcia.c
@@ -3,7 +3,7 @@
* Contributed to the OpenSSL Project 2004 by Richard Levitte
* (richard@levitte.org)
*/
-/* Copyright (c) 2004 Kungliga Tekniska Högskolan
+/* Copyright (c) 2004 Kungliga Tekniska Högskolan
* (Royal Institute of Technology, Stockholm, Sweden).
* All rights reserved.
*
diff --git a/crypto/x509v3/v3_purp.c b/crypto/x509v3/v3_purp.c
index 36b0d87a0d8b..845be673b799 100644
--- a/crypto/x509v3/v3_purp.c
+++ b/crypto/x509v3/v3_purp.c
@@ -380,6 +380,14 @@ static void setup_crldp(X509 *x)
setup_dp(x, sk_DIST_POINT_value(x->crldp, i));
}
+#define V1_ROOT (EXFLAG_V1|EXFLAG_SS)
+#define ku_reject(x, usage) \
+ (((x)->ex_flags & EXFLAG_KUSAGE) && !((x)->ex_kusage & (usage)))
+#define xku_reject(x, usage) \
+ (((x)->ex_flags & EXFLAG_XKUSAGE) && !((x)->ex_xkusage & (usage)))
+#define ns_reject(x, usage) \
+ (((x)->ex_flags & EXFLAG_NSCERT) && !((x)->ex_nscert & (usage)))
+
static void x509v3_cache_extensions(X509 *x)
{
BASIC_CONSTRAINTS *bs;
@@ -499,7 +507,8 @@ static void x509v3_cache_extensions(X509 *x)
if (!X509_NAME_cmp(X509_get_subject_name(x), X509_get_issuer_name(x))) {
x->ex_flags |= EXFLAG_SI;
/* If SKID matches AKID also indicate self signed */
- if (X509_check_akid(x, x->akid) == X509_V_OK)
+ if (X509_check_akid(x, x->akid) == X509_V_OK &&
+ !ku_reject(x, KU_KEY_CERT_SIGN))
x->ex_flags |= EXFLAG_SS;
}
x->altname = X509_get_ext_d2i(x, NID_subject_alt_name, NULL, NULL);
@@ -538,14 +547,6 @@ static void x509v3_cache_extensions(X509 *x)
* 4 basicConstraints absent but keyUsage present and keyCertSign asserted.
*/
-#define V1_ROOT (EXFLAG_V1|EXFLAG_SS)
-#define ku_reject(x, usage) \
- (((x)->ex_flags & EXFLAG_KUSAGE) && !((x)->ex_kusage & (usage)))
-#define xku_reject(x, usage) \
- (((x)->ex_flags & EXFLAG_XKUSAGE) && !((x)->ex_xkusage & (usage)))
-#define ns_reject(x, usage) \
- (((x)->ex_flags & EXFLAG_NSCERT) && !((x)->ex_nscert & (usage)))
-
static int check_ca(const X509 *x)
{
/* keyUsage if present should allow cert signing */
diff --git a/crypto/x509v3/v3_scts.c b/crypto/x509v3/v3_scts.c
index 6e0b8d6844c8..0b7c68180e78 100644
--- a/crypto/x509v3/v3_scts.c
+++ b/crypto/x509v3/v3_scts.c
@@ -190,8 +190,9 @@ static STACK_OF(SCT) *d2i_SCT_LIST(STACK_OF(SCT) **a,
SCT *sct;
unsigned char *p, *p2;
unsigned short listlen, sctlen = 0, fieldlen;
+ const unsigned char *q = *pp;
- if (d2i_ASN1_OCTET_STRING(&oct, pp, length) == NULL)
+ if (d2i_ASN1_OCTET_STRING(&oct, &q, length) == NULL)
return NULL;
if (oct->length < 2)
goto done;
@@ -279,6 +280,7 @@ static STACK_OF(SCT) *d2i_SCT_LIST(STACK_OF(SCT) **a,
done:
ASN1_OCTET_STRING_free(oct);
+ *pp = q;
return sk;
err:
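
The local cursor q restores the usual d2i contract: the caller's *pp advances past the consumed bytes only when decoding succeeds and is left untouched on failure. The convention, sketched with a hypothetical decoder:

    /* Hypothetical THING/decode_thing, for illustration only. */
    THING *d2i_THING(THING **a, const unsigned char **pp, long length)
    {
        const unsigned char *q = *pp;       /* private cursor */
        THING *ret = decode_thing(&q, length);

        if (ret == NULL)
            return NULL;                    /* *pp unchanged on failure */
        *pp = q;                            /* commit the consumed bytes */
        return ret;
    }
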
diff --git a/crypto/x509v3/v3_utl.c b/crypto/x509v3/v3_utl.c
index bdd7b95f4570..4d1ecc58bf94 100644
--- a/crypto/x509v3/v3_utl.c
+++ b/crypto/x509v3/v3_utl.c
@@ -926,7 +926,7 @@ static int do_x509_check(X509 *x, const char *chk, size_t chklen,
GENERAL_NAMES *gens = NULL;
X509_NAME *name = NULL;
int i;
- int cnid;
+ int cnid = NID_undef;
int alt_type;
int san_present = 0;
int rv = 0;
@@ -949,7 +949,6 @@ static int do_x509_check(X509 *x, const char *chk, size_t chklen,
else
equal = equal_wildcard;
} else {
- cnid = 0;
alt_type = V_ASN1_OCTET_STRING;
equal = equal_case;
}
@@ -980,11 +979,16 @@ static int do_x509_check(X509 *x, const char *chk, size_t chklen,
GENERAL_NAMES_free(gens);
if (rv != 0)
return rv;
- if (!cnid
+ if (cnid == NID_undef
|| (san_present
&& !(flags & X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT)))
return 0;
}
+
+ /* We're done if CN-ID is not pertinent */
+ if (cnid == NID_undef)
+ return 0;
+
i = -1;
name = X509_get_subject_name(x);
while ((i = X509_NAME_get_index_by_NID(name, cnid, i)) >= 0) {