Diffstat (limited to 'crypto')
-rwxr-xr-x  crypto/aes/asm/aes-ppc.pl            4
-rwxr-xr-x  crypto/aes/asm/aes-s390x.pl         29
-rw-r--r--  crypto/asn1/a_bytes.c                4
-rw-r--r--  crypto/asn1/a_d2i_fp.c              36
-rw-r--r--  crypto/asn1/a_type.c                 2
-rw-r--r--  crypto/asn1/asn1_lib.c              18
-rw-r--r--  crypto/asn1/asn1_par.c              17
-rw-r--r--  crypto/asn1/t_x509.c                 3
-rw-r--r--  crypto/asn1/tasn_dec.c               2
-rw-r--r--  crypto/asn1/tasn_enc.c               2
-rw-r--r--  crypto/asn1/x_name.c                11
-rw-r--r--  crypto/asn1/x_x509.c                16
-rwxr-xr-x  crypto/bn/asm/ppc-mont.pl           10
-rw-r--r--  crypto/bn/asm/ppc.pl                10
-rwxr-xr-x  crypto/bn/asm/ppc64-mont.pl         12
-rwxr-xr-x  crypto/bn/asm/x86-mont.pl           15
-rwxr-xr-x  crypto/bn/asm/x86_64-mont.pl        42
-rwxr-xr-x  crypto/bn/asm/x86_64-mont5.pl       61
-rw-r--r--  crypto/comp/comp.h                   4
-rw-r--r--  crypto/evp/Makefile                 13
-rw-r--r--  crypto/evp/digest.c                  4
-rw-r--r--  crypto/evp/e_aes_cbc_hmac_sha1.c     3
-rw-r--r--  crypto/evp/e_aes_cbc_hmac_sha256.c   3
-rw-r--r--  crypto/evp/encode.c                 12
-rw-r--r--  crypto/evp/evp_enc.c                 2
-rwxr-xr-x  crypto/modes/asm/ghash-s390x.pl      4
-rw-r--r--  crypto/opensslv.h                    6
-rw-r--r--  crypto/pem/pem_lib.c                 2
-rw-r--r--  crypto/pem/pvkfmt.c                  7
-rwxr-xr-x  crypto/perlasm/x86_64-xlate.pl       2
-rw-r--r--  crypto/s390xcpuid.S                 44
-rwxr-xr-x  crypto/sha/asm/sha1-ppc.pl           6
-rwxr-xr-x  crypto/sha/asm/sha1-s390x.pl         7
-rwxr-xr-x  crypto/sha/asm/sha512-ppc.pl         8
-rwxr-xr-x  crypto/sha/asm/sha512-s390x.pl       7
-rw-r--r--  crypto/x509/x509.h                   1
-rw-r--r--  crypto/x509/x509_err.c               1
-rw-r--r--  crypto/x509/x509_obj.c              26
38 files changed, 334 insertions(+), 122 deletions(-)
diff --git a/crypto/aes/asm/aes-ppc.pl b/crypto/aes/asm/aes-ppc.pl
index 7a99fc3d0452..5b83016efa98 100755
--- a/crypto/aes/asm/aes-ppc.pl
+++ b/crypto/aes/asm/aes-ppc.pl
@@ -590,7 +590,7 @@ Lenc_loop:
xor $s2,$t2,$acc14
xor $s3,$t3,$acc15
addi $key,$key,16
- bdnz- Lenc_loop
+ bdnz Lenc_loop
addi $Tbl2,$Tbl0,2048
nop
@@ -1068,7 +1068,7 @@ Ldec_loop:
xor $s2,$t2,$acc14
xor $s3,$t3,$acc15
addi $key,$key,16
- bdnz- Ldec_loop
+ bdnz Ldec_loop
addi $Tbl2,$Tbl0,2048
nop
diff --git a/crypto/aes/asm/aes-s390x.pl b/crypto/aes/asm/aes-s390x.pl
index e75dcd0315e5..76ca8e52198a 100755
--- a/crypto/aes/asm/aes-s390x.pl
+++ b/crypto/aes/asm/aes-s390x.pl
@@ -818,13 +818,9 @@ $code.=<<___ if (!$softonly);
tmhl %r0,0x4000 # check for message-security assist
jz .Lekey_internal
- lghi %r0,0 # query capability vector
- la %r1,16($sp)
- .long 0xb92f0042 # kmc %r4,%r2
-
- llihh %r1,0x8000
- srlg %r1,%r1,0(%r5)
- ng %r1,16($sp)
+ llihh %r0,0x8000
+ srlg %r0,%r0,0(%r5)
+ ng %r0,48(%r1) # check kmc capability vector
jz .Lekey_internal
lmg %r0,%r1,0($inp) # just copy 128 bits...
@@ -1444,13 +1440,10 @@ $code.=<<___ if (0); ######### kmctr code was measured to be ~12% slower
llgfr $s0,%r0
lgr $s1,%r1
- lghi %r0,0
- la %r1,16($sp)
- .long 0xb92d2042 # kmctr %r4,%r2,%r2
-
+ larl %r1,OPENSSL_s390xcap_P
llihh %r0,0x8000 # check if kmctr supports the function code
srlg %r0,%r0,0($s0)
- ng %r0,16($sp)
+ ng %r0,64(%r1) # check kmctr capability vector
lgr %r0,$s0
lgr %r1,$s1
jz .Lctr32_km_loop
@@ -1597,12 +1590,10 @@ $code.=<<___ if(1);
llgfr $s0,%r0 # put aside the function code
lghi $s1,0x7f
nr $s1,%r0
- lghi %r0,0 # query capability vector
- la %r1,$tweak-16($sp)
- .long 0xb92e0042 # km %r4,%r2
- llihh %r1,0x8000
- srlg %r1,%r1,32($s1) # check for 32+function code
- ng %r1,$tweak-16($sp)
+ larl %r1,OPENSSL_s390xcap_P
+ llihh %r0,0x8000
+ srlg %r0,%r0,32($s1) # check for 32+function code
+ ng %r0,32(%r1) # check km capability vector
lgr %r0,$s0 # restore the function code
la %r1,0($key1) # restore $key1
jz .Lxts_km_vanilla
@@ -2229,7 +2220,7 @@ ___
}
$code.=<<___;
.string "AES for s390x, CRYPTOGAMS by <appro\@openssl.org>"
-.comm OPENSSL_s390xcap_P,16,8
+.comm OPENSSL_s390xcap_P,80,8
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;
diff --git a/crypto/asn1/a_bytes.c b/crypto/asn1/a_bytes.c
index 12715a72809d..385b53986a29 100644
--- a/crypto/asn1/a_bytes.c
+++ b/crypto/asn1/a_bytes.c
@@ -200,13 +200,13 @@ ASN1_STRING *d2i_ASN1_bytes(ASN1_STRING **a, const unsigned char **pp,
} else {
if (len != 0) {
if ((ret->length < len) || (ret->data == NULL)) {
- if (ret->data != NULL)
- OPENSSL_free(ret->data);
s = (unsigned char *)OPENSSL_malloc((int)len + 1);
if (s == NULL) {
i = ERR_R_MALLOC_FAILURE;
goto err;
}
+ if (ret->data != NULL)
+ OPENSSL_free(ret->data);
} else
s = ret->data;
memcpy(s, p, (int)len);
diff --git a/crypto/asn1/a_d2i_fp.c b/crypto/asn1/a_d2i_fp.c
index a1864b42c977..51b6f245ab10 100644
--- a/crypto/asn1/a_d2i_fp.c
+++ b/crypto/asn1/a_d2i_fp.c
@@ -141,6 +141,7 @@ void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *x)
#endif
#define HEADER_SIZE 8
+#define ASN1_CHUNK_INITIAL_SIZE (16 * 1024)
static int asn1_d2i_read_bio(BIO *in, BUF_MEM **pb)
{
BUF_MEM *b;
@@ -217,29 +218,44 @@ static int asn1_d2i_read_bio(BIO *in, BUF_MEM **pb)
/* suck in c.slen bytes of data */
want = c.slen;
if (want > (len - off)) {
+ size_t chunk_max = ASN1_CHUNK_INITIAL_SIZE;
+
want -= (len - off);
if (want > INT_MAX /* BIO_read takes an int length */ ||
len + want < len) {
ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
goto err;
}
- if (!BUF_MEM_grow_clean(b, len + want)) {
- ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
- goto err;
- }
while (want > 0) {
- i = BIO_read(in, &(b->data[len]), want);
- if (i <= 0) {
- ASN1err(ASN1_F_ASN1_D2I_READ_BIO,
- ASN1_R_NOT_ENOUGH_DATA);
+ /*
+ * Read content in chunks of increasing size
+ * so we can return an error for EOF without
+ * having to allocate the entire content length
+ * in one go.
+ */
+ size_t chunk = want > chunk_max ? chunk_max : want;
+
+ if (!BUF_MEM_grow_clean(b, len + chunk)) {
+ ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
goto err;
}
+ want -= chunk;
+ while (chunk > 0) {
+ i = BIO_read(in, &(b->data[len]), chunk);
+ if (i <= 0) {
+ ASN1err(ASN1_F_ASN1_D2I_READ_BIO,
+ ASN1_R_NOT_ENOUGH_DATA);
+ goto err;
+ }
/*
* This can't overflow because |len+want| didn't
* overflow.
*/
- len += i;
- want -= i;
+ len += i;
+ chunk -= i;
+ }
+ if (chunk_max < INT_MAX/2)
+ chunk_max *= 2;
}
}
if (off + c.slen < off) {
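The chunked-read scheme introduced in this hunk can be sketched in isolation as follows. This is an illustrative stand-in using stdio and realloc() rather than the BIO/BUF_MEM calls in the actual patch; the function and macro names are hypothetical.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_INITIAL (16 * 1024)   /* mirrors ASN1_CHUNK_INITIAL_SIZE */

/* Read exactly `want` bytes into a growing buffer, enlarging it in
 * geometrically increasing chunks so that a bogus huge length field
 * fails at EOF instead of forcing one enormous up-front allocation.
 * Returns 0 on success, -1 on error. */
static int read_in_chunks(FILE *in, unsigned char **buf, size_t *len, size_t want)
{
    size_t chunk_max = CHUNK_INITIAL;

    while (want > 0) {
        size_t chunk = want > chunk_max ? chunk_max : want;
        unsigned char *p = realloc(*buf, *len + chunk);

        if (p == NULL)
            return -1;              /* allocation failure */
        *buf = p;
        if (fread(*buf + *len, 1, chunk, in) != chunk)
            return -1;              /* short read: claimed length was a lie */
        *len += chunk;
        want -= chunk;
        if (chunk_max < INT_MAX / 2)
            chunk_max *= 2;         /* grow the chunk size, capped */
    }
    return 0;
}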
diff --git a/crypto/asn1/a_type.c b/crypto/asn1/a_type.c
index af795306b5bf..bb166e8568b5 100644
--- a/crypto/asn1/a_type.c
+++ b/crypto/asn1/a_type.c
@@ -126,9 +126,7 @@ int ASN1_TYPE_cmp(const ASN1_TYPE *a, const ASN1_TYPE *b)
result = 0; /* They do not have content. */
break;
case V_ASN1_INTEGER:
- case V_ASN1_NEG_INTEGER:
case V_ASN1_ENUMERATED:
- case V_ASN1_NEG_ENUMERATED:
case V_ASN1_BIT_STRING:
case V_ASN1_OCTET_STRING:
case V_ASN1_SEQUENCE:
diff --git a/crypto/asn1/asn1_lib.c b/crypto/asn1/asn1_lib.c
index 0b61fc930967..874b1af8b09a 100644
--- a/crypto/asn1/asn1_lib.c
+++ b/crypto/asn1/asn1_lib.c
@@ -63,7 +63,7 @@
#include <openssl/asn1_mac.h>
static int asn1_get_length(const unsigned char **pp, int *inf, long *rl,
- int max);
+ long max);
static void asn1_put_length(unsigned char **pp, int length);
const char ASN1_version[] = "ASN.1" OPENSSL_VERSION_PTEXT;
@@ -131,7 +131,7 @@ int ASN1_get_object(const unsigned char **pp, long *plength, int *ptag,
}
*ptag = tag;
*pclass = xclass;
- if (!asn1_get_length(&p, &inf, plength, (int)max))
+ if (!asn1_get_length(&p, &inf, plength, max))
goto err;
if (inf && !(ret & V_ASN1_CONSTRUCTED))
@@ -159,14 +159,14 @@ int ASN1_get_object(const unsigned char **pp, long *plength, int *ptag,
}
static int asn1_get_length(const unsigned char **pp, int *inf, long *rl,
- int max)
+ long max)
{
const unsigned char *p = *pp;
unsigned long ret = 0;
- unsigned int i;
+ unsigned long i;
if (max-- < 1)
- return (0);
+ return 0;
if (*p == 0x80) {
*inf = 1;
ret = 0;
@@ -175,15 +175,11 @@ static int asn1_get_length(const unsigned char **pp, int *inf, long *rl,
*inf = 0;
i = *p & 0x7f;
if (*(p++) & 0x80) {
- if (i > sizeof(long))
+ if (i > sizeof(ret) || max < (long)i)
return 0;
- if (max-- == 0)
- return (0);
while (i-- > 0) {
ret <<= 8L;
ret |= *(p++);
- if (max-- == 0)
- return (0);
}
} else
ret = i;
@@ -192,7 +188,7 @@ static int asn1_get_length(const unsigned char **pp, int *inf, long *rl,
return 0;
*pp = p;
*rl = (long)ret;
- return (1);
+ return 1;
}
/*
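For reference, the tightened length handling above amounts to the following standalone sketch of definite-form DER length parsing (indefinite form omitted for brevity); it is illustrative only, not the OpenSSL function itself.

/* Parse a DER length from *pp, reading at most `max` input bytes.
 * The count of length octets must fit in the result type and must not
 * run past the remaining input, matching the checks added above. */
static int get_der_length(const unsigned char **pp, long max, unsigned long *out)
{
    const unsigned char *p = *pp;
    unsigned long ret = 0;
    unsigned long i;

    if (max-- < 1)
        return 0;
    if (*p & 0x80) {                     /* long form: low 7 bits = octet count */
        i = *p++ & 0x7f;
        if (i > sizeof(ret) || max < (long)i)
            return 0;                    /* would overflow ret or overrun input */
        while (i-- > 0) {
            ret <<= 8;
            ret |= *p++;
        }
    } else {
        ret = *p++;                      /* short form: the byte is the length */
    }
    *pp = p;
    *out = ret;
    return 1;
}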
diff --git a/crypto/asn1/asn1_par.c b/crypto/asn1/asn1_par.c
index 0ca985a2be1e..e85e3398b6bb 100644
--- a/crypto/asn1/asn1_par.c
+++ b/crypto/asn1/asn1_par.c
@@ -173,6 +173,8 @@ static int asn1_parse2(BIO *bp, const unsigned char **pp, long length,
if (!asn1_print_info(bp, tag, xclass, j, (indent) ? depth : 0))
goto end;
if (j & V_ASN1_CONSTRUCTED) {
+ const unsigned char *sp;
+
ep = p + len;
if (BIO_write(bp, "\n", 1) <= 0)
goto end;
@@ -182,6 +184,7 @@ static int asn1_parse2(BIO *bp, const unsigned char **pp, long length,
goto end;
}
if ((j == 0x21) && (len == 0)) {
+ sp = p;
for (;;) {
r = asn1_parse2(bp, &p, (long)(tot - p),
offset + (p - *pp), depth + 1,
@@ -190,19 +193,25 @@ static int asn1_parse2(BIO *bp, const unsigned char **pp, long length,
ret = 0;
goto end;
}
- if ((r == 2) || (p >= tot))
+ if ((r == 2) || (p >= tot)) {
+ len = p - sp;
break;
+ }
}
- } else
+ } else {
+ long tmp = len;
+
while (p < ep) {
- r = asn1_parse2(bp, &p, (long)len,
- offset + (p - *pp), depth + 1,
+ sp = p;
+ r = asn1_parse2(bp, &p, tmp, offset + (p - *pp), depth + 1,
indent, dump);
if (r == 0) {
ret = 0;
goto end;
}
+ tmp -= p - sp;
}
+ }
} else if (xclass != 0) {
p += len;
if (BIO_write(bp, "\n", 1) <= 0)
diff --git a/crypto/asn1/t_x509.c b/crypto/asn1/t_x509.c
index 8aab55130c95..8888396f8434 100644
--- a/crypto/asn1/t_x509.c
+++ b/crypto/asn1/t_x509.c
@@ -140,7 +140,8 @@ int X509_print_ex(BIO *bp, X509 *x, unsigned long nmflags,
goto err;
bs = X509_get_serialNumber(x);
- if (bs->length <= (int)sizeof(long)) {
+ if (bs->length < (int)sizeof(long)
+ || (bs->length == sizeof(long) && (bs->data[0] & 0x80) == 0)) {
l = ASN1_INTEGER_get(bs);
if (bs->type == V_ASN1_NEG_INTEGER) {
l = -l;
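The tightened condition rejects serial numbers of exactly sizeof(long) bytes whose top bit is set, since such a magnitude exceeds LONG_MAX and cannot be represented via ASN1_INTEGER_get(). A minimal sketch of the test, with an illustrative name:

/* Does a big-endian magnitude of `length` bytes fit in a signed long? */
static int fits_in_long(const unsigned char *data, int length)
{
    return length < (int)sizeof(long)
        || (length == (int)sizeof(long) && (data[0] & 0x80) == 0);
}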
diff --git a/crypto/asn1/tasn_dec.c b/crypto/asn1/tasn_dec.c
index 5a507967c894..6bdcd5c542ca 100644
--- a/crypto/asn1/tasn_dec.c
+++ b/crypto/asn1/tasn_dec.c
@@ -901,9 +901,7 @@ int asn1_ex_c2i(ASN1_VALUE **pval, const unsigned char *cont, int len,
break;
case V_ASN1_INTEGER:
- case V_ASN1_NEG_INTEGER:
case V_ASN1_ENUMERATED:
- case V_ASN1_NEG_ENUMERATED:
tint = (ASN1_INTEGER **)pval;
if (!c2i_ASN1_INTEGER(tint, &cont, len))
goto err;
diff --git a/crypto/asn1/tasn_enc.c b/crypto/asn1/tasn_enc.c
index f04a6892a8d9..f7f83e56a981 100644
--- a/crypto/asn1/tasn_enc.c
+++ b/crypto/asn1/tasn_enc.c
@@ -611,9 +611,7 @@ int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cout, int *putype,
break;
case V_ASN1_INTEGER:
- case V_ASN1_NEG_INTEGER:
case V_ASN1_ENUMERATED:
- case V_ASN1_NEG_ENUMERATED:
/*
* These are all have the same content format as ASN1_INTEGER
*/
diff --git a/crypto/asn1/x_name.c b/crypto/asn1/x_name.c
index 737c426f2dea..a858c2993b90 100644
--- a/crypto/asn1/x_name.c
+++ b/crypto/asn1/x_name.c
@@ -66,6 +66,13 @@
typedef STACK_OF(X509_NAME_ENTRY) STACK_OF_X509_NAME_ENTRY;
DECLARE_STACK_OF(STACK_OF_X509_NAME_ENTRY)
+/*
+ * Maximum length of X509_NAME: much larger than anything we should
+ * ever see in practice.
+ */
+
+#define X509_NAME_MAX (1024 * 1024)
+
static int x509_name_ex_d2i(ASN1_VALUE **val,
const unsigned char **in, long len,
const ASN1_ITEM *it,
@@ -192,6 +199,10 @@ static int x509_name_ex_d2i(ASN1_VALUE **val,
int i, j, ret;
STACK_OF(X509_NAME_ENTRY) *entries;
X509_NAME_ENTRY *entry;
+ if (len > X509_NAME_MAX) {
+ ASN1err(ASN1_F_X509_NAME_EX_D2I, ASN1_R_TOO_LONG);
+ return 0;
+ }
q = p;
/* Get internal representation of Name */
diff --git a/crypto/asn1/x_x509.c b/crypto/asn1/x_x509.c
index e2cac836943d..e31e1e750d9d 100644
--- a/crypto/asn1/x_x509.c
+++ b/crypto/asn1/x_x509.c
@@ -201,10 +201,20 @@ X509 *d2i_X509_AUX(X509 **a, const unsigned char **pp, long length)
int i2d_X509_AUX(X509 *a, unsigned char **pp)
{
- int length;
+ int length, tmplen;
+ unsigned char *start = pp != NULL ? *pp : NULL;
length = i2d_X509(a, pp);
- if (a)
- length += i2d_X509_CERT_AUX(a->aux, pp);
+ if (length < 0 || a == NULL)
+ return length;
+
+ tmplen = i2d_X509_CERT_AUX(a->aux, pp);
+ if (tmplen < 0) {
+ if (start != NULL)
+ *pp = start;
+ return tmplen;
+ }
+ length += tmplen;
+
return length;
}
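The shape of this fix is the general rule for composing two i2d-style encoders: if the second call fails, rewind the output pointer to where it started and propagate the error instead of adding a negative length. A generic sketch with hypothetical encoder callbacks that follow the usual i2d contract (advance *pp, return the encoded length or a negative error):

static int i2d_pair(void *obj_a, void *obj_b,
                    int (*enc_a)(void *, unsigned char **),
                    int (*enc_b)(void *, unsigned char **),
                    unsigned char **pp)
{
    unsigned char *start = pp != NULL ? *pp : NULL;
    int len, tmplen;

    len = enc_a(obj_a, pp);
    if (len < 0)
        return len;
    tmplen = enc_b(obj_b, pp);
    if (tmplen < 0) {
        if (start != NULL)
            *pp = start;                 /* undo the partial write */
        return tmplen;
    }
    return len + tmplen;
}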
diff --git a/crypto/bn/asm/ppc-mont.pl b/crypto/bn/asm/ppc-mont.pl
index da69c6aaaf6a..6930a3acebd2 100755
--- a/crypto/bn/asm/ppc-mont.pl
+++ b/crypto/bn/asm/ppc-mont.pl
@@ -191,7 +191,7 @@ L1st:
addi $j,$j,$BNSZ ; j++
addi $tp,$tp,$BNSZ ; tp++
- bdnz- L1st
+ bdnz L1st
;L1st
addc $lo0,$alo,$hi0
addze $hi0,$ahi
@@ -253,7 +253,7 @@ Linner:
addze $hi1,$hi1
$ST $lo1,0($tp) ; tp[j-1]
addi $tp,$tp,$BNSZ ; tp++
- bdnz- Linner
+ bdnz Linner
;Linner
$LD $tj,$BNSZ($tp) ; tp[j]
addc $lo0,$alo,$hi0
@@ -276,7 +276,7 @@ Linner:
slwi $tj,$num,`log($BNSZ)/log(2)`
$UCMP $i,$tj
addi $i,$i,$BNSZ
- ble- Louter
+ ble Louter
addi $num,$num,2 ; restore $num
subfc $j,$j,$j ; j=0 and "clear" XER[CA]
@@ -289,7 +289,7 @@ Lsub: $LDX $tj,$tp,$j
subfe $aj,$nj,$tj ; tp[j]-np[j]
$STX $aj,$rp,$j
addi $j,$j,$BNSZ
- bdnz- Lsub
+ bdnz Lsub
li $j,0
mtctr $num
@@ -304,7 +304,7 @@ Lcopy: ; copy or in-place refresh
$STX $tj,$rp,$j
$STX $j,$tp,$j ; zap at once
addi $j,$j,$BNSZ
- bdnz- Lcopy
+ bdnz Lcopy
$POP $tj,0($sp)
li r3,1
diff --git a/crypto/bn/asm/ppc.pl b/crypto/bn/asm/ppc.pl
index 04df1fe5cc65..446d8ba9492b 100644
--- a/crypto/bn/asm/ppc.pl
+++ b/crypto/bn/asm/ppc.pl
@@ -1556,7 +1556,7 @@ Lppcasm_sub_mainloop:
# if carry = 1 this is r7-r8. Else it
# is r7-r8 -1 as we need.
$STU r6,$BNSZ(r3)
- bdnz- Lppcasm_sub_mainloop
+ bdnz Lppcasm_sub_mainloop
Lppcasm_sub_adios:
subfze r3,r0 # if carry bit is set then r3 = 0 else -1
andi. r3,r3,1 # keep only last bit.
@@ -1603,7 +1603,7 @@ Lppcasm_add_mainloop:
$LDU r8,$BNSZ(r5)
adde r8,r7,r8
$STU r8,$BNSZ(r3)
- bdnz- Lppcasm_add_mainloop
+ bdnz Lppcasm_add_mainloop
Lppcasm_add_adios:
addze r3,r0 #return carry bit.
blr
@@ -1762,7 +1762,7 @@ Lppcasm_sqr_mainloop:
$UMULH r8,r6,r6
$STU r7,$BNSZ(r3)
$STU r8,$BNSZ(r3)
- bdnz- Lppcasm_sqr_mainloop
+ bdnz Lppcasm_sqr_mainloop
Lppcasm_sqr_adios:
blr
.long 0
@@ -1827,7 +1827,7 @@ Lppcasm_mw_LOOP:
addi r3,r3,`4*$BNSZ`
addi r4,r4,`4*$BNSZ`
- bdnz- Lppcasm_mw_LOOP
+ bdnz Lppcasm_mw_LOOP
Lppcasm_mw_REM:
andi. r5,r5,0x3
@@ -1951,7 +1951,7 @@ Lppcasm_maw_mainloop:
$ST r11,`3*$BNSZ`(r3)
addi r3,r3,`4*$BNSZ`
addi r4,r4,`4*$BNSZ`
- bdnz- Lppcasm_maw_mainloop
+ bdnz Lppcasm_maw_mainloop
Lppcasm_maw_leftover:
andi. r5,r5,0x3
diff --git a/crypto/bn/asm/ppc64-mont.pl b/crypto/bn/asm/ppc64-mont.pl
index 9e3c12d788e5..595fc6d31f60 100755
--- a/crypto/bn/asm/ppc64-mont.pl
+++ b/crypto/bn/asm/ppc64-mont.pl
@@ -734,7 +734,7 @@ $code.=<<___;
___
}
$code.=<<___;
- bdnz- L1st
+ bdnz L1st
fctid $dota,$dota
fctid $dotb,$dotb
@@ -1280,7 +1280,7 @@ $code.=<<___;
___
}
$code.=<<___;
- bdnz- Linner
+ bdnz Linner
fctid $dota,$dota
fctid $dotb,$dotb
@@ -1490,7 +1490,7 @@ Lsub: ldx $t0,$tp,$i
stdx $t0,$rp,$i
stdx $t2,$t6,$i
addi $i,$i,16
- bdnz- Lsub
+ bdnz Lsub
li $i,0
subfe $ovf,$i,$ovf ; handle upmost overflow bit
@@ -1517,7 +1517,7 @@ Lcopy: ; copy or in-place refresh
stdx $i,$tp,$i ; zap tp at once
stdx $i,$t4,$i
addi $i,$i,16
- bdnz- Lcopy
+ bdnz Lcopy
___
$code.=<<___ if ($SIZE_T==4);
subf $np,$num,$np ; rewind np
@@ -1550,7 +1550,7 @@ Lsub: lwz $t0,12($tp) ; load tp[j..j+3] in 64-bit word order
stw $t5,8($rp)
stw $t6,12($rp)
stwu $t7,16($rp)
- bdnz- Lsub
+ bdnz Lsub
li $i,0
subfe $ovf,$i,$ovf ; handle upmost overflow bit
@@ -1582,7 +1582,7 @@ Lcopy: ; copy or in-place refresh
stwu $t3,16($rp)
std $i,8($tp) ; zap tp at once
stdu $i,16($tp)
- bdnz- Lcopy
+ bdnz Lcopy
___
$code.=<<___;
diff --git a/crypto/bn/asm/x86-mont.pl b/crypto/bn/asm/x86-mont.pl
index e8f6b050842e..89f4de61e896 100755
--- a/crypto/bn/asm/x86-mont.pl
+++ b/crypto/bn/asm/x86-mont.pl
@@ -85,6 +85,21 @@ $frame=32; # size of above frame rounded up to 16n
&and ("esp",-64); # align to cache line
+ # Some OSes, *cough*-dows, insist on stack being "wired" to
+ # physical memory in strictly sequential manner, i.e. if stack
+ # allocation spans two pages, then reference to farmost one can
+ # be punishable by SEGV. But page walking can do good even on
+ # other OSes, because it guarantees that villain thread hits
+ # the guard page before it can make damage to innocent one...
+ &mov ("eax","ebp");
+ &sub ("eax","esp");
+ &and ("eax",-4096);
+&set_label("page_walk");
+ &mov ("edx",&DWP(0,"esp","eax"));
+ &sub ("eax",4096);
+ &data_byte(0x2e);
+ &jnc (&label("page_walk"));
+
################################# load argument block...
&mov ("eax",&DWP(0*4,"esi"));# BN_ULONG *rp
&mov ("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap
diff --git a/crypto/bn/asm/x86_64-mont.pl b/crypto/bn/asm/x86_64-mont.pl
index 29ba1224e36b..8fb6c994e1ef 100755
--- a/crypto/bn/asm/x86_64-mont.pl
+++ b/crypto/bn/asm/x86_64-mont.pl
@@ -130,6 +130,20 @@ $code.=<<___;
mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
.Lmul_body:
+ # Some OSes, *cough*-dows, insist on stack being "wired" to
+ # physical memory in strictly sequential manner, i.e. if stack
+ # allocation spans two pages, then reference to farmost one can
+ # be punishable by SEGV. But page walking can do good even on
+ # other OSes, because it guarantees that villain thread hits
+ # the guard page before it can make damage to innocent one...
+ sub %rsp,%r11
+ and \$-4096,%r11
+.Lmul_page_walk:
+ mov (%rsp,%r11),%r10
+ sub \$4096,%r11
+ .byte 0x66,0x2e # predict non-taken
+ jnc .Lmul_page_walk
+
mov $bp,%r12 # reassign $bp
___
$bp="%r12";
@@ -342,6 +356,14 @@ $code.=<<___;
mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
.Lmul4x_body:
+ sub %rsp,%r11
+ and \$-4096,%r11
+.Lmul4x_page_walk:
+ mov (%rsp,%r11),%r10
+ sub \$4096,%r11
+ .byte 0x2e # predict non-taken
+ jnc .Lmul4x_page_walk
+
mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
mov %rdx,%r12 # reassign $bp
___
@@ -795,6 +817,15 @@ bn_sqr8x_mont:
sub %r11,%rsp
.Lsqr8x_sp_done:
and \$-64,%rsp
+ mov %rax,%r11
+ sub %rsp,%r11
+ and \$-4096,%r11
+.Lsqr8x_page_walk:
+ mov (%rsp,%r11),%r10
+ sub \$4096,%r11
+ .byte 0x2e # predict non-taken
+ jnc .Lsqr8x_page_walk
+
mov $num,%r10
neg $num
@@ -932,8 +963,17 @@ bn_mulx4x_mont:
sub $num,%r10 # -$num
mov ($n0),$n0 # *n0
lea -72(%rsp,%r10),%rsp # alloca(frame+$num+8)
- lea ($bp,$num),%r10
and \$-128,%rsp
+ mov %rax,%r11
+ sub %rsp,%r11
+ and \$-4096,%r11
+.Lmulx4x_page_walk:
+ mov (%rsp,%r11),%r10
+ sub \$4096,%r11
+ .byte 0x66,0x2e # predict non-taken
+ jnc .Lmulx4x_page_walk
+
+ lea ($bp,$num),%r10
##############################################################
# Stack layout
# +0 num
diff --git a/crypto/bn/asm/x86_64-mont5.pl b/crypto/bn/asm/x86_64-mont5.pl
index 2e8c9db32cbc..938e17081803 100755
--- a/crypto/bn/asm/x86_64-mont5.pl
+++ b/crypto/bn/asm/x86_64-mont5.pl
@@ -115,6 +115,20 @@ $code.=<<___;
mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
.Lmul_body:
+ # Some OSes, *cough*-dows, insist on stack being "wired" to
+ # physical memory in strictly sequential manner, i.e. if stack
+ # allocation spans two pages, then reference to farmost one can
+ # be punishable by SEGV. But page walking can do good even on
+ # other OSes, because it guarantees that villain thread hits
+ # the guard page before it can make damage to innocent one...
+ sub %rsp,%rax
+ and \$-4096,%rax
+.Lmul_page_walk:
+ mov (%rsp,%rax),%r11
+ sub \$4096,%rax
+ .byte 0x2e # predict non-taken
+ jnc .Lmul_page_walk
+
lea 128($bp),%r12 # reassign $bp (+size optimization)
___
$bp="%r12";
@@ -469,6 +483,15 @@ $code.=<<___;
sub %r11,%rsp
.Lmul4xsp_done:
and \$-64,%rsp
+ mov %rax,%r11
+ sub %rsp,%r11
+ and \$-4096,%r11
+.Lmul4x_page_walk:
+ mov (%rsp,%r11),%r10
+ sub \$4096,%r11
+ .byte 0x2e # predict non-taken
+ jnc .Lmul4x_page_walk
+
neg $num
mov %rax,40(%rsp)
@@ -1058,6 +1081,15 @@ $code.=<<___;
sub %r11,%rsp
.Lpwr_sp_done:
and \$-64,%rsp
+ mov %rax,%r11
+ sub %rsp,%r11
+ and \$-4096,%r11
+.Lpwr_page_walk:
+ mov (%rsp,%r11),%r10
+ sub \$4096,%r11
+ .byte 0x2e # predict non-taken
+ jnc .Lpwr_page_walk
+
mov $num,%r10
neg $num
@@ -2028,7 +2060,16 @@ bn_from_mont8x:
sub %r11,%rsp
.Lfrom_sp_done:
and \$-64,%rsp
- mov $num,%r10
+ mov %rax,%r11
+ sub %rsp,%r11
+ and \$-4096,%r11
+.Lfrom_page_walk:
+ mov (%rsp,%r11),%r10
+ sub \$4096,%r11
+ .byte 0x2e # predict non-taken
+ jnc .Lfrom_page_walk
+
+ mov $num,%r10
neg $num
##############################################################
@@ -2173,6 +2214,15 @@ bn_mulx4x_mont_gather5:
sub %r11,%rsp
.Lmulx4xsp_done:
and \$-64,%rsp # ensure alignment
+ mov %rax,%r11
+ sub %rsp,%r11
+ and \$-4096,%r11
+.Lmulx4x_page_walk:
+ mov (%rsp,%r11),%r10
+ sub \$4096,%r11
+ .byte 0x2e # predict non-taken
+ jnc .Lmulx4x_page_walk
+
##############################################################
# Stack layout
# +0 -num
@@ -2619,6 +2669,15 @@ bn_powerx5:
sub %r11,%rsp
.Lpwrx_sp_done:
and \$-64,%rsp
+ mov %rax,%r11
+ sub %rsp,%r11
+ and \$-4096,%r11
+.Lpwrx_page_walk:
+ mov (%rsp,%r11),%r10
+ sub \$4096,%r11
+ .byte 0x2e # predict non-taken
+ jnc .Lpwrx_page_walk
+
mov $num,%r10
neg $num
diff --git a/crypto/comp/comp.h b/crypto/comp/comp.h
index 406c428aaeed..60a073404e92 100644
--- a/crypto/comp/comp.h
+++ b/crypto/comp/comp.h
@@ -4,6 +4,10 @@
# include <openssl/crypto.h>
+# ifdef OPENSSL_NO_COMP
+# error COMP is disabled.
+# endif
+
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/crypto/evp/Makefile b/crypto/evp/Makefile
index aaaad986e0e8..fa138d0b1014 100644
--- a/crypto/evp/Makefile
+++ b/crypto/evp/Makefile
@@ -199,8 +199,8 @@ e_aes.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
e_aes.o: ../../include/openssl/rand.h ../../include/openssl/safestack.h
e_aes.o: ../../include/openssl/stack.h ../../include/openssl/symhacks.h
e_aes.o: ../modes/modes_lcl.h e_aes.c evp_locl.h
-e_aes_cbc_hmac_sha1.o: ../../include/openssl/aes.h ../../include/openssl/asn1.h
-e_aes_cbc_hmac_sha1.o: ../../include/openssl/bio.h
+e_aes_cbc_hmac_sha1.o: ../../e_os.h ../../include/openssl/aes.h
+e_aes_cbc_hmac_sha1.o: ../../include/openssl/asn1.h ../../include/openssl/bio.h
e_aes_cbc_hmac_sha1.o: ../../include/openssl/crypto.h
e_aes_cbc_hmac_sha1.o: ../../include/openssl/e_os2.h
e_aes_cbc_hmac_sha1.o: ../../include/openssl/evp.h
@@ -214,9 +214,9 @@ e_aes_cbc_hmac_sha1.o: ../../include/openssl/rand.h
e_aes_cbc_hmac_sha1.o: ../../include/openssl/safestack.h
e_aes_cbc_hmac_sha1.o: ../../include/openssl/sha.h
e_aes_cbc_hmac_sha1.o: ../../include/openssl/stack.h
-e_aes_cbc_hmac_sha1.o: ../../include/openssl/symhacks.h ../modes/modes_lcl.h
-e_aes_cbc_hmac_sha1.o: e_aes_cbc_hmac_sha1.c
-e_aes_cbc_hmac_sha256.o: ../../include/openssl/aes.h
+e_aes_cbc_hmac_sha1.o: ../../include/openssl/symhacks.h ../constant_time_locl.h
+e_aes_cbc_hmac_sha1.o: ../modes/modes_lcl.h e_aes_cbc_hmac_sha1.c
+e_aes_cbc_hmac_sha256.o: ../../e_os.h ../../include/openssl/aes.h
e_aes_cbc_hmac_sha256.o: ../../include/openssl/asn1.h
e_aes_cbc_hmac_sha256.o: ../../include/openssl/bio.h
e_aes_cbc_hmac_sha256.o: ../../include/openssl/crypto.h
@@ -232,7 +232,8 @@ e_aes_cbc_hmac_sha256.o: ../../include/openssl/rand.h
e_aes_cbc_hmac_sha256.o: ../../include/openssl/safestack.h
e_aes_cbc_hmac_sha256.o: ../../include/openssl/sha.h
e_aes_cbc_hmac_sha256.o: ../../include/openssl/stack.h
-e_aes_cbc_hmac_sha256.o: ../../include/openssl/symhacks.h ../modes/modes_lcl.h
+e_aes_cbc_hmac_sha256.o: ../../include/openssl/symhacks.h
+e_aes_cbc_hmac_sha256.o: ../constant_time_locl.h ../modes/modes_lcl.h
e_aes_cbc_hmac_sha256.o: e_aes_cbc_hmac_sha256.c
e_bf.o: ../../e_os.h ../../include/openssl/asn1.h ../../include/openssl/bio.h
e_bf.o: ../../include/openssl/blowfish.h ../../include/openssl/buffer.h
diff --git a/crypto/evp/digest.c b/crypto/evp/digest.c
index f2643f32486a..5b642b23fc1c 100644
--- a/crypto/evp/digest.c
+++ b/crypto/evp/digest.c
@@ -212,8 +212,10 @@ int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl)
}
#endif
if (ctx->digest != type) {
- if (ctx->digest && ctx->digest->ctx_size)
+ if (ctx->digest && ctx->digest->ctx_size) {
OPENSSL_free(ctx->md_data);
+ ctx->md_data = NULL;
+ }
ctx->digest = type;
if (!(ctx->flags & EVP_MD_CTX_FLAG_NO_INIT) && type->ctx_size) {
ctx->update = type->update;
diff --git a/crypto/evp/e_aes_cbc_hmac_sha1.c b/crypto/evp/e_aes_cbc_hmac_sha1.c
index 8330964ee16b..6dfd590a4a2c 100644
--- a/crypto/evp/e_aes_cbc_hmac_sha1.c
+++ b/crypto/evp/e_aes_cbc_hmac_sha1.c
@@ -60,6 +60,7 @@
# include <openssl/sha.h>
# include <openssl/rand.h>
# include "modes_lcl.h"
+# include "constant_time_locl.h"
# ifndef EVP_CIPH_FLAG_AEAD_CIPHER
# define EVP_CIPH_FLAG_AEAD_CIPHER 0x200000
@@ -578,6 +579,8 @@ static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
maxpad &= 255;
+ ret &= constant_time_ge(maxpad, pad);
+
inp_len = len - (SHA_DIGEST_LENGTH + pad + 1);
mask = (0 - ((inp_len - len) >> (sizeof(inp_len) * 8 - 1)));
inp_len &= mask;
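constant_time_ge() comes from constant_time_locl.h and yields an all-ones mask when its first argument is greater than or equal to the second, so the `ret &=` above clears the return value without branching whenever the padding byte claims more padding than the record could hold. A simplified sketch of such a mask for small inputs (pad and maxpad are both at most 255; the real helper covers the full unsigned range):

#include <stdint.h>

/* All-ones when a >= b, zero otherwise, valid for a, b < 2^31. */
static uint32_t ct_ge_small(uint32_t a, uint32_t b)
{
    uint32_t borrow = (a - b) >> 31;     /* 1 if a < b, 0 if a >= b */
    return borrow - 1;                   /* 0x00000000 or 0xffffffff */
}

Usage would then look like `ret &= ct_ge_small(maxpad, pad);`.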
diff --git a/crypto/evp/e_aes_cbc_hmac_sha256.c b/crypto/evp/e_aes_cbc_hmac_sha256.c
index 37800213c764..46c9d033895b 100644
--- a/crypto/evp/e_aes_cbc_hmac_sha256.c
+++ b/crypto/evp/e_aes_cbc_hmac_sha256.c
@@ -60,6 +60,7 @@
# include <openssl/sha.h>
# include <openssl/rand.h>
# include "modes_lcl.h"
+# include "constant_time_locl.h"
# ifndef EVP_CIPH_FLAG_AEAD_CIPHER
# define EVP_CIPH_FLAG_AEAD_CIPHER 0x200000
@@ -589,6 +590,8 @@ static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx,
maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
maxpad &= 255;
+ ret &= constant_time_ge(maxpad, pad);
+
inp_len = len - (SHA256_DIGEST_LENGTH + pad + 1);
mask = (0 - ((inp_len - len) >> (sizeof(inp_len) * 8 - 1)));
inp_len &= mask;
diff --git a/crypto/evp/encode.c b/crypto/evp/encode.c
index c6abc4ae8e47..c6c775e0a0cd 100644
--- a/crypto/evp/encode.c
+++ b/crypto/evp/encode.c
@@ -57,6 +57,7 @@
*/
#include <stdio.h>
+#include <limits.h>
#include "cryptlib.h"
#include <openssl/evp.h>
@@ -151,13 +152,13 @@ void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl,
const unsigned char *in, int inl)
{
int i, j;
- unsigned int total = 0;
+ size_t total = 0;
*outl = 0;
if (inl <= 0)
return;
OPENSSL_assert(ctx->length <= (int)sizeof(ctx->enc_data));
- if ((ctx->num + inl) < ctx->length) {
+ if (ctx->length - ctx->num > inl) {
memcpy(&(ctx->enc_data[ctx->num]), in, inl);
ctx->num += inl;
return;
@@ -174,7 +175,7 @@ void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl,
*out = '\0';
total = j + 1;
}
- while (inl >= ctx->length) {
+ while (inl >= ctx->length && total <= INT_MAX) {
j = EVP_EncodeBlock(out, in, ctx->length);
in += ctx->length;
inl -= ctx->length;
@@ -183,6 +184,11 @@ void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl,
*out = '\0';
total += j + 1;
}
+ if (total > INT_MAX) {
+ /* Too much output data! */
+ *outl = 0;
+ return;
+ }
if (inl != 0)
memcpy(&(ctx->enc_data[0]), in, inl);
ctx->num = inl;
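The rewritten comparison here (and the matching one in evp_enc.c below) avoids the integer overflow in `ctx->num + inl` when inl is attacker-controlled, by moving the arithmetic to the side that is known to stay small. As a sketch, with illustrative parameter names:

/* Does `inl` more bytes fit in the staging buffer?
 * Assumes 0 <= ctx_num <= ctx_len and inl >= 0. */
static int fits_in_buffer(int ctx_len, int ctx_num, int inl)
{
    return ctx_len - ctx_num > inl;      /* no overflow, unlike ctx_num + inl */
}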
diff --git a/crypto/evp/evp_enc.c b/crypto/evp/evp_enc.c
index 65f0e0244dce..7d7be245b021 100644
--- a/crypto/evp/evp_enc.c
+++ b/crypto/evp/evp_enc.c
@@ -347,7 +347,7 @@ int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl,
bl = ctx->cipher->block_size;
OPENSSL_assert(bl <= (int)sizeof(ctx->buf));
if (i != 0) {
- if (i + inl < bl) {
+ if (bl - i > inl) {
memcpy(&(ctx->buf[i]), in, inl);
ctx->buf_len += inl;
*outl = 0;
diff --git a/crypto/modes/asm/ghash-s390x.pl b/crypto/modes/asm/ghash-s390x.pl
index 39096b423ad8..be7d55f74876 100755
--- a/crypto/modes/asm/ghash-s390x.pl
+++ b/crypto/modes/asm/ghash-s390x.pl
@@ -85,9 +85,7 @@ $code.=<<___ if(!$softonly && 0); # hardware is slow for single block...
tmhl %r0,0x4000 # check for message-security-assist
jz .Lsoft_gmult
lghi %r0,0
- la %r1,16($sp)
- .long 0xb93e0004 # kimd %r0,%r4
- lg %r1,24($sp)
+ lg %r1,24(%r1) # load second word of kimd capabilities vector
tmhh %r1,0x4000 # check for function 65
jz .Lsoft_gmult
stg %r0,16($sp) # arrange 16 bytes of zero input
diff --git a/crypto/opensslv.h b/crypto/opensslv.h
index 4334fd15cd87..13fe440231cd 100644
--- a/crypto/opensslv.h
+++ b/crypto/opensslv.h
@@ -30,11 +30,11 @@ extern "C" {
* (Prior to 0.9.5a beta1, a different scheme was used: MMNNFFRBB for
* major minor fix final patch/beta)
*/
-# define OPENSSL_VERSION_NUMBER 0x1000207fL
+# define OPENSSL_VERSION_NUMBER 0x1000208fL
# ifdef OPENSSL_FIPS
-# define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2g-fips 1 Mar 2016"
+# define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2h-fips 3 May 2016"
# else
-# define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2g 1 Mar 2016"
+# define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2h 3 May 2016"
# endif
# define OPENSSL_VERSION_PTEXT " part of " OPENSSL_VERSION_TEXT
diff --git a/crypto/pem/pem_lib.c b/crypto/pem/pem_lib.c
index a29821aab2eb..fe881d664171 100644
--- a/crypto/pem/pem_lib.c
+++ b/crypto/pem/pem_lib.c
@@ -348,7 +348,7 @@ int PEM_ASN1_write_bio(i2d_of_void *i2d, const char *name, BIO *bp,
if (enc != NULL) {
objstr = OBJ_nid2sn(EVP_CIPHER_nid(enc));
- if (objstr == NULL) {
+ if (objstr == NULL || EVP_CIPHER_iv_length(enc) == 0) {
PEMerr(PEM_F_PEM_ASN1_WRITE_BIO, PEM_R_UNSUPPORTED_CIPHER);
goto err;
}
diff --git a/crypto/pem/pvkfmt.c b/crypto/pem/pvkfmt.c
index 82d45273ed16..61864468f6d4 100644
--- a/crypto/pem/pvkfmt.c
+++ b/crypto/pem/pvkfmt.c
@@ -131,6 +131,10 @@ static int read_lebn(const unsigned char **in, unsigned int nbyte, BIGNUM **r)
# define MS_PVKMAGIC 0xb0b5f11eL
/* Salt length for PVK files */
# define PVK_SALTLEN 0x10
+/* Maximum length in PVK header */
+# define PVK_MAX_KEYLEN 102400
+/* Maximum salt length */
+# define PVK_MAX_SALTLEN 10240
static EVP_PKEY *b2i_rsa(const unsigned char **in, unsigned int length,
unsigned int bitlen, int ispub);
@@ -644,6 +648,9 @@ static int do_PVK_header(const unsigned char **in, unsigned int length,
*psaltlen = read_ledword(&p);
*pkeylen = read_ledword(&p);
+ if (*pkeylen > PVK_MAX_KEYLEN || *psaltlen > PVK_MAX_SALTLEN)
+ return 0;
+
if (is_encrypted && !*psaltlen) {
PEMerr(PEM_F_DO_PVK_HEADER, PEM_R_INCONSISTENT_HEADER);
return 0;
diff --git a/crypto/perlasm/x86_64-xlate.pl b/crypto/perlasm/x86_64-xlate.pl
index ee04221c7e1d..7a3dd04b0f9d 100755
--- a/crypto/perlasm/x86_64-xlate.pl
+++ b/crypto/perlasm/x86_64-xlate.pl
@@ -195,6 +195,7 @@ my %globals;
sub out {
my $self = shift;
+ $self->{value} =~ s/\b(0b[0-1]+)/oct($1)/eig;
if ($gas) {
# Solaris /usr/ccs/bin/as can't handle multiplications
# in $self->{value}
@@ -205,7 +206,6 @@ my %globals;
}
sprintf "\$%s",$self->{value};
} else {
- $self->{value} =~ s/(0b[0-1]+)/oct($1)/eig;
$self->{value} =~ s/0x([0-9a-f]+)/0$1h/ig if ($masm);
sprintf "%s",$self->{value};
}
diff --git a/crypto/s390xcpuid.S b/crypto/s390xcpuid.S
index 06815347e6a3..d91d5bc4b64b 100644
--- a/crypto/s390xcpuid.S
+++ b/crypto/s390xcpuid.S
@@ -5,14 +5,46 @@
.align 16
OPENSSL_s390x_facilities:
lghi %r0,0
- larl %r2,OPENSSL_s390xcap_P
- stg %r0,8(%r2)
- .long 0xb2b02000 # stfle 0(%r2)
+ larl %r4,OPENSSL_s390xcap_P
+ stg %r0,8(%r4) # wipe capability vectors
+ stg %r0,16(%r4)
+ stg %r0,24(%r4)
+ stg %r0,32(%r4)
+ stg %r0,40(%r4)
+ stg %r0,48(%r4)
+ stg %r0,56(%r4)
+ stg %r0,64(%r4)
+ stg %r0,72(%r4)
+
+ .long 0xb2b04000 # stfle 0(%r4)
brc 8,.Ldone
lghi %r0,1
- .long 0xb2b02000 # stfle 0(%r2)
+ .long 0xb2b04000 # stfle 0(%r4)
.Ldone:
- lg %r2,0(%r2)
+ lmg %r2,%r3,0(%r4)
+ tmhl %r2,0x4000 # check for message-security-assist
+ jz .Lret
+
+ lghi %r0,0 # query kimd capabilities
+ la %r1,16(%r4)
+ .long 0xb93e0002 # kimd %r0,%r2
+
+ lghi %r0,0 # query km capability vector
+ la %r1,32(%r4)
+ .long 0xb92e0042 # km %r4,%r2
+
+ lghi %r0,0 # query kmc capability vector
+ la %r1,48(%r4)
+ .long 0xb92f0042 # kmc %r4,%r2
+
+ tmhh %r3,0x0004 # check for message-security-assist-4
+ jz .Lret
+
+ lghi %r0,0 # query kmctr capability vector
+ la %r1,64(%r4)
+ .long 0xb92d2042 # kmctr %r4,%r2,%r2
+
+.Lret:
br %r14
.size OPENSSL_s390x_facilities,.-OPENSSL_s390x_facilities
@@ -96,4 +128,4 @@ OPENSSL_cleanse:
.section .init
brasl %r14,OPENSSL_cpuid_setup
-.comm OPENSSL_s390xcap_P,16,8
+.comm OPENSSL_s390xcap_P,80,8
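With OPENSSL_s390xcap_P grown to 80 bytes, the assembly callers test one bit per function code, counted from the most significant bit of the relevant 16-byte query result (the llihh/srlg/ng sequences in aes-s390x.pl above). A byte-wise C sketch of the same test, with offsets as implied by this diff (illustrative, not an OpenSSL API; assumes the big-endian s390x layout):

/* Offsets of the per-instruction capability vectors in the 80-byte array. */
enum { CAP_STFLE = 0, CAP_KIMD = 16, CAP_KM = 32, CAP_KMC = 48, CAP_KMCTR = 64 };

/* Bit 0 is the MSB of byte 0 of the vector, matching the CPU's numbering. */
static int cap_bit_set(const unsigned char cap[80], int vec_off, int fc)
{
    return (cap[vec_off + fc / 8] >> (7 - fc % 8)) & 1;
}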
diff --git a/crypto/sha/asm/sha1-ppc.pl b/crypto/sha/asm/sha1-ppc.pl
index df5989610c4c..ab655021ccd6 100755
--- a/crypto/sha/asm/sha1-ppc.pl
+++ b/crypto/sha/asm/sha1-ppc.pl
@@ -227,7 +227,7 @@ Lunaligned:
srwi. $t1,$t1,6 ; t1/=64
beq Lcross_page
$UCMP $num,$t1
- ble- Laligned ; didn't cross the page boundary
+ ble Laligned ; didn't cross the page boundary
mtctr $t1
subfc $num,$t1,$num
bl Lsha1_block_private
@@ -255,7 +255,7 @@ Lmemcpy:
bl Lsha1_block_private
$POP $inp,`$FRAME-$SIZE_T*18`($sp)
addic. $num,$num,-1
- bne- Lunaligned
+ bne Lunaligned
Ldone:
$POP r0,`$FRAME+$LRSAVE`($sp)
@@ -329,7 +329,7 @@ $code.=<<___;
stw r20,16($ctx)
mr $E,r20
addi $inp,$inp,`16*4`
- bdnz- Lsha1_block_private
+ bdnz Lsha1_block_private
blr
.long 0
.byte 0,12,0x14,0,0,0,0,0
diff --git a/crypto/sha/asm/sha1-s390x.pl b/crypto/sha/asm/sha1-s390x.pl
index 9193dda45eff..d5cf1640a120 100755
--- a/crypto/sha/asm/sha1-s390x.pl
+++ b/crypto/sha/asm/sha1-s390x.pl
@@ -167,10 +167,7 @@ $code.=<<___ if ($kimdfunc);
lg %r0,0(%r1)
tmhl %r0,0x4000 # check for message-security assist
jz .Lsoftware
- lghi %r0,0
- la %r1,`2*$SIZE_T`($sp)
- .long 0xb93e0002 # kimd %r0,%r2
- lg %r0,`2*$SIZE_T`($sp)
+ lg %r0,16(%r1) # check kimd capabilities
tmhh %r0,`0x8000>>$kimdfunc`
jz .Lsoftware
lghi %r0,$kimdfunc
@@ -237,7 +234,7 @@ $code.=<<___;
br %r14
.size sha1_block_data_order,.-sha1_block_data_order
.string "SHA1 block transform for s390x, CRYPTOGAMS by <appro\@openssl.org>"
-.comm OPENSSL_s390xcap_P,16,8
+.comm OPENSSL_s390xcap_P,80,8
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;
diff --git a/crypto/sha/asm/sha512-ppc.pl b/crypto/sha/asm/sha512-ppc.pl
index 734f3c1ca0f0..17fdc6e8e5a9 100755
--- a/crypto/sha/asm/sha512-ppc.pl
+++ b/crypto/sha/asm/sha512-ppc.pl
@@ -259,7 +259,7 @@ Lunaligned:
andi. $t1,$t1,`4096-16*$SZ` ; distance to closest page boundary
beq Lcross_page
$UCMP $num,$t1
- ble- Laligned ; didn't cross the page boundary
+ ble Laligned ; didn't cross the page boundary
subfc $num,$t1,$num
add $t1,$inp,$t1
$PUSH $num,`$FRAME-$SIZE_T*25`($sp) ; save real remaining num
@@ -317,7 +317,7 @@ $code.=<<___;
$POP $inp,`$FRAME-$SIZE_T*26`($sp) ; restore real inp
$POP $num,`$FRAME-$SIZE_T*25`($sp) ; restore real num
addic. $num,$num,`-16*$SZ` ; num--
- bne- Lunaligned
+ bne Lunaligned
Ldone:
$POP r0,`$FRAME+$LRSAVE`($sp)
@@ -396,7 +396,7 @@ for(;$i<32;$i++) {
unshift(@V,pop(@V));
}
$code.=<<___;
- bdnz- Lrounds
+ bdnz Lrounds
$POP $ctx,`$FRAME-$SIZE_T*22`($sp)
$POP $inp,`$FRAME-$SIZE_T*23`($sp) ; inp pointer
@@ -644,7 +644,7 @@ for(;$i<32;$i++) {
($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
}
$code.=<<___;
- bdnz- Lrounds
+ bdnz Lrounds
$POP $ctx,`$FRAME-$SIZE_T*22`($sp)
$POP $inp,`$FRAME-$SIZE_T*23`($sp) ; inp pointer
diff --git a/crypto/sha/asm/sha512-s390x.pl b/crypto/sha/asm/sha512-s390x.pl
index 079a3fc78ab4..9c10e4e9ee74 100755
--- a/crypto/sha/asm/sha512-s390x.pl
+++ b/crypto/sha/asm/sha512-s390x.pl
@@ -240,10 +240,7 @@ $code.=<<___ if ($kimdfunc);
lg %r0,0(%r1)
tmhl %r0,0x4000 # check for message-security assist
jz .Lsoftware
- lghi %r0,0
- la %r1,`2*$SIZE_T`($sp)
- .long 0xb93e0002 # kimd %r0,%r2
- lg %r0,`2*$SIZE_T`($sp)
+ lg %r0,16(%r1) # check kimd capabilities
tmhh %r0,`0x8000>>$kimdfunc`
jz .Lsoftware
lghi %r0,$kimdfunc
@@ -311,7 +308,7 @@ $code.=<<___;
br %r14
.size $Func,.-$Func
.string "SHA${label} block transform for s390x, CRYPTOGAMS by <appro\@openssl.org>"
-.comm OPENSSL_s390xcap_P,16,8
+.comm OPENSSL_s390xcap_P,80,8
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;
diff --git a/crypto/x509/x509.h b/crypto/x509/x509.h
index 99337b849a52..fc613ce63526 100644
--- a/crypto/x509/x509.h
+++ b/crypto/x509/x509.h
@@ -1305,6 +1305,7 @@ void ERR_load_X509_strings(void);
# define X509_R_LOADING_CERT_DIR 103
# define X509_R_LOADING_DEFAULTS 104
# define X509_R_METHOD_NOT_SUPPORTED 124
+# define X509_R_NAME_TOO_LONG 134
# define X509_R_NEWER_CRL_NOT_NEWER 132
# define X509_R_NO_CERT_SET_FOR_US_TO_VERIFY 105
# define X509_R_NO_CRL_NUMBER 130
diff --git a/crypto/x509/x509_err.c b/crypto/x509/x509_err.c
index 43cde18e49a7..1e779fefd9c1 100644
--- a/crypto/x509/x509_err.c
+++ b/crypto/x509/x509_err.c
@@ -151,6 +151,7 @@ static ERR_STRING_DATA X509_str_reasons[] = {
{ERR_REASON(X509_R_LOADING_CERT_DIR), "loading cert dir"},
{ERR_REASON(X509_R_LOADING_DEFAULTS), "loading defaults"},
{ERR_REASON(X509_R_METHOD_NOT_SUPPORTED), "method not supported"},
+ {ERR_REASON(X509_R_NAME_TOO_LONG), "name too long"},
{ERR_REASON(X509_R_NEWER_CRL_NOT_NEWER), "newer crl not newer"},
{ERR_REASON(X509_R_NO_CERT_SET_FOR_US_TO_VERIFY),
"no cert set for us to verify"},
diff --git a/crypto/x509/x509_obj.c b/crypto/x509/x509_obj.c
index d317f3af25c0..3de3ac720411 100644
--- a/crypto/x509/x509_obj.c
+++ b/crypto/x509/x509_obj.c
@@ -63,6 +63,13 @@
#include <openssl/x509.h>
#include <openssl/buffer.h>
+/*
+ * Limit to ensure we don't overflow: much greater than
+ * anything enountered in practice.
+ */
+
+#define NAME_ONELINE_MAX (1024 * 1024)
+
char *X509_NAME_oneline(X509_NAME *a, char *buf, int len)
{
X509_NAME_ENTRY *ne;
@@ -86,6 +93,8 @@ char *X509_NAME_oneline(X509_NAME *a, char *buf, int len)
goto err;
b->data[0] = '\0';
len = 200;
+ } else if (len == 0) {
+ return NULL;
}
if (a == NULL) {
if (b) {
@@ -110,6 +119,10 @@ char *X509_NAME_oneline(X509_NAME *a, char *buf, int len)
type = ne->value->type;
num = ne->value->length;
+ if (num > NAME_ONELINE_MAX) {
+ X509err(X509_F_X509_NAME_ONELINE, X509_R_NAME_TOO_LONG);
+ goto end;
+ }
q = ne->value->data;
#ifdef CHARSET_EBCDIC
if (type == V_ASN1_GENERALSTRING ||
@@ -117,8 +130,9 @@ char *X509_NAME_oneline(X509_NAME *a, char *buf, int len)
type == V_ASN1_PRINTABLESTRING ||
type == V_ASN1_TELETEXSTRING ||
type == V_ASN1_VISIBLESTRING || type == V_ASN1_IA5STRING) {
- ascii2ebcdic(ebcdic_buf, q, (num > sizeof ebcdic_buf)
- ? sizeof ebcdic_buf : num);
+ if (num > (int)sizeof(ebcdic_buf))
+ num = sizeof(ebcdic_buf);
+ ascii2ebcdic(ebcdic_buf, q, num);
q = ebcdic_buf;
}
#endif
@@ -154,6 +168,10 @@ char *X509_NAME_oneline(X509_NAME *a, char *buf, int len)
lold = l;
l += 1 + l1 + 1 + l2;
+ if (l > NAME_ONELINE_MAX) {
+ X509err(X509_F_X509_NAME_ONELINE, X509_R_NAME_TOO_LONG);
+ goto end;
+ }
if (b != NULL) {
if (!BUF_MEM_grow(b, l + 1))
goto err;
@@ -206,7 +224,7 @@ char *X509_NAME_oneline(X509_NAME *a, char *buf, int len)
return (p);
err:
X509err(X509_F_X509_NAME_ONELINE, ERR_R_MALLOC_FAILURE);
- if (b != NULL)
- BUF_MEM_free(b);
+ end:
+ BUF_MEM_free(b);
return (NULL);
}