-rw-r--r--   UPDATING                                     |  4
-rwxr-xr-x   crypto/openssl/crypto/bn/asm/rsaz-avx2.pl    | 15
-rw-r--r--   crypto/openssl/ssl/ssl.h                     |  2
-rw-r--r--   secure/lib/libcrypto/amd64/rsaz-avx2.S       | 11
-rw-r--r--   sys/conf/newvers.sh                          |  2
5 files changed, 19 insertions, 15 deletions
diff --git a/UPDATING b/UPDATING
index e63fad1..95758bf 100644
--- a/UPDATING
+++ b/UPDATING
@@ -16,6 +16,10 @@ from older versions of FreeBSD, try WITHOUT_CLANG and WITH_GCC to bootstrap to
the tip of head, and then rebuild without this option. The bootstrap process
from older versions of current across the gcc/clang cutover is a bit fragile.
+20171209 p6 FreeBSD-SA-17:12.openssl
+
+ Fix multiple vulnerabilities of OpenSSL.
+
20171129 p5 FreeBSD-SA-17:11.openssl
Fix multiple vulnerabilities of OpenSSL.
diff --git a/crypto/openssl/crypto/bn/asm/rsaz-avx2.pl b/crypto/openssl/crypto/bn/asm/rsaz-avx2.pl
index 712a77f..2b3f8b0 100755
--- a/crypto/openssl/crypto/bn/asm/rsaz-avx2.pl
+++ b/crypto/openssl/crypto/bn/asm/rsaz-avx2.pl
@@ -239,7 +239,7 @@ $code.=<<___;
vmovdqu 32*8-128($ap), $ACC8
lea 192(%rsp), $tp0 # 64+128=192
- vpbroadcastq .Land_mask(%rip), $AND_MASK
+ vmovdqu .Land_mask(%rip), $AND_MASK
jmp .LOOP_GRANDE_SQR_1024
.align 32
@@ -1070,10 +1070,10 @@ $code.=<<___;
vpmuludq 32*6-128($np),$Yi,$TEMP1
vpaddq $TEMP1,$ACC6,$ACC6
vpmuludq 32*7-128($np),$Yi,$TEMP2
- vpblendd \$3, $ZERO, $ACC9, $ACC9 # correct $ACC3
+ vpblendd \$3, $ZERO, $ACC9, $TEMP1 # correct $ACC3
vpaddq $TEMP2,$ACC7,$ACC7
vpmuludq 32*8-128($np),$Yi,$TEMP0
- vpaddq $ACC9, $ACC3, $ACC3 # correct $ACC3
+ vpaddq $TEMP1, $ACC3, $ACC3 # correct $ACC3
vpaddq $TEMP0,$ACC8,$ACC8
mov %rbx, %rax
@@ -1086,7 +1086,9 @@ $code.=<<___;
vmovdqu -8+32*2-128($ap),$TEMP2
mov $r1, %rax
+ vpblendd \$0xfc, $ZERO, $ACC9, $ACC9 # correct $ACC3
imull $n0, %eax
+ vpaddq $ACC9,$ACC4,$ACC4 # correct $ACC3
and \$0x1fffffff, %eax
imulq 16-128($ap),%rbx
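The two vpblendd edits above change where the spill held in $ACC9 is folded back in. vpblendd selects 32-bit lanes by immediate bit, so $3 pulls the lowest 64-bit digit from $ZERO (keeping the upper three digits of $ACC9), and the complementary $0xfc keeps only that lowest digit. Before the patch the low digit was simply dropped (the comment removed in the next hunk notes that only three digits out of four were corrected); now the upper part still goes into $ACC3 while the low digit is added into $ACC4. A minimal intrinsics sketch of the blend pair, with illustrative names acc9/zero:

    #include <immintrin.h>

    /* _mm256_blend_epi32(a, b, imm): lane i comes from b when bit i
     * of imm is set, otherwise from a. */
    static __m256i upper_digits(__m256i acc9, __m256i zero)
    {
        /* vpblendd $3, $ZERO, $ACC9, $TEMP1: zero the lowest 64-bit
         * digit, keep the rest -- this part is added into $ACC3 */
        return _mm256_blend_epi32(acc9, zero, 0x03);
    }

    static __m256i lowest_digit(__m256i acc9, __m256i zero)
    {
        /* vpblendd $0xfc, $ZERO, $ACC9, $ACC9: the complement -- only
         * the lowest digit survives, and it is added into $ACC4 */
        return _mm256_blend_epi32(acc9, zero, 0xfc);
    }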
@@ -1322,15 +1324,12 @@ ___
# But as we underutilize resources, it's possible to correct in
# each iteration with marginal performance loss. But then, as
# we do it in each iteration, we can correct less digits, and
-# avoid performance penalties completely. Also note that we
-# correct only three digits out of four. This works because
-# most significant digit is subjected to less additions.
+# avoid performance penalties completely.
$TEMP0 = $ACC9;
$TEMP3 = $Bi;
$TEMP4 = $Yi;
$code.=<<___;
- vpermq \$0, $AND_MASK, $AND_MASK
vpaddq (%rsp), $TEMP1, $ACC0
vpsrlq \$29, $ACC0, $TEMP1
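The vpaddq/vpsrlq $29 sequence this hunk leads into is the per-iteration digit correction the comment describes: each 64-bit lane holds a 29-bit digit plus accumulated excess, and the excess is shifted out and added into the next digit. A scalar model of that normalization, assuming the patched mask in every digit (illustrative helper, not the library's API):

    #include <stdint.h>
    #include <stddef.h>

    #define LIMB_MASK 0x1fffffffULL   /* the patched .Land_mask value */

    /* Scalar model of the vpand/vpsrlq/vpaddq correction; the real
     * code processes four digits per ymm register. */
    static void normalize(uint64_t *digit, size_t n)
    {
        uint64_t carry = 0;
        for (size_t i = 0; i < n; i++) {
            uint64_t v = digit[i] + carry;
            digit[i] = v & LIMB_MASK; /* every digit masked to 29 bits */
            carry    = v >> 29;       /* excess carries into the next digit */
        }
    }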
@@ -1763,7 +1762,7 @@ $code.=<<___;
.align 64
.Land_mask:
- .quad 0x1fffffff,0x1fffffff,0x1fffffff,-1
+ .quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff
.Lscatter_permd:
.long 0,2,4,6,7,7,7,7
.Lgather_permd:
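This table change is the heart of the fix (CVE-2017-3738 in FreeBSD-SA-17:12): the old fourth entry of -1 left the most significant digit unmasked, and the deleted comment's assumption that it "is subjected to less additions" did not always hold, so that digit could overflow. With all four quadwords now identical, a plain vmovdqu load gives the same register contents a broadcast would, which is why the vpbroadcastq at load time and the vpermq $0 rebroadcast both disappear. A small sketch of the equivalence:

    #include <immintrin.h>
    #include <stdint.h>

    /* The patched mask table: four identical 29-bit masks (was ...,-1). */
    static const uint64_t and_mask[4] = {
        0x1fffffff, 0x1fffffff, 0x1fffffff, 0x1fffffff
    };

    static __m256i load_and_mask(void)
    {
        /* vmovdqu of the full table; since all lanes are equal, no
         * vpbroadcastq of entry 0 (and no later vpermq $0) is needed. */
        return _mm256_loadu_si256((const __m256i *)and_mask);
    }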
diff --git a/crypto/openssl/ssl/ssl.h b/crypto/openssl/ssl/ssl.h
index 90aeb0c..3cf96a2 100644
--- a/crypto/openssl/ssl/ssl.h
+++ b/crypto/openssl/ssl/ssl.h
@@ -1727,7 +1727,7 @@ extern "C" {
# define SSL_ST_BEFORE 0x4000
# define SSL_ST_OK 0x03
# define SSL_ST_RENEGOTIATE (0x04|SSL_ST_INIT)
-# define SSL_ST_ERR 0x05
+# define SSL_ST_ERR (0x05|SSL_ST_INIT)
# define SSL_CB_LOOP 0x01
# define SSL_CB_EXIT 0x02
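This one-bit change is the fix for OpenSSL's "error state" bug (CVE-2017-3737): in OpenSSL 1.0.2, SSL_in_init() is just a mask test against SSL_ST_INIT, and with the old SSL_ST_ERR of 0x05 that bit was clear, so an SSL object that had entered the fatal error state looked as if its handshake had completed and SSL_read()/SSL_write() could pass application data. A minimal sketch of the mask test (constants copied from ssl.h; in_init() stands in for SSL_in_init()):

    #include <stdio.h>

    #define SSL_ST_CONNECT 0x1000
    #define SSL_ST_ACCEPT  0x2000
    #define SSL_ST_INIT    (SSL_ST_CONNECT|SSL_ST_ACCEPT)

    #define SSL_ST_ERR_OLD 0x05                 /* before the patch */
    #define SSL_ST_ERR_NEW (0x05|SSL_ST_INIT)   /* after the patch  */

    static int in_init(int state) { return (state & SSL_ST_INIT) != 0; }

    int main(void)
    {
        /* old: 0 -- the error state looked like a finished handshake */
        printf("old: %d\n", in_init(SSL_ST_ERR_OLD));
        /* new: 1 -- I/O paths that check SSL_in_init() stay blocked  */
        printf("new: %d\n", in_init(SSL_ST_ERR_NEW));
        return 0;
    }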
diff --git a/secure/lib/libcrypto/amd64/rsaz-avx2.S b/secure/lib/libcrypto/amd64/rsaz-avx2.S
index 159d579..e026407 100644
--- a/secure/lib/libcrypto/amd64/rsaz-avx2.S
+++ b/secure/lib/libcrypto/amd64/rsaz-avx2.S
@@ -68,7 +68,7 @@ rsaz_1024_sqr_avx2:
vmovdqu 256-128(%rsi),%ymm8
leaq 192(%rsp),%rbx
- vpbroadcastq .Land_mask(%rip),%ymm15
+ vmovdqu .Land_mask(%rip),%ymm15
jmp .LOOP_GRANDE_SQR_1024
.align 32
@@ -801,10 +801,10 @@ rsaz_1024_mul_avx2:
vpmuludq 192-128(%rcx),%ymm11,%ymm12
vpaddq %ymm12,%ymm6,%ymm6
vpmuludq 224-128(%rcx),%ymm11,%ymm13
- vpblendd $3,%ymm14,%ymm9,%ymm9
+ vpblendd $3,%ymm14,%ymm9,%ymm12
vpaddq %ymm13,%ymm7,%ymm7
vpmuludq 256-128(%rcx),%ymm11,%ymm0
- vpaddq %ymm9,%ymm3,%ymm3
+ vpaddq %ymm12,%ymm3,%ymm3
vpaddq %ymm0,%ymm8,%ymm8
movq %rbx,%rax
@@ -817,7 +817,9 @@ rsaz_1024_mul_avx2:
vmovdqu -8+64-128(%rsi),%ymm13
movq %r10,%rax
+ vpblendd $0xfc,%ymm14,%ymm9,%ymm9
imull %r8d,%eax
+ vpaddq %ymm9,%ymm4,%ymm4
andl $0x1fffffff,%eax
imulq 16-128(%rsi),%rbx
@@ -1046,7 +1048,6 @@ rsaz_1024_mul_avx2:
decl %r14d
jnz .Loop_mul_1024
- vpermq $0,%ymm15,%ymm15
vpaddq (%rsp),%ymm12,%ymm0
vpsrlq $29,%ymm0,%ymm12
@@ -1686,7 +1687,7 @@ rsaz_avx2_eligible:
.align 64
.Land_mask:
-.quad 0x1fffffff,0x1fffffff,0x1fffffff,-1
+.quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff
.Lscatter_permd:
.long 0,2,4,6,7,7,7,7
.Lgather_permd:
diff --git a/sys/conf/newvers.sh b/sys/conf/newvers.sh
index 1588239..75b4847 100644
--- a/sys/conf/newvers.sh
+++ b/sys/conf/newvers.sh
@@ -44,7 +44,7 @@
TYPE="FreeBSD"
REVISION="11.1"
-BRANCH="RELEASE-p5"
+BRANCH="RELEASE-p6"
if [ -n "${BRANCH_OVERRIDE}" ]; then
BRANCH=${BRANCH_OVERRIDE}
fi