aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMark Johnston <markj@FreeBSD.org>2023-08-29 01:26:53 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2023-12-29 21:49:42 +0000
commit276666497ed1072ff663cb174eb44fe333be9ee3 (patch)
tree63822d1595b8f399841cb62b1e7aac60edb1a317
parentcfc94a37d8d9e7695f0d9bed14b88d4383d96798 (diff)
downloadsrc-276666497ed1072ff663cb174eb44fe333be9ee3.tar.gz
src-276666497ed1072ff663cb174eb44fe333be9ee3.zip
aesni: Push FPU sections down further
After commit 937b4473be21, aesni_cipher_crypt() and aesni_cipher_mac() execute in a FPU_KERN_NOCTX section, which means that they must run with preemption disabled. These functions handle discontiguous I/O buffers by allocating a contiguous buffer and copying as necessary, but this allocation cannot happen with preemption disabled. Fix the problem by pushing the FPU section down into aesni_cipher_crypt() and aesni_cipher_mac(). In particular, encrypt-then-auth transforms need not be handled with a single FPU section.

Reported by:	syzbot+78258dbb02eb92157357@syzkaller.appspotmail.com
Discussed with:	jhb
Fixes:	937b4473be21 ("aesni: Switch to using FPU_KERN_NOCTX.")

(cherry picked from commit 6b635c74fd4135eaae68970bfc5aad9ae905fec7)
-rw-r--r--sys/crypto/aesni/aesni.c12
1 file changed, 9 insertions, 3 deletions
diff --git a/sys/crypto/aesni/aesni.c b/sys/crypto/aesni/aesni.c
index 6d83743dece9..5d5afd8aee41 100644
--- a/sys/crypto/aesni/aesni.c
+++ b/sys/crypto/aesni/aesni.c
@@ -594,8 +594,6 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp)
break;
}
- fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
-
/* Do work */
if (csp->csp_mode == CSP_MODE_ETA) {
if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
@@ -612,7 +610,6 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp)
else
error = aesni_cipher_crypt(ses, crp, csp);
- fpu_kern_leave(curthread, NULL);
return (error);
}
@@ -677,6 +674,8 @@ aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
outcopy = allocated;
}
+ fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
+
error = 0;
encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
if (crp->crp_cipher_key != NULL)
@@ -749,6 +748,9 @@ aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
}
break;
}
+
+ fpu_kern_leave(curthread, NULL);
+
if (outcopy && error == 0)
crypto_copyback(crp, CRYPTO_HAS_OUTPUT_BUFFER(crp) ?
crp->crp_payload_output_start : crp->crp_payload_start,
@@ -784,6 +786,8 @@ aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
key = csp->csp_auth_key;
keylen = csp->csp_auth_klen;
+ fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
+
if (ses->hmac) {
uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16);
@@ -849,6 +853,8 @@ aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
ses->hash_finalize(res, &sctx);
}
+ fpu_kern_leave(curthread, NULL);
+
if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
uint32_t res2[SHA2_256_HASH_LEN / sizeof(uint32_t)];