aboutsummaryrefslogtreecommitdiff
path: root/sys/opencrypto
diff options
context:
space:
mode:
authorJohn Baldwin <jhb@FreeBSD.org>2020-03-27 18:25:23 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2020-03-27 18:25:23 +0000
commitc03414326909ed7a740be3ba63fbbef01fe513a8 (patch)
tree9067f28738df03bb4b685773c52ba32517468212 /sys/opencrypto
parent4d94781b4d9e03b8dbd6604d7e2280d342d3cf7e (diff)
downloadsrc-c03414326909ed7a740be3ba63fbbef01fe513a8.tar.gz
src-c03414326909ed7a740be3ba63fbbef01fe513a8.zip
Refactor driver and consumer interfaces for OCF (in-kernel crypto).
- The linked list of cryptoini structures used in session initialization is replaced with a new flat structure: struct crypto_session_params. This session includes a new mode to define how the other fields should be interpreted. Available modes include: - COMPRESS (for compression/decompression) - CIPHER (for simple encryption/decryption) - DIGEST (computing and verifying digests) - AEAD (combined auth and encryption such as AES-GCM and AES-CCM) - ETA (combined auth and encryption using encrypt-then-authenticate) Additional modes could be added in the future (e.g. if we wanted to support TLS MtE for AES-CBC in the kernel we could add a new mode for that. TLS modes might also affect how AAD is interpreted, etc.) The flat structure also includes the key lengths and algorithms as before. However, code doesn't have to walk the linked list and switch on the algorithm to determine which key is the auth key vs encryption key. The 'csp_auth_*' fields are always used for auth keys and settings and 'csp_cipher_*' for cipher. (Compression algorithms are stored in csp_cipher_alg.) - Drivers no longer register a list of supported algorithms. This doesn't quite work when you factor in modes (e.g. a driver might support both AES-CBC and SHA2-256-HMAC separately but not combined for ETA). Instead, a new 'crypto_probesession' method has been added to the kobj interface for symmetric crypto drivers. This method returns a negative value on success (similar to how device_probe works) and the crypto framework uses this value to pick the "best" driver. There are three constants for hardware (e.g. ccr), accelerated software (e.g. aesni), and plain software (cryptosoft) that give preference in that order. One effect of this is that if you request only hardware when creating a new session, you will no longer get a session using accelerated software. Another effect is that the default setting to disallow software crypto via /dev/crypto now disables accelerated software. 
Once a driver is chosen, 'crypto_newsession' is invoked as before. - Crypto operations are now solely described by the flat 'cryptop' structure. The linked list of descriptors has been removed. A separate enum has been added to describe the type of data buffer in use instead of using CRYPTO_F_* flags to make it easier to add more types in the future if needed (e.g. wired userspace buffers for zero-copy). It will also make it easier to re-introduce separate input and output buffers (in-kernel TLS would benefit from this). Try to make the flags related to IV handling less insane: - CRYPTO_F_IV_SEPARATE means that the IV is stored in the 'crp_iv' member of the operation structure. If this flag is not set, the IV is stored in the data buffer at the 'crp_iv_start' offset. - CRYPTO_F_IV_GENERATE means that a random IV should be generated and stored into the data buffer. This cannot be used with CRYPTO_F_IV_SEPARATE. If a consumer wants to deal with explicit vs implicit IVs, etc. it can always generate the IV however it needs and store partial IVs in the buffer and the full IV/nonce in crp_iv and set CRYPTO_F_IV_SEPARATE. The layout of the buffer is now described via fields in cryptop. crp_aad_start and crp_aad_length define the boundaries of any AAD. Previously with GCM and CCM you defined an auth crd with this range, but for ETA your auth crd had to span both the AAD and plaintext (and they had to be adjacent). crp_payload_start and crp_payload_length define the boundaries of the plaintext/ciphertext. Modes that only do a single operation (COMPRESS, CIPHER, DIGEST) should only use this region and leave the AAD region empty. If a digest is present (or should be generated), its starting location is marked by crp_digest_start. Instead of using the CRD_F_ENCRYPT flag to determine the direction of the operation, cryptop now includes an 'op' field defining the operation to perform. 
For digests I've added a new VERIFY digest mode which assumes a digest is present in the input and fails the request with EBADMSG if it doesn't match the internally-computed digest. GCM and CCM already assumed this, and the new AEAD mode requires this for decryption. The new ETA mode now also requires this for decryption, so IPsec and GELI no longer do their own authentication verification. Simple DIGEST operations can also do this, though there are no in-tree consumers. To eventually support some refcounting to close races, the session cookie is now passed to crypto_getop() and clients should no longer set crp_session directly. - Asymmetric crypto operation structures should be allocated via crypto_getkreq() and freed via crypto_freekreq(). This permits the crypto layer to track open asym requests and close races with a driver trying to unregister while asym requests are in flight. - crypto_copyback, crypto_copydata, crypto_apply, and crypto_contiguous_subsegment now accept the 'crp' object as the first parameter instead of individual members. This makes it easier to deal with different buffer types in the future as well as separate input and output buffers. It's also simpler for driver writers to use. - bus_dmamap_load_crp() loads a DMA mapping for a crypto buffer. This understands the various types of buffers so that drivers that use DMA do not have to be aware of different buffer types. - Helper routines now exist to build an auth context for HMAC IPAD and OPAD. This reduces some duplicated work among drivers. - Key buffers are now treated as const throughout the framework and in device drivers. However, session key buffers provided when a session is created are expected to remain alive for the duration of the session. - GCM and CCM sessions now only specify a cipher algorithm and a cipher key. The redundant auth information is not needed or used. 
- For cryptosoft, split up the code a bit such that the 'process' callback now invokes a function pointer in the session. This function pointer is set based on the mode (in effect) though it simplifies a few edge cases that would otherwise be in the switch in 'process'. It does split up GCM vs CCM which I think is more readable even if there is some duplication. - I changed /dev/crypto to support GMAC requests using CRYPTO_AES_NIST_GMAC as an auth algorithm and updated cryptocheck to work with it. - Combined cipher and auth sessions via /dev/crypto now always use ETA mode. The COP_F_CIPHER_FIRST flag is now a no-op that is ignored. This was actually documented as being true in crypto(4) before, but the code had not implemented this before I added the CIPHER_FIRST flag. - I have not yet updated /dev/crypto to be aware of explicit modes for sessions. I will probably do that at some point in the future as well as teach it about IV/nonce and tag lengths for AEAD so we can support all of the NIST KAT tests for GCM and CCM. - I've split up the existing crypto.9 manpage into several pages of which many are written from scratch. - I have converted all drivers and consumers in the tree and verified that they compile, but I have not tested all of them. I have tested the following drivers: - cryptosoft - aesni (AES only) - blake2 - ccr and the following consumers: - cryptodev - IPsec - ktls_ocf - GELI (lightly) I have not tested the following: - ccp - aesni with sha - hifn - kgssapi_krb5 - ubsec - padlock - safe - armv8_crypto (aarch64) - glxsb (i386) - sec (ppc) - cesa (armv7) - cryptocteon (mips64) - nlmsec (mips64) Discussed with: cem Relnotes: yes Sponsored by: Chelsio Communications Differential Revision: https://reviews.freebsd.org/D23677
Notes
Notes: svn path=/head/; revision=359374
Diffstat (limited to 'sys/opencrypto')
-rw-r--r--sys/opencrypto/criov.c85
-rw-r--r--sys/opencrypto/crypto.c1309
-rw-r--r--sys/opencrypto/cryptodev.c559
-rw-r--r--sys/opencrypto/cryptodev.h150
-rw-r--r--sys/opencrypto/cryptodev_if.m118
-rw-r--r--sys/opencrypto/cryptosoft.c1703
-rw-r--r--sys/opencrypto/cryptosoft.h71
-rw-r--r--sys/opencrypto/ktls_ocf.c95
-rw-r--r--sys/opencrypto/xform_gmac.c6
9 files changed, 2395 insertions, 1701 deletions
diff --git a/sys/opencrypto/criov.c b/sys/opencrypto/criov.c
index d80894909130..e097a22713b3 100644
--- a/sys/opencrypto/criov.c
+++ b/sys/opencrypto/criov.c
@@ -157,41 +157,62 @@ cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, void *, u_int),
}
void
-crypto_copyback(int flags, caddr_t buf, int off, int size, c_caddr_t in)
+crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
{
- if ((flags & CRYPTO_F_IMBUF) != 0)
- m_copyback((struct mbuf *)buf, off, size, in);
- else if ((flags & CRYPTO_F_IOV) != 0)
- cuio_copyback((struct uio *)buf, off, size, in);
- else
- bcopy(in, buf + off, size);
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ m_copyback(crp->crp_mbuf, off, size, src);
+ break;
+ case CRYPTO_BUF_UIO:
+ cuio_copyback(crp->crp_uio, off, size, src);
+ break;
+ case CRYPTO_BUF_CONTIG:
+ bcopy(src, crp->crp_buf + off, size);
+ break;
+ default:
+ panic("invalid crp buf type %d", crp->crp_buf_type);
+ }
}
void
-crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
+crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
{
- if ((flags & CRYPTO_F_IMBUF) != 0)
- m_copydata((struct mbuf *)buf, off, size, out);
- else if ((flags & CRYPTO_F_IOV) != 0)
- cuio_copydata((struct uio *)buf, off, size, out);
- else
- bcopy(buf + off, out, size);
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ m_copydata(crp->crp_mbuf, off, size, dst);
+ break;
+ case CRYPTO_BUF_UIO:
+ cuio_copydata(crp->crp_uio, off, size, dst);
+ break;
+ case CRYPTO_BUF_CONTIG:
+ bcopy(crp->crp_buf + off, dst, size);
+ break;
+ default:
+ panic("invalid crp buf type %d", crp->crp_buf_type);
+ }
}
int
-crypto_apply(int flags, caddr_t buf, int off, int len,
+crypto_apply(struct cryptop *crp, int off, int len,
int (*f)(void *, void *, u_int), void *arg)
{
int error;
- if ((flags & CRYPTO_F_IMBUF) != 0)
- error = m_apply((struct mbuf *)buf, off, len, f, arg);
- else if ((flags & CRYPTO_F_IOV) != 0)
- error = cuio_apply((struct uio *)buf, off, len, f, arg);
- else
- error = (*f)(arg, buf + off, len);
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ error = m_apply(crp->crp_mbuf, off, len, f, arg);
+ break;
+ case CRYPTO_BUF_UIO:
+ error = cuio_apply(crp->crp_uio, off, len, f, arg);
+ break;
+ case CRYPTO_BUF_CONTIG:
+ error = (*f)(arg, crp->crp_buf + off, len);
+ break;
+ default:
+ panic("invalid crp buf type %d", crp->crp_buf_type);
+ }
return (error);
}
@@ -279,17 +300,17 @@ cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len)
}
void *
-crypto_contiguous_subsegment(int crp_flags, void *crpbuf,
- size_t skip, size_t len)
+crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
{
- if ((crp_flags & CRYPTO_F_IMBUF) != 0)
- return (m_contiguous_subsegment(crpbuf, skip, len));
- else if ((crp_flags & CRYPTO_F_IOV) != 0)
- return (cuio_contiguous_segment(crpbuf, skip, len));
- else {
- MPASS((crp_flags & (CRYPTO_F_IMBUF | CRYPTO_F_IOV)) !=
- (CRYPTO_F_IMBUF | CRYPTO_F_IOV));
- return ((char*)crpbuf + skip);
+
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ return (m_contiguous_subsegment(crp->crp_mbuf, skip, len));
+ case CRYPTO_BUF_UIO:
+ return (cuio_contiguous_segment(crp->crp_uio, skip, len));
+ case CRYPTO_BUF_CONTIG:
+ return (crp->crp_buf + skip);
+ default:
+ panic("invalid crp buf type %d", crp->crp_buf_type);
}
}
-
diff --git a/sys/opencrypto/crypto.c b/sys/opencrypto/crypto.c
index f3a3bba2e59b..4dde88fc6fea 100644
--- a/sys/opencrypto/crypto.c
+++ b/sys/opencrypto/crypto.c
@@ -56,6 +56,7 @@ __FBSDID("$FreeBSD$");
#define CRYPTO_TIMING /* enable timing support */
+#include "opt_compat.h"
#include "opt_ddb.h"
#include <sys/param.h>
@@ -69,6 +70,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
+#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
@@ -79,7 +81,8 @@ __FBSDID("$FreeBSD$");
#include <vm/uma.h>
#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
-#include <opencrypto/xform.h> /* XXX for M_XDATA */
+#include <opencrypto/xform_auth.h>
+#include <opencrypto/xform_enc.h>
#include <sys/kobj.h>
#include <sys/bus.h>
@@ -89,19 +92,12 @@ __FBSDID("$FreeBSD$");
#include <machine/pcb.h>
#endif
-struct crypto_session {
- device_t parent;
- void *softc;
- uint32_t hid;
- uint32_t capabilities;
-};
-
SDT_PROVIDER_DEFINE(opencrypto);
/*
* Crypto drivers register themselves by allocating a slot in the
* crypto_drivers table with crypto_get_driverid() and then registering
- * each algorithm they support with crypto_register() and crypto_kregister().
+ * each asym algorithm they support with crypto_kregister().
*/
static struct mtx crypto_drivers_mtx; /* lock on driver table */
#define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx)
@@ -117,15 +113,10 @@ static struct mtx crypto_drivers_mtx; /* lock on driver table */
* Not tagged fields are read-only.
*/
struct cryptocap {
- device_t cc_dev; /* (d) device/driver */
+ device_t cc_dev;
+ uint32_t cc_hid;
u_int32_t cc_sessions; /* (d) # of sessions */
u_int32_t cc_koperations; /* (d) # os asym operations */
- /*
- * Largest possible operator length (in bits) for each type of
- * encryption algorithm. XXX not used
- */
- u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
- u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
int cc_flags; /* (d) flags */
@@ -133,9 +124,17 @@ struct cryptocap {
int cc_qblocked; /* (q) symmetric q blocked */
int cc_kqblocked; /* (q) asymmetric q blocked */
size_t cc_session_size;
+ volatile int cc_refs;
+};
+
+static struct cryptocap **crypto_drivers = NULL;
+static int crypto_drivers_size = 0;
+
+struct crypto_session {
+ struct cryptocap *cap;
+ void *softc;
+ struct crypto_session_params csp;
};
-static struct cryptocap *crypto_drivers = NULL;
-static int crypto_drivers_num = 0;
/*
* There are two queues for crypto requests; one for symmetric (e.g.
@@ -151,6 +150,9 @@ static struct mtx crypto_q_mtx;
#define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx)
#define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx)
+static SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
+ "In-kernel cryptography");
+
/*
* Taskqueue used to dispatch the crypto requests
* that have the CRYPTO_F_ASYNC flag
@@ -187,22 +189,37 @@ static struct crypto_ret_worker *crypto_ret_workers = NULL;
(TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))
static int crypto_workers_num = 0;
+SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
+ &crypto_workers_num, 0,
+ "Number of crypto workers used to dispatch crypto jobs");
+#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
&crypto_workers_num, 0,
"Number of crypto workers used to dispatch crypto jobs");
+#endif
static uma_zone_t cryptop_zone;
-static uma_zone_t cryptodesc_zone;
static uma_zone_t cryptoses_zone;
-int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
+int crypto_userasymcrypto = 1;
+SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
+ &crypto_userasymcrypto, 0,
+ "Enable user-mode access to asymmetric crypto support");
+#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
&crypto_userasymcrypto, 0,
"Enable/disable user-mode access to asymmetric crypto support");
-int crypto_devallowsoft = 0; /* only use hardware crypto */
+#endif
+
+int crypto_devallowsoft = 0;
+SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW,
+ &crypto_devallowsoft, 0,
+ "Enable use of software crypto by /dev/crypto");
+#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
&crypto_devallowsoft, 0,
"Enable/disable use of software crypto by /dev/crypto");
+#endif
MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
@@ -211,13 +228,12 @@ static struct proc *cryptoproc;
static void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static void crypto_destroy(void);
static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
-static int crypto_kinvoke(struct cryptkop *krp, int flags);
-static void crypto_remove(struct cryptocap *cap);
+static int crypto_kinvoke(struct cryptkop *krp);
static void crypto_task_invoke(void *ctx, int pending);
static void crypto_batch_enqueue(struct cryptop *crp);
static struct cryptostats cryptostats;
-SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
+SYSCTL_STRUCT(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, &cryptostats,
cryptostats, "Crypto system statistics");
#ifdef CRYPTO_TIMING
@@ -257,6 +273,29 @@ struct keybuf * get_keybuf(void) {
return (keybuf);
}
+static struct cryptocap *
+cap_ref(struct cryptocap *cap)
+{
+
+ refcount_acquire(&cap->cc_refs);
+ return (cap);
+}
+
+static void
+cap_rele(struct cryptocap *cap)
+{
+
+ if (refcount_release(&cap->cc_refs) == 0)
+ return;
+
+ KASSERT(cap->cc_sessions == 0,
+ ("freeing crypto driver with active sessions"));
+ KASSERT(cap->cc_koperations == 0,
+ ("freeing crypto driver with active key operations"));
+
+ free(cap, M_CRYPTO_DATA);
+}
+
static int
crypto_init(void)
{
@@ -273,22 +312,18 @@ crypto_init(void)
cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
0, 0, 0, 0,
UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
- cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
- 0, 0, 0, 0,
- UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
cryptoses_zone = uma_zcreate("crypto_session",
sizeof(struct crypto_session), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
- if (cryptodesc_zone == NULL || cryptop_zone == NULL ||
- cryptoses_zone == NULL) {
+ if (cryptop_zone == NULL || cryptoses_zone == NULL) {
printf("crypto_init: cannot setup crypto zones\n");
error = ENOMEM;
goto bad;
}
- crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
- crypto_drivers = malloc(crypto_drivers_num *
+ crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
+ crypto_drivers = malloc(crypto_drivers_size *
sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
if (crypto_drivers == NULL) {
printf("crypto_init: cannot setup crypto drivers\n");
@@ -380,9 +415,56 @@ crypto_terminate(struct proc **pp, void *q)
}
static void
+hmac_init_pad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx,
+ uint8_t padval)
+{
+ uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
+ u_int i;
+
+ KASSERT(axf->blocksize <= sizeof(hmac_key),
+ ("Invalid HMAC block size %d", axf->blocksize));
+
+ /*
+ * If the key is larger than the block size, use the digest of
+ * the key as the key instead.
+ */
+ memset(hmac_key, 0, sizeof(hmac_key));
+ if (klen > axf->blocksize) {
+ axf->Init(auth_ctx);
+ axf->Update(auth_ctx, key, klen);
+ axf->Final(hmac_key, auth_ctx);
+ klen = axf->hashsize;
+ } else
+ memcpy(hmac_key, key, klen);
+
+ for (i = 0; i < axf->blocksize; i++)
+ hmac_key[i] ^= padval;
+
+ axf->Init(auth_ctx);
+ axf->Update(auth_ctx, hmac_key, axf->blocksize);
+}
+
+void
+hmac_init_ipad(struct auth_hash *axf, const char *key, int klen,
+ void *auth_ctx)
+{
+
+ hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
+}
+
+void
+hmac_init_opad(struct auth_hash *axf, const char *key, int klen,
+ void *auth_ctx)
+{
+
+ hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
+}
+
+static void
crypto_destroy(void)
{
struct crypto_ret_worker *ret_worker;
+ int i;
/*
* Terminate any crypto threads.
@@ -400,13 +482,14 @@ crypto_destroy(void)
/*
* Reclaim dynamically allocated resources.
*/
- if (crypto_drivers != NULL)
- free(crypto_drivers, M_CRYPTO_DATA);
+ for (i = 0; i < crypto_drivers_size; i++) {
+ if (crypto_drivers[i] != NULL)
+ cap_rele(crypto_drivers[i]);
+ }
+ free(crypto_drivers, M_CRYPTO_DATA);
if (cryptoses_zone != NULL)
uma_zdestroy(cryptoses_zone);
- if (cryptodesc_zone != NULL)
- uma_zdestroy(cryptodesc_zone);
if (cryptop_zone != NULL)
uma_zdestroy(cryptop_zone);
mtx_destroy(&crypto_q_mtx);
@@ -421,13 +504,13 @@ crypto_destroy(void)
uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
- return (crypto_session->hid);
+ return (crypto_session->cap->cc_hid);
}
uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
- return (crypto_session->capabilities);
+ return (crypto_session->cap->cc_flags & 0xff000000);
}
void *
@@ -436,86 +519,411 @@ crypto_get_driver_session(crypto_session_t crypto_session)
return (crypto_session->softc);
}
-static struct cryptocap *
-crypto_checkdriver(u_int32_t hid)
+const struct crypto_session_params *
+crypto_get_params(crypto_session_t crypto_session)
{
- if (crypto_drivers == NULL)
- return NULL;
- return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
+ return (&crypto_session->csp);
}
-/*
- * Compare a driver's list of supported algorithms against another
- * list; return non-zero if all algorithms are supported.
- */
-static int
-driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
+struct auth_hash *
+crypto_auth_hash(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ return (&auth_hash_hmac_md5);
+ case CRYPTO_SHA1_HMAC:
+ return (&auth_hash_hmac_sha1);
+ case CRYPTO_SHA2_224_HMAC:
+ return (&auth_hash_hmac_sha2_224);
+ case CRYPTO_SHA2_256_HMAC:
+ return (&auth_hash_hmac_sha2_256);
+ case CRYPTO_SHA2_384_HMAC:
+ return (&auth_hash_hmac_sha2_384);
+ case CRYPTO_SHA2_512_HMAC:
+ return (&auth_hash_hmac_sha2_512);
+ case CRYPTO_NULL_HMAC:
+ return (&auth_hash_null);
+ case CRYPTO_RIPEMD160_HMAC:
+ return (&auth_hash_hmac_ripemd_160);
+ case CRYPTO_MD5_KPDK:
+ return (&auth_hash_key_md5);
+ case CRYPTO_SHA1_KPDK:
+ return (&auth_hash_key_sha1);
+#ifdef notyet
+ case CRYPTO_MD5:
+ return (&auth_hash_md5);
+#endif
+ case CRYPTO_SHA1:
+ return (&auth_hash_sha1);
+ case CRYPTO_SHA2_224:
+ return (&auth_hash_sha2_224);
+ case CRYPTO_SHA2_256:
+ return (&auth_hash_sha2_256);
+ case CRYPTO_SHA2_384:
+ return (&auth_hash_sha2_384);
+ case CRYPTO_SHA2_512:
+ return (&auth_hash_sha2_512);
+ case CRYPTO_AES_NIST_GMAC:
+ switch (csp->csp_auth_klen) {
+ case 128 / 8:
+ return (&auth_hash_nist_gmac_aes_128);
+ case 192 / 8:
+ return (&auth_hash_nist_gmac_aes_192);
+ case 256 / 8:
+ return (&auth_hash_nist_gmac_aes_256);
+ default:
+ return (NULL);
+ }
+ case CRYPTO_BLAKE2B:
+ return (&auth_hash_blake2b);
+ case CRYPTO_BLAKE2S:
+ return (&auth_hash_blake2s);
+ case CRYPTO_POLY1305:
+ return (&auth_hash_poly1305);
+ case CRYPTO_AES_CCM_CBC_MAC:
+ switch (csp->csp_auth_klen) {
+ case 128 / 8:
+ return (&auth_hash_ccm_cbc_mac_128);
+ case 192 / 8:
+ return (&auth_hash_ccm_cbc_mac_192);
+ case 256 / 8:
+ return (&auth_hash_ccm_cbc_mac_256);
+ default:
+ return (NULL);
+ }
+ default:
+ return (NULL);
+ }
+}
+
+struct enc_xform *
+crypto_cipher(const struct crypto_session_params *csp)
{
- const struct cryptoini *cr;
- /* See if all the algorithms are supported. */
- for (cr = cri; cr; cr = cr->cri_next)
- if (cap->cc_alg[cr->cri_alg] == 0)
- return 0;
- return 1;
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
+ return (&enc_xform_des);
+ case CRYPTO_3DES_CBC:
+ return (&enc_xform_3des);
+ case CRYPTO_BLF_CBC:
+ return (&enc_xform_blf);
+ case CRYPTO_CAST_CBC:
+ return (&enc_xform_cast5);
+ case CRYPTO_SKIPJACK_CBC:
+ return (&enc_xform_skipjack);
+ case CRYPTO_RIJNDAEL128_CBC:
+ return (&enc_xform_rijndael128);
+ case CRYPTO_AES_XTS:
+ return (&enc_xform_aes_xts);
+ case CRYPTO_AES_ICM:
+ return (&enc_xform_aes_icm);
+ case CRYPTO_AES_NIST_GCM_16:
+ return (&enc_xform_aes_nist_gcm);
+ case CRYPTO_CAMELLIA_CBC:
+ return (&enc_xform_camellia);
+ case CRYPTO_NULL_CBC:
+ return (&enc_xform_null);
+ case CRYPTO_CHACHA20:
+ return (&enc_xform_chacha20);
+ case CRYPTO_AES_CCM_16:
+ return (&enc_xform_ccm);
+ default:
+ return (NULL);
+ }
+}
+
+static struct cryptocap *
+crypto_checkdriver(u_int32_t hid)
+{
+
+ return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}
/*
* Select a driver for a new session that supports the specified
* algorithms and, optionally, is constrained according to the flags.
- * The algorithm we use here is pretty stupid; just use the
- * first driver that supports all the algorithms we need. If there
- * are multiple drivers we choose the driver with the fewest active
- * sessions. We prefer hardware-backed drivers to software ones.
- *
- * XXX We need more smarts here (in real life too, but that's
- * XXX another story altogether).
*/
static struct cryptocap *
-crypto_select_driver(const struct cryptoini *cri, int flags)
+crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
struct cryptocap *cap, *best;
- int match, hid;
+ int best_match, error, hid;
CRYPTO_DRIVER_ASSERT();
- /*
- * Look first for hardware crypto devices if permitted.
- */
- if (flags & CRYPTOCAP_F_HARDWARE)
- match = CRYPTOCAP_F_HARDWARE;
- else
- match = CRYPTOCAP_F_SOFTWARE;
best = NULL;
-again:
- for (hid = 0; hid < crypto_drivers_num; hid++) {
- cap = &crypto_drivers[hid];
+ for (hid = 0; hid < crypto_drivers_size; hid++) {
/*
- * If it's not initialized, is in the process of
- * going away, or is not appropriate (hardware
- * or software based on match), then skip.
+ * If there is no driver for this slot, or the driver
+ * is not appropriate (hardware or software based on
+ * match), then skip.
*/
- if (cap->cc_dev == NULL ||
- (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
- (cap->cc_flags & match) == 0)
+ cap = crypto_drivers[hid];
+ if (cap == NULL ||
+ (cap->cc_flags & flags) == 0)
continue;
- /* verify all the algorithms are supported. */
- if (driver_suitable(cap, cri)) {
- if (best == NULL ||
- cap->cc_sessions < best->cc_sessions)
- best = cap;
+ error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
+ if (error >= 0)
+ continue;
+
+ /*
+ * Use the driver with the highest probe value.
+ * Hardware drivers use a higher probe value than
+ * software. In case of a tie, prefer the driver with
+ * the fewest active sessions.
+ */
+ if (best == NULL || error > best_match ||
+ (error == best_match &&
+ cap->cc_sessions < best->cc_sessions)) {
+ best = cap;
+ best_match = error;
}
}
- if (best == NULL && match == CRYPTOCAP_F_HARDWARE &&
- (flags & CRYPTOCAP_F_SOFTWARE)) {
- /* sort of an Algol 68-style for loop */
- match = CRYPTOCAP_F_SOFTWARE;
- goto again;
- }
return best;
}
+static bool
+alg_is_compression(int alg)
+{
+
+ if (alg == CRYPTO_DEFLATE_COMP)
+ return (true);
+ return (false);
+}
+
+static bool
+alg_is_cipher(int alg)
+{
+
+ if (alg >= CRYPTO_DES_CBC && alg <= CRYPTO_SKIPJACK_CBC)
+ return (true);
+ if (alg >= CRYPTO_AES_CBC && alg <= CRYPTO_ARC4)
+ return (true);
+ if (alg == CRYPTO_NULL_CBC)
+ return (true);
+ if (alg >= CRYPTO_CAMELLIA_CBC && alg <= CRYPTO_AES_ICM)
+ return (true);
+ if (alg == CRYPTO_CHACHA20)
+ return (true);
+ return (false);
+}
+
+static bool
+alg_is_digest(int alg)
+{
+
+ if (alg >= CRYPTO_MD5_HMAC && alg <= CRYPTO_SHA1_KPDK)
+ return (true);
+ if (alg >= CRYPTO_MD5 && alg <= CRYPTO_SHA1)
+ return (true);
+ if (alg == CRYPTO_NULL_HMAC)
+ return (true);
+ if (alg >= CRYPTO_SHA2_256_HMAC && alg <= CRYPTO_SHA2_512_HMAC)
+ return (true);
+ if (alg >= CRYPTO_SHA2_256_HMAC && alg <= CRYPTO_SHA2_512_HMAC)
+ return (true);
+ if (alg == CRYPTO_AES_NIST_GMAC)
+ return (true);
+ if (alg >= CRYPTO_BLAKE2B && alg <= CRYPTO_BLAKE2S)
+ return (true);
+ if (alg >= CRYPTO_SHA2_224_HMAC && alg <= CRYPTO_POLY1305)
+ return (true);
+ if (alg == CRYPTO_AES_CCM_CBC_MAC)
+ return (true);
+ return (false);
+}
+
+static bool
+alg_is_keyed_digest(int alg)
+{
+
+ if (alg >= CRYPTO_MD5_HMAC && alg <= CRYPTO_SHA1_KPDK)
+ return (true);
+ if (alg >= CRYPTO_SHA2_256_HMAC && alg <= CRYPTO_SHA2_512_HMAC)
+ return (true);
+ if (alg == CRYPTO_AES_NIST_GMAC)
+ return (true);
+ if (alg >= CRYPTO_BLAKE2B && alg <= CRYPTO_BLAKE2S)
+ return (true);
+ if (alg == CRYPTO_SHA2_224_HMAC)
+ return (true);
+ if (alg == CRYPTO_POLY1305)
+ return (true);
+ if (alg == CRYPTO_AES_CCM_CBC_MAC)
+ return (true);
+ return (false);
+}
+
+static bool
+alg_is_aead(int alg)
+{
+
+ if (alg == CRYPTO_AES_NIST_GCM_16)
+ return (true);
+ if (alg == CRYPTO_AES_CCM_16)
+ return (true);
+ return (false);
+}
+
+/* Various sanity checks on crypto session parameters. */
+static bool
+check_csp(const struct crypto_session_params *csp)
+{
+ struct auth_hash *axf;
+
+ /* Mode-independent checks. */
+ if (csp->csp_flags != 0)
+ return (false);
+ if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
+ csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
+ return (false);
+ if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
+ return (false);
+ if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
+ return (false);
+
+ switch (csp->csp_mode) {
+ case CSP_MODE_COMPRESS:
+ if (!alg_is_compression(csp->csp_cipher_alg))
+ return (false);
+ if (csp->csp_flags != 0)
+ return (false);
+ if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
+ csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
+ csp->csp_auth_mlen != 0)
+ return (false);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!alg_is_cipher(csp->csp_cipher_alg))
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
+ if (csp->csp_cipher_klen == 0)
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_ARC4) {
+ if (csp->csp_ivlen == 0)
+ return (false);
+ }
+ }
+ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
+ return (false);
+ if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
+ csp->csp_auth_mlen != 0)
+ return (false);
+ break;
+ case CSP_MODE_DIGEST:
+ if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
+ return (false);
+
+ /* IV is optional for digests (e.g. GMAC). */
+ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
+ return (false);
+ if (!alg_is_digest(csp->csp_auth_alg))
+ return (false);
+
+ /* Key is optional for BLAKE2 digests. */
+ if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
+ csp->csp_auth_alg == CRYPTO_BLAKE2S)
+ ;
+ else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
+ if (csp->csp_auth_klen == 0)
+ return (false);
+ } else {
+ if (csp->csp_auth_klen != 0)
+ return (false);
+ }
+ if (csp->csp_auth_mlen != 0) {
+ axf = crypto_auth_hash(csp);
+ if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
+ return (false);
+ }
+ break;
+ case CSP_MODE_AEAD:
+ if (!alg_is_aead(csp->csp_cipher_alg))
+ return (false);
+ if (csp->csp_cipher_klen == 0)
+ return (false);
+ if (csp->csp_ivlen == 0 ||
+ csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
+ return (false);
+ if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
+ return (false);
+
+ /*
+ * XXX: Would be nice to have a better way to get this
+ * value.
+ */
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ if (csp->csp_auth_mlen > 16)
+ return (false);
+ break;
+ }
+ break;
+ case CSP_MODE_ETA:
+ if (!alg_is_cipher(csp->csp_cipher_alg))
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
+ if (csp->csp_cipher_klen == 0)
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_ARC4) {
+ if (csp->csp_ivlen == 0)
+ return (false);
+ }
+ }
+ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
+ return (false);
+ if (!alg_is_digest(csp->csp_auth_alg))
+ return (false);
+
+ /* Key is optional for BLAKE2 digests. */
+ if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
+ csp->csp_auth_alg == CRYPTO_BLAKE2S)
+ ;
+ else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
+ if (csp->csp_auth_klen == 0)
+ return (false);
+ } else {
+ if (csp->csp_auth_klen != 0)
+ return (false);
+ }
+ if (csp->csp_auth_mlen != 0) {
+ axf = crypto_auth_hash(csp);
+ if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
+ return (false);
+ }
+ break;
+ default:
+ return (false);
+ }
+
+ return (true);
+}
+
+/*
+ * Delete a session after it has been detached from its driver.
+ */
+static void
+crypto_deletesession(crypto_session_t cses)
+{
+ struct cryptocap *cap;
+
+ cap = cses->cap;
+
+ explicit_bzero(cses->softc, cap->cc_session_size);
+ free(cses->softc, M_CRYPTO_DATA);
+ uma_zfree(cryptoses_zone, cses);
+
+ CRYPTO_DRIVER_LOCK();
+ cap->cc_sessions--;
+ if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ wakeup(cap);
+ CRYPTO_DRIVER_UNLOCK();
+ cap_rele(cap);
+}
+
/*
* Create a new session. The crid argument specifies a crypto
* driver to use or constraints on a driver to select (hardware
@@ -523,18 +931,17 @@ again:
* must be capable of the requested crypto algorithms.
*/
int
-crypto_newsession(crypto_session_t *cses, struct cryptoini *cri, int crid)
+crypto_newsession(crypto_session_t *cses,
+ const struct crypto_session_params *csp, int crid)
{
crypto_session_t res;
- void *softc_mem;
struct cryptocap *cap;
- u_int32_t hid;
- size_t softc_size;
int err;
-restart:
+ if (!check_csp(csp))
+ return (EINVAL);
+
res = NULL;
- softc_mem = NULL;
CRYPTO_DRIVER_LOCK();
if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
@@ -542,76 +949,39 @@ restart:
* Use specified driver; verify it is capable.
*/
cap = crypto_checkdriver(crid);
- if (cap != NULL && !driver_suitable(cap, cri))
+ if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
cap = NULL;
} else {
/*
* No requested driver; select based on crid flags.
*/
- cap = crypto_select_driver(cri, crid);
- /*
- * if NULL then can't do everything in one session.
- * XXX Fix this. We need to inject a "virtual" session
- * XXX layer right about here.
- */
+ cap = crypto_select_driver(csp, crid);
}
if (cap == NULL) {
+ CRYPTO_DRIVER_UNLOCK();
CRYPTDEB("no driver");
- err = EOPNOTSUPP;
- goto out;
+ return (EOPNOTSUPP);
}
+ cap_ref(cap);
cap->cc_sessions++;
- softc_size = cap->cc_session_size;
- hid = cap - crypto_drivers;
- cap = NULL;
CRYPTO_DRIVER_UNLOCK();
- softc_mem = malloc(softc_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO);
res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO);
- res->softc = softc_mem;
-
- CRYPTO_DRIVER_LOCK();
- cap = crypto_checkdriver(hid);
- if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0) {
- cap->cc_sessions--;
- crypto_remove(cap);
- cap = NULL;
- }
- if (cap == NULL) {
- free(softc_mem, M_CRYPTO_DATA);
- uma_zfree(cryptoses_zone, res);
- CRYPTO_DRIVER_UNLOCK();
- goto restart;
- }
+ res->cap = cap;
+ res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK |
+ M_ZERO);
+ res->csp = *csp;
/* Call the driver initialization routine. */
- err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, cri);
+ err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
if (err != 0) {
CRYPTDEB("dev newsession failed: %d", err);
- goto out;
+ crypto_deletesession(res);
+ return (err);
}
- res->capabilities = cap->cc_flags & 0xff000000;
- res->hid = hid;
*cses = res;
-
-out:
- CRYPTO_DRIVER_UNLOCK();
- if (err != 0) {
- free(softc_mem, M_CRYPTO_DATA);
- if (res != NULL)
- uma_zfree(cryptoses_zone, res);
- }
- return err;
-}
-
-static void
-crypto_remove(struct cryptocap *cap)
-{
-
- mtx_assert(&crypto_drivers_mtx, MA_OWNED);
- if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
- bzero(cap, sizeof(*cap));
+ return (0);
}
/*
@@ -622,101 +992,81 @@ void
crypto_freesession(crypto_session_t cses)
{
struct cryptocap *cap;
- void *ses;
- size_t ses_size;
- u_int32_t hid;
if (cses == NULL)
return;
- CRYPTO_DRIVER_LOCK();
-
- hid = crypto_ses2hid(cses);
- KASSERT(hid < crypto_drivers_num,
- ("bogus crypto_session %p hid %u", cses, hid));
- cap = &crypto_drivers[hid];
-
- ses = cses->softc;
- ses_size = cap->cc_session_size;
-
- if (cap->cc_sessions)
- cap->cc_sessions--;
+ cap = cses->cap;
/* Call the driver cleanup routine, if available. */
CRYPTODEV_FREESESSION(cap->cc_dev, cses);
- explicit_bzero(ses, ses_size);
- free(ses, M_CRYPTO_DATA);
- uma_zfree(cryptoses_zone, cses);
-
- if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
- crypto_remove(cap);
-
- CRYPTO_DRIVER_UNLOCK();
+ crypto_deletesession(cses);
}
/*
- * Return an unused driver id. Used by drivers prior to registering
- * support for the algorithms they handle.
+ * Return a new driver id. Registers a driver with the system so that
+ * it can be probed by subsequent sessions.
*/
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
- struct cryptocap *newdrv;
+ struct cryptocap *cap, **newdrv;
int i;
if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
- printf("%s: no flags specified when registering driver\n",
- device_get_nameunit(dev));
+ device_printf(dev,
+ "no flags specified when registering driver\n");
return -1;
}
+ cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
+ cap->cc_dev = dev;
+ cap->cc_session_size = sessionsize;
+ cap->cc_flags = flags;
+ refcount_init(&cap->cc_refs, 1);
+
CRYPTO_DRIVER_LOCK();
+ for (;;) {
+ for (i = 0; i < crypto_drivers_size; i++) {
+ if (crypto_drivers[i] == NULL)
+ break;
+ }
- for (i = 0; i < crypto_drivers_num; i++) {
- if (crypto_drivers[i].cc_dev == NULL &&
- (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
+ if (i < crypto_drivers_size)
break;
- }
- }
- /* Out of entries, allocate some more. */
- if (i == crypto_drivers_num) {
- /* Be careful about wrap-around. */
- if (2 * crypto_drivers_num <= crypto_drivers_num) {
+ /* Out of entries, allocate some more. */
+
+ if (2 * crypto_drivers_size <= crypto_drivers_size) {
CRYPTO_DRIVER_UNLOCK();
printf("crypto: driver count wraparound!\n");
- return -1;
+ cap_rele(cap);
+ return (-1);
}
+ CRYPTO_DRIVER_UNLOCK();
- newdrv = malloc(2 * crypto_drivers_num *
- sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
- if (newdrv == NULL) {
- CRYPTO_DRIVER_UNLOCK();
- printf("crypto: no space to expand driver table!\n");
- return -1;
- }
+ newdrv = malloc(2 * crypto_drivers_size *
+ sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
- bcopy(crypto_drivers, newdrv,
- crypto_drivers_num * sizeof(struct cryptocap));
+ CRYPTO_DRIVER_LOCK();
+ memcpy(newdrv, crypto_drivers,
+ crypto_drivers_size * sizeof(*crypto_drivers));
- crypto_drivers_num *= 2;
+ crypto_drivers_size *= 2;
free(crypto_drivers, M_CRYPTO_DATA);
crypto_drivers = newdrv;
}
- /* NB: state is zero'd on free */
- crypto_drivers[i].cc_sessions = 1; /* Mark */
- crypto_drivers[i].cc_dev = dev;
- crypto_drivers[i].cc_flags = flags;
- crypto_drivers[i].cc_session_size = sessionsize;
+ cap->cc_hid = i;
+ crypto_drivers[i] = cap;
+ CRYPTO_DRIVER_UNLOCK();
+
if (bootverbose)
printf("crypto: assign %s driver id %u, flags 0x%x\n",
device_get_nameunit(dev), i, flags);
- CRYPTO_DRIVER_UNLOCK();
-
return i;
}
@@ -729,20 +1079,22 @@ crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
int
crypto_find_driver(const char *match)
{
+ struct cryptocap *cap;
int i, len = strlen(match);
CRYPTO_DRIVER_LOCK();
- for (i = 0; i < crypto_drivers_num; i++) {
- device_t dev = crypto_drivers[i].cc_dev;
- if (dev == NULL ||
- (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
+ for (i = 0; i < crypto_drivers_size; i++) {
+ if (crypto_drivers[i] == NULL)
continue;
- if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
- strncmp(match, device_get_name(dev), len) == 0)
- break;
+ cap = crypto_drivers[i];
+ if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
+ strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
+ CRYPTO_DRIVER_UNLOCK();
+ return (i);
+ }
}
CRYPTO_DRIVER_UNLOCK();
- return i < crypto_drivers_num ? i : -1;
+ return (-1);
}
/*
@@ -752,8 +1104,16 @@ crypto_find_driver(const char *match)
device_t
crypto_find_device_byhid(int hid)
{
- struct cryptocap *cap = crypto_checkdriver(hid);
- return cap != NULL ? cap->cc_dev : NULL;
+ struct cryptocap *cap;
+ device_t dev;
+
+ dev = NULL;
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_checkdriver(hid);
+ if (cap != NULL)
+ dev = cap->cc_dev;
+ CRYPTO_DRIVER_UNLOCK();
+ return (dev);
}
/*
@@ -762,8 +1122,16 @@ crypto_find_device_byhid(int hid)
int
crypto_getcaps(int hid)
{
- struct cryptocap *cap = crypto_checkdriver(hid);
- return cap != NULL ? cap->cc_flags : 0;
+ struct cryptocap *cap;
+ int flags;
+
+ flags = 0;
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_checkdriver(hid);
+ if (cap != NULL)
+ flags = cap->cc_flags;
+ CRYPTO_DRIVER_UNLOCK();
+ return (flags);
}
/*
@@ -803,103 +1171,6 @@ crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
}
/*
- * Register support for a non-key-related algorithm. This routine
- * is called once for each such algorithm supported by a driver.
- */
-int
-crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
- u_int32_t flags)
-{
- struct cryptocap *cap;
- int err;
-
- CRYPTO_DRIVER_LOCK();
-
- cap = crypto_checkdriver(driverid);
- /* NB: algorithms are in the range [1..max] */
- if (cap != NULL &&
- (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
- /*
- * XXX Do some performance testing to determine placing.
- * XXX We probably need an auxiliary data structure that
- * XXX describes relative performances.
- */
-
- cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
- cap->cc_max_op_len[alg] = maxoplen;
- if (bootverbose)
- printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
- , device_get_nameunit(cap->cc_dev)
- , alg
- , flags
- , maxoplen
- );
- cap->cc_sessions = 0; /* Unmark */
- err = 0;
- } else
- err = EINVAL;
-
- CRYPTO_DRIVER_UNLOCK();
- return err;
-}
-
-static void
-driver_finis(struct cryptocap *cap)
-{
- u_int32_t ses, kops;
-
- CRYPTO_DRIVER_ASSERT();
-
- ses = cap->cc_sessions;
- kops = cap->cc_koperations;
- bzero(cap, sizeof(*cap));
- if (ses != 0 || kops != 0) {
- /*
- * If there are pending sessions,
- * just mark as invalid.
- */
- cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
- cap->cc_sessions = ses;
- cap->cc_koperations = kops;
- }
-}
-
-/*
- * Unregister a crypto driver. If there are pending sessions using it,
- * leave enough information around so that subsequent calls using those
- * sessions will correctly detect the driver has been unregistered and
- * reroute requests.
- */
-int
-crypto_unregister(u_int32_t driverid, int alg)
-{
- struct cryptocap *cap;
- int i, err;
-
- CRYPTO_DRIVER_LOCK();
- cap = crypto_checkdriver(driverid);
- if (cap != NULL &&
- (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
- cap->cc_alg[alg] != 0) {
- cap->cc_alg[alg] = 0;
- cap->cc_max_op_len[alg] = 0;
-
- /* Was this the last algorithm ? */
- for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
- if (cap->cc_alg[i] != 0)
- break;
-
- if (i == CRYPTO_ALGORITHM_MAX + 1)
- driver_finis(cap);
- err = 0;
- } else
- err = EINVAL;
- CRYPTO_DRIVER_UNLOCK();
-
- return err;
-}
-
-/*
* Unregister all algorithms associated with a crypto driver.
* If there are pending sessions using it, leave enough information
* around so that subsequent calls using those sessions will
@@ -910,18 +1181,27 @@ int
crypto_unregister_all(u_int32_t driverid)
{
struct cryptocap *cap;
- int err;
CRYPTO_DRIVER_LOCK();
cap = crypto_checkdriver(driverid);
- if (cap != NULL) {
- driver_finis(cap);
- err = 0;
- } else
- err = EINVAL;
+ if (cap == NULL) {
+ CRYPTO_DRIVER_UNLOCK();
+ return (EINVAL);
+ }
+
+ cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
+ crypto_drivers[driverid] = NULL;
+
+ /*
+ * XXX: This doesn't do anything to kick sessions that
+ * have no pending operations.
+ */
+ while (cap->cc_sessions != 0 || cap->cc_koperations != 0)
+ mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
CRYPTO_DRIVER_UNLOCK();
+ cap_rele(cap);
- return err;
+ return (0);
}
/*
@@ -951,6 +1231,125 @@ crypto_unblock(u_int32_t driverid, int what)
return err;
}
+#ifdef INVARIANTS
+/* Various sanity checks on crypto requests. */
+static void
+crp_sanity(struct cryptop *crp)
+{
+ struct crypto_session_params *csp;
+
+ KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
+ KASSERT(crp->crp_ilen >= 0, ("incoming crp with -ve input length"));
+ KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
+ KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
+ ("incoming crp already done"));
+
+ csp = &crp->crp_session->csp;
+ switch (csp->csp_mode) {
+ case CSP_MODE_COMPRESS:
+ KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
+ crp->crp_op == CRYPTO_OP_DECOMPRESS,
+ ("invalid compression op %x", crp->crp_op));
+ break;
+ case CSP_MODE_CIPHER:
+ KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
+ crp->crp_op == CRYPTO_OP_DECRYPT,
+ ("invalid cipher op %x", crp->crp_op));
+ break;
+ case CSP_MODE_DIGEST:
+ KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
+ crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
+ ("invalid digest op %x", crp->crp_op));
+ break;
+ case CSP_MODE_AEAD:
+ KASSERT(crp->crp_op ==
+ (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
+ crp->crp_op ==
+ (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
+ ("invalid AEAD op %x", crp->crp_op));
+ if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16)
+ KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
+ ("GCM without a separate IV"));
+ if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16)
+ KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
+ ("CCM without a separate IV"));
+ break;
+ case CSP_MODE_ETA:
+ KASSERT(crp->crp_op ==
+ (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
+ crp->crp_op ==
+ (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
+ ("invalid ETA op %x", crp->crp_op));
+ break;
+ }
+ KASSERT((crp->crp_flags & CRYPTO_F_IV_GENERATE) == 0 ||
+ crp->crp_op == CRYPTO_OP_ENCRYPT ||
+ crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST),
+ ("IV_GENERATE set for non-encryption operation %x", crp->crp_op));
+ KASSERT((crp->crp_flags &
+ (CRYPTO_F_IV_SEPARATE | CRYPTO_F_IV_GENERATE)) !=
+ (CRYPTO_F_IV_SEPARATE | CRYPTO_F_IV_GENERATE),
+ ("crp with both IV_SEPARATE and IV_GENERATE set"));
+ KASSERT(crp->crp_buf_type >= CRYPTO_BUF_CONTIG &&
+ crp->crp_buf_type <= CRYPTO_BUF_MBUF,
+ ("invalid crp buffer type %d", crp->crp_buf_type));
+ if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
+ KASSERT(crp->crp_aad_start == 0 ||
+ crp->crp_aad_start < crp->crp_ilen,
+ ("invalid AAD start"));
+ KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0,
+ ("AAD with zero length and non-zero start"));
+ KASSERT(crp->crp_aad_length == 0 ||
+ crp->crp_aad_start + crp->crp_aad_length <= crp->crp_ilen,
+ ("AAD outside input length"));
+ } else {
+ KASSERT(crp->crp_aad_start == 0 && crp->crp_aad_length == 0,
+ ("AAD region in request not supporting AAD"));
+ }
+ if (csp->csp_ivlen == 0) {
+ KASSERT((crp->crp_flags &
+ (CRYPTO_F_IV_SEPARATE | CRYPTO_F_IV_GENERATE)) == 0,
+ ("IV_GENERATE or IV_SEPARATE set when IV isn't used"));
+ KASSERT(crp->crp_iv_start == 0,
+ ("crp_iv_start set when IV isn't used"));
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
+ KASSERT(crp->crp_iv_start == 0,
+ ("IV_SEPARATE used with non-zero IV start"));
+ } else {
+ KASSERT(crp->crp_iv_start < crp->crp_ilen,
+ ("invalid IV start"));
+ KASSERT(crp->crp_iv_start + csp->csp_ivlen <= crp->crp_ilen,
+ ("IV outside input length"));
+ }
+ KASSERT(crp->crp_payload_start == 0 ||
+ crp->crp_payload_start < crp->crp_ilen,
+ ("invalid payload start"));
+ KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
+ crp->crp_ilen, ("payload outside input length"));
+ if (csp->csp_mode == CSP_MODE_DIGEST ||
+ csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
+ KASSERT(crp->crp_digest_start == 0 ||
+ crp->crp_digest_start < crp->crp_ilen,
+ ("invalid digest start"));
+ /* XXX: For the mlen == 0 case this check isn't perfect. */
+ KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <=
+ crp->crp_ilen,
+ ("digest outside input length"));
+ } else {
+ KASSERT(crp->crp_digest_start == 0,
+ ("non-zero digest start for request without a digest"));
+ }
+ if (csp->csp_cipher_klen != 0)
+ KASSERT(csp->csp_cipher_key != NULL ||
+ crp->crp_cipher_key != NULL,
+ ("cipher request without a key"));
+ if (csp->csp_auth_klen != 0)
+ KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
+ ("auth request without a key"));
+ KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
+}
+#endif
+
/*
* Add a crypto request to a queue, to be processed by the kernel thread.
*/
@@ -958,9 +1357,14 @@ int
crypto_dispatch(struct cryptop *crp)
{
struct cryptocap *cap;
- u_int32_t hid;
int result;
+#ifdef INVARIANTS
+ crp_sanity(crp);
+#endif
+
+ /* TODO: Handle CRYPTO_F_IV_GENERATE so drivers don't have to. */
+
cryptostats.cs_ops++;
#ifdef CRYPTO_TIMING
@@ -987,16 +1391,12 @@ crypto_dispatch(struct cryptop *crp)
}
if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
- hid = crypto_ses2hid(crp->crp_session);
-
/*
* Caller marked the request to be processed
* immediately; dispatch it directly to the
* driver unless the driver is currently blocked.
*/
- cap = crypto_checkdriver(hid);
- /* Driver cannot disappeared when there is an active session. */
- KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
+ cap = crp->crp_session->cap;
if (!cap->cc_qblocked) {
result = crypto_invoke(cap, crp, 0);
if (result != ERESTART)
@@ -1033,7 +1433,8 @@ crypto_kdispatch(struct cryptkop *krp)
cryptostats.cs_kops++;
- error = crypto_kinvoke(krp, krp->krp_crid);
+ krp->krp_cap = NULL;
+ error = crypto_kinvoke(krp);
if (error == ERESTART) {
CRYPTO_Q_LOCK();
TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
@@ -1081,15 +1482,14 @@ crypto_select_kdriver(const struct cryptkop *krp, int flags)
match = CRYPTOCAP_F_SOFTWARE;
best = NULL;
again:
- for (hid = 0; hid < crypto_drivers_num; hid++) {
- cap = &crypto_drivers[hid];
+ for (hid = 0; hid < crypto_drivers_size; hid++) {
/*
- * If it's not initialized, is in the process of
- * going away, or is not appropriate (hardware
- * or software based on match), then skip.
+ * If there is no driver for this slot, or the driver
+ * is not appropriate (hardware or software based on
+ * match), then skip.
*/
+ cap = crypto_drivers[hid];
if (cap->cc_dev == NULL ||
- (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
(cap->cc_flags & match) == 0)
continue;
@@ -1111,30 +1511,33 @@ again:
}
/*
- * Dispatch an asymmetric crypto request.
+ * Choose a driver for an asymmetric crypto request.
*/
-static int
-crypto_kinvoke(struct cryptkop *krp, int crid)
+static struct cryptocap *
+crypto_lookup_kdriver(struct cryptkop *krp)
{
- struct cryptocap *cap = NULL;
- int error;
+ struct cryptocap *cap;
+ uint32_t crid;
- KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
- KASSERT(krp->krp_callback != NULL,
- ("%s: krp->crp_callback == NULL", __func__));
+ /* If this request is requeued, it might already have a driver. */
+ cap = krp->krp_cap;
+ if (cap != NULL)
+ return (cap);
- CRYPTO_DRIVER_LOCK();
+ /* Use krp_crid to choose a driver. */
+ crid = krp->krp_crid;
if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
cap = crypto_checkdriver(crid);
if (cap != NULL) {
/*
- * Driver present, it must support the necessary
- * algorithm and, if s/w drivers are excluded,
- * it must be registered as hardware-backed.
+ * Driver present, it must support the
+ * necessary algorithm and, if s/w drivers are
+ * excluded, it must be registered as
+ * hardware-backed.
*/
if (!kdriver_suitable(cap, krp) ||
(!crypto_devallowsoft &&
- (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
+ (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
cap = NULL;
}
} else {
@@ -1145,32 +1548,61 @@ crypto_kinvoke(struct cryptkop *krp, int crid)
crid &= ~CRYPTOCAP_F_SOFTWARE;
cap = crypto_select_kdriver(krp, crid);
}
- if (cap != NULL && !cap->cc_kqblocked) {
- krp->krp_hid = cap - crypto_drivers;
- cap->cc_koperations++;
+
+ if (cap != NULL) {
+ krp->krp_cap = cap_ref(cap);
+ krp->krp_hid = cap->cc_hid;
+ }
+ return (cap);
+}
+
+/*
+ * Dispatch an asymmetric crypto request.
+ */
+static int
+crypto_kinvoke(struct cryptkop *krp)
+{
+ struct cryptocap *cap = NULL;
+ int error;
+
+ KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
+ KASSERT(krp->krp_callback != NULL,
+ ("%s: krp->crp_callback == NULL", __func__));
+
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_lookup_kdriver(krp);
+ if (cap == NULL) {
CRYPTO_DRIVER_UNLOCK();
- error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
- CRYPTO_DRIVER_LOCK();
- if (error == ERESTART) {
- cap->cc_koperations--;
- CRYPTO_DRIVER_UNLOCK();
- return (error);
- }
- } else {
+ krp->krp_status = ENODEV;
+ crypto_kdone(krp);
+ return (0);
+ }
+
+ /*
+ * If the device is blocked, return ERESTART to requeue it.
+ */
+ if (cap->cc_kqblocked) {
/*
- * NB: cap is !NULL if device is blocked; in
- * that case return ERESTART so the operation
- * is resubmitted if possible.
+ * XXX: Previously this set krp_status to ERESTART and
+ * invoked crypto_kdone but the caller would still
+ * requeue it.
*/
- error = (cap == NULL) ? ENODEV : ERESTART;
+ CRYPTO_DRIVER_UNLOCK();
+ return (ERESTART);
}
- CRYPTO_DRIVER_UNLOCK();
- if (error) {
- krp->krp_status = error;
- crypto_kdone(krp);
+ cap->cc_koperations++;
+ CRYPTO_DRIVER_UNLOCK();
+ error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
+ if (error == ERESTART) {
+ CRYPTO_DRIVER_LOCK();
+ cap->cc_koperations--;
+ CRYPTO_DRIVER_UNLOCK();
+ return (error);
}
- return 0;
+
+ KASSERT(error == 0, ("error %d returned from crypto_kprocess", error));
+ return (0);
}
#ifdef CRYPTO_TIMING
@@ -1204,13 +1636,10 @@ crypto_task_invoke(void *ctx, int pending)
{
struct cryptocap *cap;
struct cryptop *crp;
- int hid, result;
+ int result;
crp = (struct cryptop *)ctx;
-
- hid = crypto_ses2hid(crp->crp_session);
- cap = crypto_checkdriver(hid);
-
+ cap = crp->crp_session->cap;
result = crypto_invoke(cap, crp, 0);
if (result == ERESTART)
crypto_batch_enqueue(crp);
@@ -1226,14 +1655,15 @@ crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
KASSERT(crp->crp_callback != NULL,
("%s: crp->crp_callback == NULL", __func__));
- KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
+ KASSERT(crp->crp_session != NULL,
+ ("%s: crp->crp_session == NULL", __func__));
#ifdef CRYPTO_TIMING
if (crypto_timing)
crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
- struct cryptodesc *crd;
+ struct crypto_session_params csp;
crypto_session_t nses;
/*
@@ -1242,14 +1672,32 @@ crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
*
* XXX: What if there are more already queued requests for this
* session?
+ *
+ * XXX: Real solution is to make sessions refcounted
+ * and force callers to hold a reference when
+ * assigning to crp_session. Could maybe change
+ * crypto_getreq to accept a session pointer to make
+ * that work. Alternatively, we could abandon the
+ * notion of rewriting crp_session in requests forcing
+ * the caller to deal with allocating a new session.
+ * Perhaps provide a method to allow a crp's session to
+ * be swapped that callers could use.
*/
+ csp = crp->crp_session->csp;
crypto_freesession(crp->crp_session);
- for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
- crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
-
- /* XXX propagate flags from initial session? */
- if (crypto_newsession(&nses, &(crp->crp_desc->CRD_INI),
+ /*
+ * XXX: Key pointers may no longer be valid. If we
+ * really want to support this we need to define the
+ * KPI such that 'csp' is required to be valid for the
+ * duration of a session by the caller perhaps.
+ *
+ * XXX: If the keys have been changed this will reuse
+ * the old keys. This probably suggests making
+ * rekeying more explicit and updating the key
+ * pointers in 'csp' when the keys change.
+ */
+ if (crypto_newsession(&nses, &csp,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
crp->crp_session = nses;
@@ -1264,13 +1712,9 @@ crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
}
}
-/*
- * Release a set of crypto descriptors.
- */
void
crypto_freereq(struct cryptop *crp)
{
- struct cryptodesc *crd;
if (crp == NULL)
return;
@@ -1300,36 +1744,18 @@ crypto_freereq(struct cryptop *crp)
}
#endif
- while ((crd = crp->crp_desc) != NULL) {
- crp->crp_desc = crd->crd_next;
- uma_zfree(cryptodesc_zone, crd);
- }
uma_zfree(cryptop_zone, crp);
}
-/*
- * Acquire a set of crypto descriptors.
- */
struct cryptop *
-crypto_getreq(int num)
+crypto_getreq(crypto_session_t cses, int how)
{
- struct cryptodesc *crd;
struct cryptop *crp;
- crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
- if (crp != NULL) {
- while (num--) {
- crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
- if (crd == NULL) {
- crypto_freereq(crp);
- return NULL;
- }
-
- crd->crd_next = crp->crp_desc;
- crp->crp_desc = crd;
- }
- }
- return crp;
+ MPASS(how == M_WAITOK || how == M_NOWAIT);
+ crp = uma_zalloc(cryptop_zone, how | M_ZERO);
+ crp->crp_session = cses;
+ return (crp);
}
/*
@@ -1432,15 +1858,14 @@ crypto_kdone(struct cryptkop *krp)
if (krp->krp_status != 0)
cryptostats.cs_kerrs++;
CRYPTO_DRIVER_LOCK();
- /* XXX: What if driver is loaded in the meantime? */
- if (krp->krp_hid < crypto_drivers_num) {
- cap = &crypto_drivers[krp->krp_hid];
- KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
- cap->cc_koperations--;
- if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
- crypto_remove(cap);
- }
+ cap = krp->krp_cap;
+ KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
+ cap->cc_koperations--;
+ if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ wakeup(cap);
CRYPTO_DRIVER_UNLOCK();
+ krp->krp_cap = NULL;
+ cap_rele(cap);
ret_worker = CRYPTO_RETW(0);
@@ -1457,11 +1882,12 @@ crypto_getfeat(int *featp)
int hid, kalg, feat = 0;
CRYPTO_DRIVER_LOCK();
- for (hid = 0; hid < crypto_drivers_num; hid++) {
- const struct cryptocap *cap = &crypto_drivers[hid];
+ for (hid = 0; hid < crypto_drivers_size; hid++) {
+ const struct cryptocap *cap = crypto_drivers[hid];
- if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
- !crypto_devallowsoft) {
+ if (cap == NULL ||
+ ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
+ !crypto_devallowsoft)) {
continue;
}
for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
@@ -1500,7 +1926,6 @@ crypto_proc(void)
struct cryptop *crp, *submit;
struct cryptkop *krp;
struct cryptocap *cap;
- u_int32_t hid;
int result, hint;
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
@@ -1517,15 +1942,14 @@ crypto_proc(void)
submit = NULL;
hint = 0;
TAILQ_FOREACH(crp, &crp_q, crp_next) {
- hid = crypto_ses2hid(crp->crp_session);
- cap = crypto_checkdriver(hid);
+ cap = crp->crp_session->cap;
/*
* Driver cannot disappeared when there is an active
* session.
*/
KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
__func__, __LINE__));
- if (cap == NULL || cap->cc_dev == NULL) {
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
/* Op needs to be migrated, process it. */
if (submit == NULL)
submit = crp;
@@ -1541,7 +1965,7 @@ crypto_proc(void)
* better to just use a per-driver
* queue instead.
*/
- if (crypto_ses2hid(submit->crp_session) == hid)
+ if (submit->crp_session->cap == cap)
hint = CRYPTO_HINT_MORE;
break;
} else {
@@ -1554,11 +1978,12 @@ crypto_proc(void)
}
if (submit != NULL) {
TAILQ_REMOVE(&crp_q, submit, crp_next);
- hid = crypto_ses2hid(submit->crp_session);
- cap = crypto_checkdriver(hid);
+ cap = submit->crp_session->cap;
KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
__func__, __LINE__));
+ CRYPTO_Q_UNLOCK();
result = crypto_invoke(cap, submit, hint);
+ CRYPTO_Q_LOCK();
if (result == ERESTART) {
/*
* The driver ran out of resources, mark the
@@ -1569,8 +1994,7 @@ crypto_proc(void)
* at the front. This should be ok; putting
* it at the end does not work.
*/
- /* XXX validate sid again? */
- crypto_drivers[crypto_ses2hid(submit->crp_session)].cc_qblocked = 1;
+ cap->cc_qblocked = 1;
TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
cryptostats.cs_blocks++;
}
@@ -1578,19 +2002,15 @@ crypto_proc(void)
/* As above, but for key ops */
TAILQ_FOREACH(krp, &crp_kq, krp_next) {
- cap = crypto_checkdriver(krp->krp_hid);
- if (cap == NULL || cap->cc_dev == NULL) {
+ cap = krp->krp_cap;
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
/*
- * Operation needs to be migrated, invalidate
- * the assigned device so it will reselect a
- * new one below. Propagate the original
- * crid selection flags if supplied.
+ * Operation needs to be migrated,
+ * clear krp_cap so a new driver is
+ * selected.
*/
- krp->krp_hid = krp->krp_crid &
- (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
- if (krp->krp_hid == 0)
- krp->krp_hid =
- CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
+ krp->krp_cap = NULL;
+ cap_rele(cap);
break;
}
if (!cap->cc_kqblocked)
@@ -1598,7 +2018,9 @@ crypto_proc(void)
}
if (krp != NULL) {
TAILQ_REMOVE(&crp_kq, krp, krp_next);
- result = crypto_kinvoke(krp, krp->krp_hid);
+ CRYPTO_Q_UNLOCK();
+ result = crypto_kinvoke(krp);
+ CRYPTO_Q_LOCK();
if (result == ERESTART) {
/*
* The driver ran out of resources, mark the
@@ -1609,8 +2031,7 @@ crypto_proc(void)
* at the front. This should be ok; putting
* it at the end does not work.
*/
- /* XXX validate sid again? */
- crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
+ krp->krp_cap->cc_kqblocked = 1;
TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
cryptostats.cs_kblocks++;
}
@@ -1731,9 +2152,9 @@ db_show_drivers(void)
, "QB"
, "KB"
);
- for (hid = 0; hid < crypto_drivers_num; hid++) {
- const struct cryptocap *cap = &crypto_drivers[hid];
- if (cap->cc_dev == NULL)
+ for (hid = 0; hid < crypto_drivers_size; hid++) {
+ const struct cryptocap *cap = crypto_drivers[hid];
+ if (cap == NULL)
continue;
db_printf("%-12s %4u %4u %08x %2u %2u\n"
, device_get_nameunit(cap->cc_dev)
@@ -1756,15 +2177,15 @@ DB_SHOW_COMMAND(crypto, db_show_crypto)
db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
"HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
- "Desc", "Callback");
+ "Device", "Callback");
TAILQ_FOREACH(crp, &crp_q, crp_next) {
db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
- , (int) crypto_ses2hid(crp->crp_session)
+ , crp->crp_session->cap->cc_hid
, (int) crypto_ses2caps(crp->crp_session)
, crp->crp_ilen, crp->crp_olen
, crp->crp_etype
, crp->crp_flags
- , crp->crp_desc
+ , device_get_nameunit(crp->crp_session->cap->cc_dev)
, crp->crp_callback
);
}
@@ -1775,7 +2196,7 @@ DB_SHOW_COMMAND(crypto, db_show_crypto)
TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
db_printf("%8td %4u %4u %04x %8p\n"
, CRYPTO_RETW_ID(ret_worker)
- , (int) crypto_ses2hid(crp->crp_session)
+ , crp->crp_session->cap->cc_hid
, crp->crp_etype
, crp->crp_flags
, crp->crp_callback
diff --git a/sys/opencrypto/cryptodev.c b/sys/opencrypto/cryptodev.c
index e43f1a9dff43..cf213f54d947 100644
--- a/sys/opencrypto/cryptodev.c
+++ b/sys/opencrypto/cryptodev.c
@@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$");
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/errno.h>
-#include <sys/uio.h>
#include <sys/random.h>
#include <sys/conf.h>
#include <sys/kernel.h>
@@ -270,23 +269,19 @@ struct csession {
u_int32_t ses;
struct mtx lock; /* for op submission */
- u_int32_t cipher;
struct enc_xform *txform;
- u_int32_t mac;
- struct auth_hash *thash;
-
- caddr_t key;
- int keylen;
+ int hashsize;
+ int ivsize;
+ int mode;
- caddr_t mackey;
- int mackeylen;
+ void *key;
+ void *mackey;
};
struct cryptop_data {
struct csession *cse;
- struct iovec iovec[1];
- struct uio uio;
+ char *buf;
bool done;
};
@@ -326,9 +321,9 @@ static struct fileops cryptofops = {
static struct csession *csefind(struct fcrypt *, u_int);
static bool csedelete(struct fcrypt *, u_int);
-static struct csession *csecreate(struct fcrypt *, crypto_session_t, caddr_t,
- u_int64_t, caddr_t, u_int64_t, u_int32_t, u_int32_t, struct enc_xform *,
- struct auth_hash *);
+static struct csession *csecreate(struct fcrypt *, crypto_session_t,
+ struct crypto_session_params *, struct enc_xform *, void *,
+ struct auth_hash *, void *);
static void csefree(struct csession *);
static int cryptodev_op(struct csession *, struct crypt_op *,
@@ -375,7 +370,7 @@ cryptof_ioctl(
struct thread *td)
{
#define SES2(p) ((struct session2_op *)p)
- struct cryptoini cria, crie;
+ struct crypto_session_params csp;
struct fcrypt *fcr = fp->f_data;
struct csession *cse;
struct session_op *sop;
@@ -383,6 +378,8 @@ cryptof_ioctl(
struct crypt_aead *caead;
struct enc_xform *txform = NULL;
struct auth_hash *thash = NULL;
+ void *key = NULL;
+ void *mackey = NULL;
struct crypt_kop *kop;
crypto_session_t cses;
u_int32_t ses;
@@ -487,18 +484,37 @@ cryptof_ioctl(
case CRYPTO_RIPEMD160_HMAC:
thash = &auth_hash_hmac_ripemd_160;
break;
+#ifdef COMPAT_FREEBSD12
case CRYPTO_AES_128_NIST_GMAC:
- thash = &auth_hash_nist_gmac_aes_128;
- break;
case CRYPTO_AES_192_NIST_GMAC:
- thash = &auth_hash_nist_gmac_aes_192;
- break;
case CRYPTO_AES_256_NIST_GMAC:
- thash = &auth_hash_nist_gmac_aes_256;
+ /* Should always be paired with GCM. */
+ if (sop->cipher != CRYPTO_AES_NIST_GCM_16) {
+ CRYPTDEB("GMAC without GCM");
+ return (EINVAL);
+ }
+ break;
+#endif
+ case CRYPTO_AES_NIST_GMAC:
+ switch (sop->mackeylen * 8) {
+ case 128:
+ thash = &auth_hash_nist_gmac_aes_128;
+ break;
+ case 192:
+ thash = &auth_hash_nist_gmac_aes_192;
+ break;
+ case 256:
+ thash = &auth_hash_nist_gmac_aes_256;
+ break;
+ default:
+ CRYPTDEB("invalid GMAC key length");
+ SDT_PROBE1(opencrypto, dev, ioctl, error,
+ __LINE__);
+ return (EINVAL);
+ }
break;
-
case CRYPTO_AES_CCM_CBC_MAC:
- switch (sop->keylen) {
+ switch (sop->mackeylen) {
case 16:
thash = &auth_hash_ccm_cbc_mac_128;
break;
@@ -554,12 +570,52 @@ cryptof_ioctl(
return (EINVAL);
}
- bzero(&crie, sizeof(crie));
- bzero(&cria, sizeof(cria));
+ if (txform == NULL && thash == NULL)
+ return (EINVAL);
+
+ memset(&csp, 0, sizeof(csp));
+
+ if (sop->cipher == CRYPTO_AES_NIST_GCM_16) {
+ switch (sop->mac) {
+#ifdef COMPAT_FREEBSD12
+ case CRYPTO_AES_128_NIST_GMAC:
+ case CRYPTO_AES_192_NIST_GMAC:
+ case CRYPTO_AES_256_NIST_GMAC:
+ if (sop->keylen != sop->mackeylen)
+ return (EINVAL);
+ break;
+#endif
+ case 0:
+ break;
+ default:
+ return (EINVAL);
+ }
+ csp.csp_mode = CSP_MODE_AEAD;
+ } else if (sop->cipher == CRYPTO_AES_CCM_16) {
+ switch (sop->mac) {
+#ifdef COMPAT_FREEBSD12
+ case CRYPTO_AES_CCM_CBC_MAC:
+ if (sop->keylen != sop->mackeylen)
+ return (EINVAL);
+ thash = NULL;
+ break;
+#endif
+ case 0:
+ break;
+ default:
+ return (EINVAL);
+ }
+ csp.csp_mode = CSP_MODE_AEAD;
+ } else if (txform && thash)
+ csp.csp_mode = CSP_MODE_ETA;
+ else if (txform)
+ csp.csp_mode = CSP_MODE_CIPHER;
+ else
+ csp.csp_mode = CSP_MODE_DIGEST;
if (txform) {
- crie.cri_alg = txform->type;
- crie.cri_klen = sop->keylen * 8;
+ csp.csp_cipher_alg = txform->type;
+ csp.csp_cipher_klen = sop->keylen;
if (sop->keylen > txform->maxkey ||
sop->keylen < txform->minkey) {
CRYPTDEB("invalid cipher parameters");
@@ -569,22 +625,21 @@ cryptof_ioctl(
goto bail;
}
- crie.cri_key = malloc(crie.cri_klen / 8,
- M_XDATA, M_WAITOK);
- if ((error = copyin(sop->key, crie.cri_key,
- crie.cri_klen / 8))) {
+ key = malloc(csp.csp_cipher_klen, M_XDATA, M_WAITOK);
+ error = copyin(sop->key, key, csp.csp_cipher_klen);
+ if (error) {
CRYPTDEB("invalid key");
SDT_PROBE1(opencrypto, dev, ioctl, error,
__LINE__);
goto bail;
}
- if (thash)
- crie.cri_next = &cria;
+ csp.csp_cipher_key = key;
+ csp.csp_ivlen = txform->ivsize;
}
if (thash) {
- cria.cri_alg = thash->type;
- cria.cri_klen = sop->mackeylen * 8;
+ csp.csp_auth_alg = thash->type;
+ csp.csp_auth_klen = sop->mackeylen;
if (sop->mackeylen > thash->keysize ||
sop->mackeylen < 0) {
CRYPTDEB("invalid mac key length");
@@ -594,17 +649,24 @@ cryptof_ioctl(
goto bail;
}
- if (cria.cri_klen) {
- cria.cri_key = malloc(cria.cri_klen / 8,
- M_XDATA, M_WAITOK);
- if ((error = copyin(sop->mackey, cria.cri_key,
- cria.cri_klen / 8))) {
+ if (csp.csp_auth_klen) {
+ mackey = malloc(csp.csp_auth_klen, M_XDATA,
+ M_WAITOK);
+ error = copyin(sop->mackey, mackey,
+ csp.csp_auth_klen);
+ if (error) {
CRYPTDEB("invalid mac key");
SDT_PROBE1(opencrypto, dev, ioctl,
error, __LINE__);
goto bail;
}
+ csp.csp_auth_key = mackey;
}
+
+ if (csp.csp_auth_alg == CRYPTO_AES_NIST_GMAC)
+ csp.csp_ivlen = AES_GCM_IV_LEN;
+ if (csp.csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC)
+ csp.csp_ivlen = AES_CCM_IV_LEN;
}
/* NB: CIOCGSESSION2 has the crid */
@@ -623,16 +685,14 @@ cryptof_ioctl(
}
} else
crid = CRYPTOCAP_F_HARDWARE;
- error = crypto_newsession(&cses, (txform ? &crie : &cria), crid);
+ error = crypto_newsession(&cses, &csp, crid);
if (error) {
CRYPTDEB("crypto_newsession");
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
- cse = csecreate(fcr, cses, crie.cri_key, crie.cri_klen,
- cria.cri_key, cria.cri_klen, sop->cipher, sop->mac, txform,
- thash);
+ cse = csecreate(fcr, cses, &csp, txform, key, thash, mackey);
if (cse == NULL) {
crypto_freesession(cses);
@@ -652,10 +712,8 @@ cryptof_ioctl(
}
bail:
if (error) {
- if (crie.cri_key)
- free(crie.cri_key, M_XDATA);
- if (cria.cri_key)
- free(cria.cri_key, M_XDATA);
+ free(key, M_XDATA);
+ free(mackey, M_XDATA);
}
#ifdef COMPAT_FREEBSD32
else {
@@ -773,20 +831,11 @@ static struct cryptop_data *
cod_alloc(struct csession *cse, size_t len, struct thread *td)
{
struct cryptop_data *cod;
- struct uio *uio;
cod = malloc(sizeof(struct cryptop_data), M_XDATA, M_WAITOK | M_ZERO);
cod->cse = cse;
- uio = &cod->uio;
- uio->uio_iov = cod->iovec;
- uio->uio_iovcnt = 1;
- uio->uio_resid = len;
- uio->uio_segflg = UIO_SYSSPACE;
- uio->uio_rw = UIO_WRITE;
- uio->uio_td = td;
- uio->uio_iov[0].iov_len = len;
- uio->uio_iov[0].iov_base = malloc(len, M_XDATA, M_WAITOK);
+ cod->buf = malloc(len, M_XDATA, M_WAITOK);
return (cod);
}
@@ -794,7 +843,7 @@ static void
cod_free(struct cryptop_data *cod)
{
- free(cod->uio.uio_iov[0].iov_base, M_XDATA);
+ free(cod->buf, M_XDATA);
free(cod, M_XDATA);
}
@@ -803,8 +852,10 @@ cryptodev_warn(struct csession *cse)
{
static struct timeval arc4warn, blfwarn, castwarn, deswarn, md5warn;
static struct timeval skipwarn, tdeswarn;
+ const struct crypto_session_params *csp;
- switch (cse->cipher) {
+ csp = crypto_get_params(cse->cses);
+ switch (csp->csp_cipher_alg) {
case CRYPTO_DES_CBC:
if (ratecheck(&deswarn, &warninterval))
gone_in(13, "DES cipher via /dev/crypto");
@@ -831,7 +882,7 @@ cryptodev_warn(struct csession *cse)
break;
}
- switch (cse->mac) {
+ switch (csp->csp_auth_alg) {
case CRYPTO_MD5_HMAC:
if (ratecheck(&md5warn, &warninterval))
gone_in(13, "MD5-HMAC authenticator via /dev/crypto");
@@ -848,7 +899,6 @@ cryptodev_op(
{
struct cryptop_data *cod = NULL;
struct cryptop *crp = NULL;
- struct cryptodesc *crde = NULL, *crda = NULL;
int error;
if (cop->len > 256*1024-4) {
@@ -863,106 +913,135 @@ cryptodev_op(
}
}
- if (cse->thash)
- cod = cod_alloc(cse, cop->len + cse->thash->hashsize, td);
- else
- cod = cod_alloc(cse, cop->len, td);
-
- crp = crypto_getreq((cse->txform != NULL) + (cse->thash != NULL));
- if (crp == NULL) {
+ if (cop->mac && cse->hashsize == 0) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- error = ENOMEM;
+ error = EINVAL;
goto bail;
}
- if (cse->thash && cse->txform) {
- if (cop->flags & COP_F_CIPHER_FIRST) {
- crde = crp->crp_desc;
- crda = crde->crd_next;
- } else {
- crda = crp->crp_desc;
- crde = crda->crd_next;
+ /*
+ * The COP_F_CIPHER_FIRST flag predates explicit session
+ * modes, but the only way it was used was for EtA so allow it
+ * as long as it is consistent with EtA.
+ */
+ if (cop->flags & COP_F_CIPHER_FIRST) {
+ if (cop->op != COP_ENCRYPT) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ return (EINVAL);
}
- } else if (cse->thash) {
- crda = crp->crp_desc;
- } else if (cse->txform) {
- crde = crp->crp_desc;
- } else {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- error = EINVAL;
- goto bail;
}
- if ((error = copyin(cop->src, cod->uio.uio_iov[0].iov_base,
- cop->len))) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- goto bail;
- }
+ cod = cod_alloc(cse, cop->len + cse->hashsize, td);
- if (crda) {
- crda->crd_skip = 0;
- crda->crd_len = cop->len;
- crda->crd_inject = cop->len;
+ crp = crypto_getreq(cse->cses, M_WAITOK);
- crda->crd_alg = cse->mac;
- crda->crd_key = cse->mackey;
- crda->crd_klen = cse->mackeylen * 8;
+ error = copyin(cop->src, cod->buf, cop->len);
+ if (error) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ goto bail;
}
-
- if (crde) {
- if (cop->op == COP_ENCRYPT)
- crde->crd_flags |= CRD_F_ENCRYPT;
- else
- crde->crd_flags &= ~CRD_F_ENCRYPT;
- crde->crd_len = cop->len;
- crde->crd_inject = 0;
-
- crde->crd_alg = cse->cipher;
- crde->crd_key = cse->key;
- crde->crd_klen = cse->keylen * 8;
+ crp->crp_payload_start = 0;
+ crp->crp_payload_length = cop->len;
+ if (cse->hashsize)
+ crp->crp_digest_start = cop->len;
+
+ switch (cse->mode) {
+ case CSP_MODE_COMPRESS:
+ switch (cop->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_COMPRESS;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECOMPRESS;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ case CSP_MODE_CIPHER:
+ switch (cop->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_ENCRYPT;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECRYPT;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ case CSP_MODE_DIGEST:
+ switch (cop->op) {
+ case 0:
+ case COP_ENCRYPT:
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ case CSP_MODE_ETA:
+ switch (cop->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_ENCRYPT |
+ CRYPTO_OP_COMPUTE_DIGEST;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECRYPT |
+ CRYPTO_OP_VERIFY_DIGEST;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
}
- crp->crp_ilen = cop->len;
- crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
- | (cop->flags & COP_F_BATCH);
- crp->crp_uio = &cod->uio;
+ crp->crp_ilen = cop->len + cse->hashsize;
+ crp->crp_flags = CRYPTO_F_CBIMM | (cop->flags & COP_F_BATCH);
+ crp->crp_buf = cod->buf;
+ crp->crp_buf_type = CRYPTO_BUF_CONTIG;
crp->crp_callback = cryptodev_cb;
- crp->crp_session = cse->cses;
crp->crp_opaque = cod;
if (cop->iv) {
- if (crde == NULL) {
+ if (cse->ivsize == 0) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
error = EINVAL;
goto bail;
}
- if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
+ error = copyin(cop->iv, crp->crp_iv, cse->ivsize);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- error = EINVAL;
goto bail;
}
- if ((error = copyin(cop->iv, crde->crd_iv,
- cse->txform->ivsize))) {
+ crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
+ } else if (cse->ivsize != 0) {
+ crp->crp_iv_start = 0;
+ crp->crp_payload_start += cse->ivsize;
+ crp->crp_payload_length -= cse->ivsize;
+ }
+
+ if (cop->mac != NULL) {
+ error = copyin(cop->mac, cod->buf + cop->len, cse->hashsize);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
- crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- crde->crd_skip = 0;
- } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
- crde->crd_skip = 0;
- } else if (crde) {
- crde->crd_flags |= CRD_F_IV_PRESENT;
- crde->crd_skip = cse->txform->ivsize;
- crde->crd_len -= cse->txform->ivsize;
- }
-
- if (cop->mac && crda == NULL) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- error = EINVAL;
- goto bail;
}
cryptodev_warn(cse);
-
again:
/*
* Let the dispatch run unlocked, then, interlock against the
@@ -995,18 +1074,20 @@ again:
goto bail;
}
- if (cop->dst &&
- (error = copyout(cod->uio.uio_iov[0].iov_base, cop->dst,
- cop->len))) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- goto bail;
+ if (cop->dst != NULL) {
+ error = copyout(cod->buf, cop->dst, cop->len);
+ if (error) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ goto bail;
+ }
}
- if (cop->mac &&
- (error = copyout((caddr_t)cod->uio.uio_iov[0].iov_base + cop->len,
- cop->mac, cse->thash->hashsize))) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- goto bail;
+ if (cop->mac != NULL) {
+ error = copyout(cod->buf + cop->len, cop->mac, cse->hashsize);
+ if (error) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ goto bail;
+ }
}
bail:
@@ -1027,7 +1108,6 @@ cryptodev_aead(
{
struct cryptop_data *cod = NULL;
struct cryptop *crp = NULL;
- struct cryptodesc *crde = NULL, *crda = NULL;
int error;
if (caead->len > 256*1024-4 || caead->aadlen > 256*1024-4) {
@@ -1035,99 +1115,122 @@ cryptodev_aead(
return (E2BIG);
}
- if (cse->txform == NULL || cse->thash == NULL || caead->tag == NULL ||
+ if (cse->txform == NULL || cse->hashsize == 0 || caead->tag == NULL ||
(caead->len % cse->txform->blocksize) != 0) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
return (EINVAL);
}
- cod = cod_alloc(cse, caead->aadlen + caead->len + cse->thash->hashsize,
- td);
+ /*
+ * The COP_F_CIPHER_FIRST flag predates explicit session
+ * modes, but the only way it was used was for EtA so allow it
+ * as long as it is consistent with EtA.
+ */
+ if (caead->flags & COP_F_CIPHER_FIRST) {
+ if (caead->op != COP_ENCRYPT) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ return (EINVAL);
+ }
+ }
+
+ cod = cod_alloc(cse, caead->aadlen + caead->len + cse->hashsize, td);
- crp = crypto_getreq(2);
- if (crp == NULL) {
- error = ENOMEM;
+ crp = crypto_getreq(cse->cses, M_WAITOK);
+
+ error = copyin(caead->aad, cod->buf, caead->aadlen);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
+ crp->crp_aad_start = 0;
+ crp->crp_aad_length = caead->aadlen;
- if (caead->flags & COP_F_CIPHER_FIRST) {
- crde = crp->crp_desc;
- crda = crde->crd_next;
- } else {
- crda = crp->crp_desc;
- crde = crda->crd_next;
- }
-
- if ((error = copyin(caead->aad, cod->uio.uio_iov[0].iov_base,
- caead->aadlen))) {
+ error = copyin(caead->src, cod->buf + caead->aadlen, caead->len);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
-
- if ((error = copyin(caead->src, (char *)cod->uio.uio_iov[0].iov_base +
- caead->aadlen, caead->len))) {
+ crp->crp_payload_start = caead->aadlen;
+ crp->crp_payload_length = caead->len;
+ crp->crp_digest_start = caead->aadlen + caead->len;
+
+ switch (cse->mode) {
+ case CSP_MODE_AEAD:
+ switch (caead->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_ENCRYPT |
+ CRYPTO_OP_COMPUTE_DIGEST;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECRYPT |
+ CRYPTO_OP_VERIFY_DIGEST;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ case CSP_MODE_ETA:
+ switch (caead->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_ENCRYPT |
+ CRYPTO_OP_COMPUTE_DIGEST;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECRYPT |
+ CRYPTO_OP_VERIFY_DIGEST;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ default:
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
goto bail;
}
- /*
- * For GCM/CCM, crd_len covers only the AAD. For other ciphers
- * chained with an HMAC, crd_len covers both the AAD and the
- * cipher text.
- */
- crda->crd_skip = 0;
- if (cse->cipher == CRYPTO_AES_NIST_GCM_16 ||
- cse->cipher == CRYPTO_AES_CCM_16)
- crda->crd_len = caead->aadlen;
- else
- crda->crd_len = caead->aadlen + caead->len;
- crda->crd_inject = caead->aadlen + caead->len;
-
- crda->crd_alg = cse->mac;
- crda->crd_key = cse->mackey;
- crda->crd_klen = cse->mackeylen * 8;
-
- if (caead->op == COP_ENCRYPT)
- crde->crd_flags |= CRD_F_ENCRYPT;
- else
- crde->crd_flags &= ~CRD_F_ENCRYPT;
- crde->crd_skip = caead->aadlen;
- crde->crd_len = caead->len;
- crde->crd_inject = caead->aadlen;
-
- crde->crd_alg = cse->cipher;
- crde->crd_key = cse->key;
- crde->crd_klen = cse->keylen * 8;
-
- crp->crp_ilen = caead->aadlen + caead->len;
- crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
- | (caead->flags & COP_F_BATCH);
- crp->crp_uio = &cod->uio;
+ crp->crp_ilen = caead->aadlen + caead->len + cse->hashsize;
+ crp->crp_flags = CRYPTO_F_CBIMM | (caead->flags & COP_F_BATCH);
+ crp->crp_buf = cod->buf;
+ crp->crp_buf_type = CRYPTO_BUF_CONTIG;
crp->crp_callback = cryptodev_cb;
- crp->crp_session = cse->cses;
crp->crp_opaque = cod;
if (caead->iv) {
- if (caead->ivlen > sizeof(crde->crd_iv)) {
+ /*
+ * Permit a 16-byte IV for AES-XTS, but only use the
+ * first 8 bytes as a block number.
+ */
+ if (cse->mode == CSP_MODE_ETA &&
+ caead->ivlen == AES_BLOCK_LEN &&
+ cse->ivsize == AES_XTS_IV_LEN)
+ caead->ivlen = AES_XTS_IV_LEN;
+
+ if (caead->ivlen != cse->ivsize) {
error = EINVAL;
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
- if ((error = copyin(caead->iv, crde->crd_iv, caead->ivlen))) {
+ error = copyin(caead->iv, crp->crp_iv, cse->ivsize);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
- crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
+ crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
} else {
- crde->crd_flags |= CRD_F_IV_PRESENT;
- crde->crd_skip += cse->txform->ivsize;
- crde->crd_len -= cse->txform->ivsize;
+ crp->crp_iv_start = crp->crp_payload_start;
+ crp->crp_payload_start += cse->ivsize;
+ crp->crp_payload_length -= cse->ivsize;
}
- if ((error = copyin(caead->tag, (caddr_t)cod->uio.uio_iov[0].iov_base +
- caead->len + caead->aadlen, cse->thash->hashsize))) {
+ error = copyin(caead->tag, cod->buf + caead->len + caead->aadlen,
+ cse->hashsize);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
@@ -1164,15 +1267,18 @@ again:
goto bail;
}
- if (caead->dst && (error = copyout(
- (caddr_t)cod->uio.uio_iov[0].iov_base + caead->aadlen, caead->dst,
- caead->len))) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- goto bail;
+ if (caead->dst != NULL) {
+ error = copyout(cod->buf + caead->aadlen, caead->dst,
+ caead->len);
+ if (error) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ goto bail;
+ }
}
- if ((error = copyout((caddr_t)cod->uio.uio_iov[0].iov_base +
- caead->aadlen + caead->len, caead->tag, cse->thash->hashsize))) {
+ error = copyout(cod->buf + caead->aadlen + caead->len, caead->tag,
+ cse->hashsize);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
@@ -1202,13 +1308,11 @@ cryptodev_cb(struct cryptop *crp)
return (0);
}
-static int
-cryptodevkey_cb(void *op)
+static void
+cryptodevkey_cb(struct cryptkop *krp)
{
- struct cryptkop *krp = (struct cryptkop *) op;
wakeup_one(krp);
- return (0);
}
static int
@@ -1267,7 +1371,7 @@ cryptodev_key(struct crypt_kop *kop)
krp->krp_oparams = kop->crk_oparams;
krp->krp_crid = kop->crk_crid;
krp->krp_status = 0;
- krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
+ krp->krp_callback = cryptodevkey_cb;
for (i = 0; i < CRK_MAXPARAM; i++) {
if (kop->crk_param[i].crp_nbits > 65536) {
@@ -1303,7 +1407,7 @@ cryptodev_key(struct crypt_kop *kop)
goto fail;
}
- kop->crk_crid = krp->krp_crid; /* device that did the work */
+ kop->crk_crid = krp->krp_hid; /* device that did the work */
if (krp->krp_status != 0) {
error = krp->krp_status;
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
@@ -1429,9 +1533,9 @@ csedelete(struct fcrypt *fcr, u_int ses)
}
struct csession *
-csecreate(struct fcrypt *fcr, crypto_session_t cses, caddr_t key, u_int64_t keylen,
- caddr_t mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac,
- struct enc_xform *txform, struct auth_hash *thash)
+csecreate(struct fcrypt *fcr, crypto_session_t cses,
+ struct crypto_session_params *csp, struct enc_xform *txform,
+ void *key, struct auth_hash *thash, void *mackey)
{
struct csession *cse;
@@ -1441,14 +1545,17 @@ csecreate(struct fcrypt *fcr, crypto_session_t cses, caddr_t key, u_int64_t keyl
mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF);
refcount_init(&cse->refs, 1);
cse->key = key;
- cse->keylen = keylen/8;
cse->mackey = mackey;
- cse->mackeylen = mackeylen/8;
+ cse->mode = csp->csp_mode;
cse->cses = cses;
- cse->cipher = cipher;
- cse->mac = mac;
cse->txform = txform;
- cse->thash = thash;
+ if (thash != NULL)
+ cse->hashsize = thash->hashsize;
+ else if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16)
+ cse->hashsize = AES_GMAC_HASH_LEN;
+ else if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16)
+ cse->hashsize = AES_CBC_MAC_HASH_LEN;
+ cse->ivsize = csp->csp_ivlen;
mtx_lock(&fcr->lock);
TAILQ_INSERT_TAIL(&fcr->csessions, cse, next);
cse->ses = fcr->sesn++;
diff --git a/sys/opencrypto/cryptodev.h b/sys/opencrypto/cryptodev.h
index bd71e518c576..f9aa8a053c36 100644
--- a/sys/opencrypto/cryptodev.h
+++ b/sys/opencrypto/cryptodev.h
@@ -71,7 +71,6 @@
/* Some initial values */
#define CRYPTO_DRIVERS_INITIAL 4
-#define CRYPTO_SW_SESSIONS 32
/* Hash values */
#define NULL_HASH_LEN 16
@@ -189,11 +188,13 @@
#define CRYPTO_CAMELLIA_CBC 21
#define CRYPTO_AES_XTS 22
#define CRYPTO_AES_ICM 23 /* commonly known as CTR mode */
-#define CRYPTO_AES_NIST_GMAC 24 /* cipher side */
+#define CRYPTO_AES_NIST_GMAC 24 /* GMAC only */
#define CRYPTO_AES_NIST_GCM_16 25 /* 16 byte ICV */
+#ifdef _KERNEL
#define CRYPTO_AES_128_NIST_GMAC 26 /* auth side */
#define CRYPTO_AES_192_NIST_GMAC 27 /* auth side */
#define CRYPTO_AES_256_NIST_GMAC 28 /* auth side */
+#endif
#define CRYPTO_BLAKE2B 29 /* Blake2b hash */
#define CRYPTO_BLAKE2S 30 /* Blake2s hash */
#define CRYPTO_CHACHA20 31 /* Chacha20 stream cipher */
@@ -378,6 +379,13 @@ struct cryptostats {
#ifdef _KERNEL
+/*
+ * Return values for cryptodev_probesession methods.
+ */
+#define CRYPTODEV_PROBE_HARDWARE (-100)
+#define CRYPTODEV_PROBE_ACCEL_SOFTWARE (-200)
+#define CRYPTODEV_PROBE_SOFTWARE (-500)
+
#if 0
#define CRYPTDEB(s, ...) do { \
printf("%s:%d: " s "\n", __FILE__, __LINE__, ## __VA_ARGS__); \
@@ -386,40 +394,29 @@ struct cryptostats {
#define CRYPTDEB(...) do { } while (0)
#endif
-/* Standard initialization structure beginning */
-struct cryptoini {
- int cri_alg; /* Algorithm to use */
- int cri_klen; /* Key length, in bits */
- int cri_mlen; /* Number of bytes we want from the
- entire hash. 0 means all. */
- caddr_t cri_key; /* key to use */
- u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */
- struct cryptoini *cri_next;
-};
+struct crypto_session_params {
+ int csp_mode; /* Type of operations to perform. */
+
+#define CSP_MODE_NONE 0
+#define CSP_MODE_COMPRESS 1 /* Compression/decompression. */
+#define CSP_MODE_CIPHER 2 /* Encrypt/decrypt. */
+#define CSP_MODE_DIGEST 3 /* Compute/verify digest. */
+#define CSP_MODE_AEAD 4 /* Combined auth/encryption. */
+#define CSP_MODE_ETA 5 /* IPsec style encrypt-then-auth */
+
+ int csp_flags;
+
+ int csp_ivlen; /* IV length in bytes. */
+
+ int csp_cipher_alg;
+ int csp_cipher_klen; /* Key length in bytes. */
+ const void *csp_cipher_key;
-/* Describe boundaries of a single crypto operation */
-struct cryptodesc {
- int crd_skip; /* How many bytes to ignore from start */
- int crd_len; /* How many bytes to process */
- int crd_inject; /* Where to inject results, if applicable */
- int crd_flags;
-
-#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */
-#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in
- place, so don't copy. */
-#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
-#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
-#define CRD_F_COMP 0x0f /* Set when doing compression */
-#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
-
- struct cryptoini CRD_INI; /* Initialization/context data */
-#define crd_esn CRD_INI.cri_esn
-#define crd_iv CRD_INI.cri_iv
-#define crd_key CRD_INI.cri_key
-#define crd_alg CRD_INI.cri_alg
-#define crd_klen CRD_INI.cri_klen
-
- struct cryptodesc *crd_next;
+ int csp_auth_alg;
+ int csp_auth_klen; /* Key length in bytes. */
+ const void *csp_auth_key;
+ int csp_auth_mlen; /* Number of digest bytes to use.
+ 0 means all. */
};
/* Structure describing complete operation */
@@ -444,8 +441,6 @@ struct cryptop {
*/
int crp_flags;
-#define CRYPTO_F_IMBUF 0x0001 /* Input/output are mbuf chains */
-#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
#define CRYPTO_F_DONE 0x0020 /* Operation completed */
@@ -458,14 +453,35 @@ struct cryptop {
* order there are submitted. Applied only
* if CRYPTO_F_ASYNC flags is set
*/
+#define CRYPTO_F_IV_SEPARATE 0x0200 /* Use crp_iv[] as IV. */
+#define CRYPTO_F_IV_GENERATE 0x0400 /* Generate a random IV and store. */
+
+ int crp_op;
union {
caddr_t crp_buf; /* Data to be processed */
struct mbuf *crp_mbuf;
struct uio *crp_uio;
};
- void * crp_opaque; /* Opaque pointer, passed along */
- struct cryptodesc *crp_desc; /* Linked list of processing descriptors */
+ int crp_buf_type; /* Which union member describes data. */
+
+ int crp_aad_start; /* Location of AAD. */
+ int crp_aad_length; /* 0 => no AAD. */
+ int crp_iv_start; /* Location of IV. IV length is from
+ * the session.
+ */
+ int crp_payload_start; /* Location of ciphertext. */
+ int crp_payload_length;
+ int crp_digest_start; /* Location of MAC/tag. Length is
+ * from the session.
+ */
+
+ uint8_t crp_iv[EALG_MAX_BLOCK_LEN]; /* IV if IV_SEPARATE. */
+
+ const void *crp_cipher_key; /* New cipher key if non-NULL. */
+ const void *crp_auth_key; /* New auth key if non-NULL. */
+
+ void *crp_opaque; /* Opaque pointer, passed along */
int (*crp_callback)(struct cryptop *); /* Callback function */
@@ -485,11 +501,18 @@ struct cryptop {
(crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER)
#define CRYPTO_BUF_CONTIG 0x0
-#define CRYPTO_BUF_IOV 0x1
+#define CRYPTO_BUF_UIO 0x1
#define CRYPTO_BUF_MBUF 0x2
-#define CRYPTO_OP_DECRYPT 0x0
-#define CRYPTO_OP_ENCRYPT 0x1
+/* Flags in crp_op. */
+#define CRYPTO_OP_DECRYPT 0x0
+#define CRYPTO_OP_ENCRYPT 0x1
+#define CRYPTO_OP_IS_ENCRYPT(op) ((op) & CRYPTO_OP_ENCRYPT)
+#define CRYPTO_OP_COMPUTE_DIGEST 0x0
+#define CRYPTO_OP_VERIFY_DIGEST 0x2
+#define CRYPTO_OP_DECOMPRESS CRYPTO_OP_DECRYPT
+#define CRYPTO_OP_COMPRESS CRYPTO_OP_ENCRYPT
+#define CRYPTO_OP_IS_COMPRESS(op) ((op) & CRYPTO_OP_COMPRESS)
/*
* Hints passed to process methods.
@@ -504,18 +527,24 @@ struct cryptkop {
u_short krp_iparams; /* # of input parameters */
u_short krp_oparams; /* # of output parameters */
u_int krp_crid; /* desired device, etc. */
- u_int32_t krp_hid;
+ uint32_t krp_hid; /* device used */
struct crparam krp_param[CRK_MAXPARAM]; /* kvm */
- int (*krp_callback)(struct cryptkop *);
+ void (*krp_callback)(struct cryptkop *);
+ struct cryptocap *krp_cap;
};
uint32_t crypto_ses2hid(crypto_session_t crypto_session);
uint32_t crypto_ses2caps(crypto_session_t crypto_session);
void *crypto_get_driver_session(crypto_session_t crypto_session);
+const struct crypto_session_params *crypto_get_params(
+ crypto_session_t crypto_session);
+struct auth_hash *crypto_auth_hash(const struct crypto_session_params *csp);
+struct enc_xform *crypto_cipher(const struct crypto_session_params *csp);
MALLOC_DECLARE(M_CRYPTO_DATA);
-extern int crypto_newsession(crypto_session_t *cses, struct cryptoini *cri, int hard);
+extern int crypto_newsession(crypto_session_t *cses,
+ const struct crypto_session_params *params, int hard);
extern void crypto_freesession(crypto_session_t cses);
#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
@@ -525,10 +554,7 @@ extern int32_t crypto_get_driverid(device_t dev, size_t session_size,
extern int crypto_find_driver(const char *);
extern device_t crypto_find_device_byhid(int hid);
extern int crypto_getcaps(int hid);
-extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
- u_int32_t flags);
extern int crypto_kregister(u_int32_t, int, u_int32_t);
-extern int crypto_unregister(u_int32_t driverid, int alg);
extern int crypto_unregister_all(u_int32_t driverid);
extern int crypto_dispatch(struct cryptop *crp);
extern int crypto_kdispatch(struct cryptkop *);
@@ -540,17 +566,30 @@ extern void crypto_kdone(struct cryptkop *);
extern int crypto_getfeat(int *);
extern void crypto_freereq(struct cryptop *crp);
-extern struct cryptop *crypto_getreq(int num);
+extern struct cryptop *crypto_getreq(crypto_session_t cses, int how);
extern int crypto_usercrypto; /* userland may do crypto requests */
extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */
extern int crypto_devallowsoft; /* only use hardware crypto */
+/* Helper routines for drivers to initialize auth contexts for HMAC. */
+struct auth_hash;
+
+void hmac_init_ipad(struct auth_hash *axf, const char *key, int klen,
+ void *auth_ctx);
+void hmac_init_opad(struct auth_hash *axf, const char *key, int klen,
+ void *auth_ctx);
+
/*
* Crypto-related utility routines used mainly by drivers.
*
* XXX these don't really belong here; but for now they're
* kept apart from the rest of the system.
+ *
+ * Similar to m_copyback/data, *_copyback copy data from the 'src'
+ * buffer into the crypto request's data buffer while *_copydata copy
+ * data from the crypto request's data buffer into the 'dst'
+ * buffer.
*/
struct uio;
extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
@@ -564,14 +603,13 @@ struct iovec;
extern int crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr,
int *cnt, int *allocated);
-extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
- c_caddr_t in);
-extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
- caddr_t out);
-extern int crypto_apply(int flags, caddr_t buf, int off, int len,
+void crypto_copyback(struct cryptop *crp, int off, int size,
+ const void *src);
+void crypto_copydata(struct cryptop *crp, int off, int size, void *dst);
+int crypto_apply(struct cryptop *crp, int off, int len,
int (*f)(void *, void *, u_int), void *arg);
-
-extern void *crypto_contiguous_subsegment(int, void *, size_t, size_t);
+void *crypto_contiguous_subsegment(struct cryptop *crp, size_t skip,
+ size_t len);
#endif /* _KERNEL */
#endif /* _CRYPTO_CRYPTO_H_ */
diff --git a/sys/opencrypto/cryptodev_if.m b/sys/opencrypto/cryptodev_if.m
index 49caa5536c06..1d767be3f4e4 100644
--- a/sys/opencrypto/cryptodev_if.m
+++ b/sys/opencrypto/cryptodev_if.m
@@ -40,32 +40,138 @@ CODE {
};
/**
- * Crypto driver method to initialize a new session object with the given
- * initialization parameters (cryptoini). The driver's session memory object
- * is already allocated and zeroed, like driver softcs. It is accessed with
+ * @brief Probe to see if a crypto driver supports a session.
+ *
+ * The crypto framework invokes this method on each crypto driver when
+ * creating a session for symmetric crypto operations to determine if
+ * the driver supports the algorithms and mode requested by the
+ * session.
+ *
+ * If the driver does not support a session with the requested
+ * parameters, this function should fail with an error.
+ *
+ * If the driver does support a session with the requested parameters,
+ * this function should return a negative value indicating the
+ * priority of this driver. These negative values should be derived
+ * from one of the CRYPTODEV_PROBE_* constants in
+ * <opencrypto/cryptodev.h>.
+ *
+ * This function's return value is similar to that used by
+ * DEVICE_PROBE(9). However, a return value of zero is not supported
+ * and should not be used.
+ *
+ * @param dev the crypto driver device
+ * @param csp crypto session parameters
+ *
+ * @retval negative if the driver supports this session - the
+ * least negative value is used to select the
+ * driver for the session
+ * @retval EINVAL if the driver does not support the session
+ * @retval positive if some other error occurs
+ */
+METHOD int probesession {
+ device_t dev;
+ const struct crypto_session_params *csp;
+};
+
+/**
+ * @brief Initialize a new crypto session object
+ *
+ * Invoked by the crypto framework to initialize driver-specific data
+ * for a crypto session. The framework allocates and zeroes the
+ * driver's per-session memory object prior to invoking this method.
+ * The driver is able to access its per-session memory object via
* crypto_get_driver_session().
+ *
+ * @param dev the crypto driver device
+ * @param crypto_session session being initialized
+ * @param csp crypto session parameters
+ *
+ * @retval 0 success
+ * @retval non-zero if some kind of error occurred
*/
METHOD int newsession {
device_t dev;
crypto_session_t crypto_session;
- struct cryptoini *cri;
+ const struct crypto_session_params *csp;
};
/**
- * Optional crypto driver method to release any additional allocations. OCF
- * owns session memory itself; it is zeroed before release.
+ * @brief Destroy a crypto session object
+ *
+ * The crypto framework invokes this method when tearing down a crypto
+ * session. After this callback returns, the framework will explicitly
+ * zero and free the driver's per-session memory object. If the
+ * driver requires additional actions to destroy a session, it should
+ * perform those in this method. If the driver does not require
+ * additional actions it does not need to provide an implementation of
+ * this method.
+ *
+ * @param dev the crypto driver device
+ * @param crypto_session session being destroyed
*/
METHOD void freesession {
device_t dev;
crypto_session_t crypto_session;
} DEFAULT null_freesession;
+/**
+ * @brief Perform a symmetric crypto operation
+ *
+ * The crypto framework invokes this method for each symmetric crypto
+ * operation performed on a session. A reference to the containing
+ * session is stored as a member of 'struct cryptop'. This routine
+ * should not block, but queue the operation if necessary.
+ *
+ * This method may return ERESTART to indicate that any internal
+ * queues are full so the operation should be queued in the crypto
+ * framework and retried in the future.
+ *
+ * To report errors with a crypto operation, 'crp_etype' should be set
+ * and the operation completed by calling 'crypto_done'. This method
+ * should then return zero.
+ *
+ * @param dev the crypto driver device
+ * @param op crypto operation to perform
+ * @param flags set to CRYPTO_HINT_MORE if additional symmetric
+ * crypto operations are queued for this driver;
+ * otherwise set to zero.
+ *
+ * @retval 0 success
+ * @retval ERESTART internal queue is full
+ */
METHOD int process {
device_t dev;
struct cryptop *op;
int flags;
};
+/**
+ * @brief Perform an asymmetric crypto operation
+ *
+ * The crypto framework invokes this method for each asymmetric crypto
+ * operation. Each asymmetric crypto operation should be
+ * self-contained and is not associated with any persistent session.
+ * This routine should not block, but queue the operation if
+ * necessary.
+ *
+ * This method may return ERESTART to indicate that any internal
+ * queues are full so the operation should be queued in the crypto
+ * framework and retried in the future.
+ *
+ * To report errors with a crypto operation, 'krp_status' should be set
+ * and the operation completed by calling 'crypto_kdone'. This method
+ * should then return zero.
+ *
+ * @param dev the crypto driver device
+ * @param op crypto operation to perform
+ * @param flags set to CRYPTO_HINT_MORE if additional asymmetric
+ * crypto operations are queued for this driver;
+ * otherwise set to zero.
+ *
+ * @retval 0 success
+ * @retval ERESTART internal queue is full
+ */
METHOD int kprocess {
device_t dev;
struct cryptkop *op;
diff --git a/sys/opencrypto/cryptosoft.c b/sys/opencrypto/cryptosoft.c
index 2d064ffa15e8..e98f710a4e5b 100644
--- a/sys/opencrypto/cryptosoft.c
+++ b/sys/opencrypto/cryptosoft.c
@@ -55,36 +55,60 @@ __FBSDID("$FreeBSD$");
#include <sys/md5.h>
#include <opencrypto/cryptodev.h>
-#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>
#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"
-_Static_assert(AES_CCM_IV_LEN == AES_GCM_IV_LEN,
- "AES_GCM_IV_LEN must currently be the same as AES_CCM_IV_LEN");
+struct swcr_auth {
+ void *sw_ictx;
+ void *sw_octx;
+ struct auth_hash *sw_axf;
+ uint16_t sw_mlen;
+ uint16_t sw_octx_len;
+};
-static int32_t swcr_id;
+struct swcr_encdec {
+ uint8_t *sw_kschedule;
+ struct enc_xform *sw_exf;
+};
+
+struct swcr_compdec {
+ struct comp_algo *sw_cxf;
+};
-u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
-u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
+struct swcr_session {
+ struct mtx swcr_lock;
+ int (*swcr_process)(struct swcr_session *, struct cryptop *);
+
+ struct swcr_auth swcr_auth;
+ struct swcr_encdec swcr_encdec;
+ struct swcr_compdec swcr_compdec;
+};
+
+static int32_t swcr_id;
-static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
-static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
-static int swcr_authenc(struct cryptop *crp);
-static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static void swcr_freesession(device_t dev, crypto_session_t cses);
+/* Used for CRYPTO_NULL_CBC. */
+static int
+swcr_null(struct swcr_session *ses, struct cryptop *crp)
+{
+
+ return (0);
+}
+
/*
* Apply a symmetric encryption/decryption algorithm.
*/
static int
-swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
- int flags)
+swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
+ const struct crypto_session_params *csp;
+ struct swcr_encdec *sw;
struct enc_xform *exf;
int i, j, k, blks, ind, count, ivlen;
struct uio *uio, uiolcl;
@@ -92,51 +116,39 @@ swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
struct iovec *iov;
int iovcnt, iovalloc;
int error;
+ bool encrypting;
error = 0;
+ sw = &ses->swcr_encdec;
exf = sw->sw_exf;
blks = exf->blocksize;
ivlen = exf->ivsize;
/* Check for non-padded data */
- if (crd->crd_len % blks)
+ if ((crp->crp_payload_length % blks) != 0)
return EINVAL;
- if (crd->crd_alg == CRYPTO_AES_ICM &&
- (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
+ if (exf == &enc_xform_aes_icm &&
+ (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
return (EINVAL);
- /* Initialize the IV */
- if (crd->crd_flags & CRD_F_ENCRYPT) {
- /* IV explicitly provided ? */
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crd->crd_iv, iv, ivlen);
- else
- arc4rand(iv, ivlen, 0);
-
- /* Do we need to write the IV */
- if (!(crd->crd_flags & CRD_F_IV_PRESENT))
- crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);
-
- } else { /* Decryption */
- /* IV explicitly provided ? */
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crd->crd_iv, iv, ivlen);
- else {
- /* Get IV off buf */
- crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
- }
- }
-
- if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
- int error;
+ /* IV explicitly provided ? */
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ bcopy(crp->crp_iv, iv, ivlen);
+ else if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(iv, ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, ivlen, iv);
+ } else
+ crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);
+ if (crp->crp_cipher_key != NULL) {
if (sw->sw_kschedule)
exf->zerokey(&(sw->sw_kschedule));
+ csp = crypto_get_params(crp->crp_session);
error = exf->setkey(&sw->sw_kschedule,
- crd->crd_key, crd->crd_klen / 8);
+ crp->crp_cipher_key, csp->csp_cipher_klen);
if (error)
return (error);
}
@@ -145,20 +157,24 @@ swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
iovcnt = nitems(iovlcl);
iovalloc = 0;
uio = &uiolcl;
- if ((flags & CRYPTO_F_IMBUF) != 0) {
- error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt,
&iovalloc);
if (error)
return (error);
uio->uio_iov = iov;
uio->uio_iovcnt = iovcnt;
- } else if ((flags & CRYPTO_F_IOV) != 0)
- uio = (struct uio *)buf;
- else {
- iov[0].iov_base = buf;
- iov[0].iov_len = crd->crd_skip + crd->crd_len;
+ break;
+ case CRYPTO_BUF_UIO:
+ uio = crp->crp_uio;
+ break;
+ case CRYPTO_BUF_CONTIG:
+ iov[0].iov_base = crp->crp_buf;
+ iov[0].iov_len = crp->crp_ilen;
uio->uio_iov = iov;
uio->uio_iovcnt = 1;
+ break;
}
ivp = iv;
@@ -171,14 +187,15 @@ swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
exf->reinit(sw->sw_kschedule, iv);
}
- count = crd->crd_skip;
+ count = crp->crp_payload_start;
ind = cuio_getptr(uio, count, &k);
if (ind == -1) {
error = EINVAL;
goto out;
}
- i = crd->crd_len;
+ i = crp->crp_payload_length;
+ encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
while (i > 0) {
/*
@@ -191,14 +208,14 @@ swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
/* Actual encryption/decryption */
if (exf->reinit) {
- if (crd->crd_flags & CRD_F_ENCRYPT) {
+ if (encrypting) {
exf->encrypt(sw->sw_kschedule,
blk);
} else {
exf->decrypt(sw->sw_kschedule,
blk);
}
- } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ } else if (encrypting) {
/* XOR with previous block */
for (j = 0; j < blks; j++)
blk[j] ^= ivp[j];
@@ -257,11 +274,10 @@ swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;
if (exf->reinit) {
- if ((crd->crd_flags & CRD_F_ENCRYPT) != 0 &&
- exf->encrypt_multi == NULL)
+ if (encrypting && exf->encrypt_multi == NULL)
exf->encrypt(sw->sw_kschedule,
idat);
- else if ((crd->crd_flags & CRD_F_ENCRYPT) != 0) {
+ else if (encrypting) {
nb = rounddown(rem, blks);
exf->encrypt_multi(sw->sw_kschedule,
idat, nb);
@@ -273,7 +289,7 @@ swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
exf->decrypt_multi(sw->sw_kschedule,
idat, nb);
}
- } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ } else if (encrypting) {
/* XOR with previous block/IV */
for (j = 0; j < blks; j++)
idat[j] ^= ivp[j];
@@ -325,13 +341,10 @@ out:
return (error);
}
-static int __result_use_check
-swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
- int klen)
+static void
+swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
+ const uint8_t *key, int klen)
{
- int k;
-
- klen /= 8;
switch (axf->type) {
case CRYPTO_MD5_HMAC:
@@ -342,22 +355,8 @@ swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
case CRYPTO_SHA2_512_HMAC:
case CRYPTO_NULL_HMAC:
case CRYPTO_RIPEMD160_HMAC:
- for (k = 0; k < klen; k++)
- key[k] ^= HMAC_IPAD_VAL;
-
- axf->Init(sw->sw_ictx);
- axf->Update(sw->sw_ictx, key, klen);
- axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);
-
- for (k = 0; k < klen; k++)
- key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
-
- axf->Init(sw->sw_octx);
- axf->Update(sw->sw_octx, key, klen);
- axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);
-
- for (k = 0; k < klen; k++)
- key[k] ^= HMAC_OPAD_VAL;
+ hmac_init_ipad(axf, key, klen, sw->sw_ictx);
+ hmac_init_opad(axf, key, klen, sw->sw_octx);
break;
case CRYPTO_MD5_KPDK:
case CRYPTO_SHA1_KPDK:
@@ -374,7 +373,6 @@ swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
*/
u_char buf[SHA1_RESULTLEN];
- sw->sw_klen = klen;
bcopy(key, sw->sw_octx, klen);
axf->Init(sw->sw_ictx);
axf->Update(sw->sw_ictx, key, klen);
@@ -382,55 +380,53 @@ swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
break;
}
case CRYPTO_POLY1305:
- if (klen != POLY1305_KEY_LEN) {
- CRYPTDEB("bad poly1305 key size %d", klen);
- return EINVAL;
- }
- /* FALLTHROUGH */
case CRYPTO_BLAKE2B:
case CRYPTO_BLAKE2S:
axf->Setkey(sw->sw_ictx, key, klen);
axf->Init(sw->sw_ictx);
break;
default:
- printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
- "doesn't use keys.\n", __func__, axf->type);
- return EINVAL;
+ panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
}
- return 0;
}
/*
- * Compute keyed-hash authenticator.
+ * Compute or verify hash.
*/
static int
-swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
- int flags)
+swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
- unsigned char aalg[HASH_MAX_LEN];
+ u_char aalg[HASH_MAX_LEN];
+ u_char uaalg[HASH_MAX_LEN];
+ const struct crypto_session_params *csp;
+ struct swcr_auth *sw;
struct auth_hash *axf;
union authctx ctx;
int err;
- if (sw->sw_ictx == 0)
- return EINVAL;
+ sw = &ses->swcr_auth;
axf = sw->sw_axf;
- if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
- err = swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);
- if (err != 0)
- return err;
+ if (crp->crp_auth_key != NULL) {
+ csp = crypto_get_params(crp->crp_session);
+ swcr_authprepare(axf, sw, crp->crp_auth_key,
+ csp->csp_auth_klen);
}
bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
- err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
- (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
+ err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
+ (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
+ if (err)
+ return err;
+
+ err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
+ (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
if (err)
return err;
- switch (sw->sw_alg) {
+ switch (axf->type) {
case CRYPTO_SHA1:
case CRYPTO_SHA2_224:
case CRYPTO_SHA2_256:
@@ -468,7 +464,7 @@ swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
* and let Final() do the proper, natural "algofill"
* padding.
*/
- axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
+ axf->Update(&ctx, sw->sw_octx, sw->sw_octx_len);
axf->Final(aalg, &ctx);
break;
@@ -480,20 +476,22 @@ swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
break;
}
- /* Inject the authentication data */
- crypto_copyback(flags, buf, crd->crd_inject,
- sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
- return 0;
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
+ if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
+ return (EBADMSG);
+ } else {
+ /* Inject the authentication data */
+ crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
+ }
+ return (0);
}
CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */
-/*
- * Apply a combined encryption-authentication transformation
- */
static int
-swcr_authenc(struct cryptop *crp)
+swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
u_char *blk = (u_char *)blkbuf;
@@ -501,288 +499,403 @@ swcr_authenc(struct cryptop *crp)
u_char uaalg[AALG_MAX_RESULT_LEN];
u_char iv[EALG_MAX_BLOCK_LEN];
union authctx ctx;
- struct swcr_session *ses;
- struct cryptodesc *crd, *crda = NULL, *crde = NULL;
- struct swcr_data *sw, *swa, *swe = NULL;
- struct auth_hash *axf = NULL;
- struct enc_xform *exf = NULL;
- caddr_t buf = (caddr_t)crp->crp_buf;
+ struct swcr_auth *swa;
+ struct auth_hash *axf;
uint32_t *blkp;
- int aadlen, blksz, i, ivlen, len, iskip, oskip, r;
- int isccm = 0;
+ int blksz, i, ivlen, len;
- ivlen = blksz = iskip = oskip = 0;
+ swa = &ses->swcr_auth;
+ axf = swa->sw_axf;
- ses = crypto_get_driver_session(crp->crp_session);
+ bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
+ blksz = axf->blocksize;
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- for (i = 0; i < nitems(ses->swcr_algorithms) &&
- ses->swcr_algorithms[i].sw_alg != crd->crd_alg; i++)
- ;
- if (i == nitems(ses->swcr_algorithms))
- return (EINVAL);
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE)
+ return (EINVAL);
- sw = &ses->swcr_algorithms[i];
- switch (sw->sw_alg) {
- case CRYPTO_AES_CCM_16:
- case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_NIST_GMAC:
- swe = sw;
- crde = crd;
- exf = swe->sw_exf;
- /* AES_CCM_IV_LEN and AES_GCM_IV_LEN are both 12 */
- ivlen = AES_CCM_IV_LEN;
- break;
- case CRYPTO_AES_CCM_CBC_MAC:
- isccm = 1;
- /* FALLTHROUGH */
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- swa = sw;
- crda = crd;
- axf = swa->sw_axf;
- if (swa->sw_ictx == 0)
- return (EINVAL);
- bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
- blksz = axf->blocksize;
- break;
- default:
- return (EINVAL);
- }
+ /* Initialize the IV */
+ ivlen = AES_GCM_IV_LEN;
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ bcopy(crp->crp_iv, iv, ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);
+
+ axf->Reinit(&ctx, iv, ivlen);
+ for (i = 0; i < crp->crp_payload_length; i += blksz) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+ bzero(blk + len, blksz - len);
+ axf->Update(&ctx, blk, blksz);
+ }
+
+ /* length block */
+ bzero(blk, blksz);
+ blkp = (uint32_t *)blk + 1;
+ *blkp = htobe32(crp->crp_payload_length * 8);
+ axf->Update(&ctx, blk, blksz);
+
+ /* Finalize MAC */
+ axf->Final(aalg, &ctx);
+
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
+ uaalg);
+ if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
+ return (EBADMSG);
+ } else {
+ /* Inject the authentication data */
+ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
}
- if (crde == NULL || crda == NULL)
+ return (0);
+}
+
+static int
+swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
+{
+ uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
+ u_char *blk = (u_char *)blkbuf;
+ u_char aalg[AALG_MAX_RESULT_LEN];
+ u_char uaalg[AALG_MAX_RESULT_LEN];
+ u_char iv[EALG_MAX_BLOCK_LEN];
+ union authctx ctx;
+ struct swcr_auth *swa;
+ struct swcr_encdec *swe;
+ struct auth_hash *axf;
+ struct enc_xform *exf;
+ uint32_t *blkp;
+ int blksz, i, ivlen, len, r;
+
+ swa = &ses->swcr_auth;
+ axf = swa->sw_axf;
+
+ bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
+ blksz = axf->blocksize;
+
+ swe = &ses->swcr_encdec;
+ exf = swe->sw_exf;
+
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
return (EINVAL);
- /*
- * We need to make sure that the auth algorithm matches the
- * encr algorithm. Specifically, for AES-GCM must go with
- * AES NIST GMAC, and AES-CCM must go with CBC-MAC.
- */
- if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16) {
- switch (crda->crd_alg) {
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- break; /* Good! */
- default:
- return (EINVAL); /* Not good! */
+
+ /* Initialize the IV */
+ ivlen = AES_GCM_IV_LEN;
+ bcopy(crp->crp_iv, iv, ivlen);
+
+ /* Supply MAC with IV */
+ axf->Reinit(&ctx, iv, ivlen);
+
+ /* Supply MAC with AAD */
+ for (i = 0; i < crp->crp_aad_length; i += blksz) {
+ len = MIN(crp->crp_aad_length - i, blksz);
+ crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
+ bzero(blk + len, blksz - len);
+ axf->Update(&ctx, blk, blksz);
+ }
+
+ exf->reinit(swe->sw_kschedule, iv);
+
+ /* Do encryption with MAC */
+ for (i = 0; i < crp->crp_payload_length; i += len) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ if (len < blksz)
+ bzero(blk, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ exf->encrypt(swe->sw_kschedule, blk);
+ axf->Update(&ctx, blk, len);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ blk);
+ } else {
+ axf->Update(&ctx, blk, len);
}
- } else if (crde->crd_alg == CRYPTO_AES_CCM_16 &&
- crda->crd_alg != CRYPTO_AES_CCM_CBC_MAC)
- return (EINVAL);
+ }
- if ((crde->crd_alg == CRYPTO_AES_NIST_GCM_16 ||
- crde->crd_alg == CRYPTO_AES_CCM_16) &&
- (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
- return (EINVAL);
+ /* length block */
+ bzero(blk, blksz);
+ blkp = (uint32_t *)blk + 1;
+ *blkp = htobe32(crp->crp_aad_length * 8);
+ blkp = (uint32_t *)blk + 3;
+ *blkp = htobe32(crp->crp_payload_length * 8);
+ axf->Update(&ctx, blk, blksz);
+
+ /* Finalize MAC */
+ axf->Final(aalg, &ctx);
+
+ /* Validate tag */
+ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
+ uaalg);
+
+ r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
+ if (r != 0)
+ return (EBADMSG);
+
+ /* tag matches, decrypt data */
+ for (i = 0; i < crp->crp_payload_length; i += blksz) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ if (len < blksz)
+ bzero(blk, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len,
+ blk);
+ exf->decrypt(swe->sw_kschedule, blk);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ blk);
+ }
+ } else {
+ /* Inject the authentication data */
+ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
+ aalg);
+ }
+
+ return (0);
+}
+
+static int
+swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
+{
+ uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
+ u_char *blk = (u_char *)blkbuf;
+ u_char aalg[AALG_MAX_RESULT_LEN];
+ u_char uaalg[AALG_MAX_RESULT_LEN];
+ u_char iv[EALG_MAX_BLOCK_LEN];
+ union authctx ctx;
+ struct swcr_auth *swa;
+ struct auth_hash *axf;
+ int blksz, i, ivlen, len;
- if (crde->crd_klen != crda->crd_klen)
+ swa = &ses->swcr_auth;
+ axf = swa->sw_axf;
+
+ bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
+ blksz = axf->blocksize;
+
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE)
return (EINVAL);
/* Initialize the IV */
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- /* IV explicitly provided ? */
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crde->crd_iv, iv, ivlen);
- else
- arc4rand(iv, ivlen, 0);
-
- /* Do we need to write the IV */
- if (!(crde->crd_flags & CRD_F_IV_PRESENT))
- crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
- ivlen, iv);
-
- } else { /* Decryption */
- /* IV explicitly provided ? */
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crde->crd_iv, iv, ivlen);
- else {
- /* Get IV off buf */
- crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
- ivlen, iv);
- }
+ ivlen = AES_CCM_IV_LEN;
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ bcopy(crp->crp_iv, iv, ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);
+
+ /*
+ * AES CCM-CBC-MAC needs to know the length of both the auth
+ * data and payload data before doing the auth computation.
+ */
+ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
+ ctx.aes_cbc_mac_ctx.cryptDataLength = 0;
+
+ axf->Reinit(&ctx, iv, ivlen);
+ for (i = 0; i < crp->crp_payload_length; i += blksz) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+ bzero(blk + len, blksz - len);
+ axf->Update(&ctx, blk, blksz);
}
- if (swa->sw_alg == CRYPTO_AES_CCM_CBC_MAC) {
- /*
- * AES CCM-CBC needs to know the length of
- * both the auth data, and payload data, before
- * doing the auth computation.
- */
- ctx.aes_cbc_mac_ctx.authDataLength = crda->crd_len;
- ctx.aes_cbc_mac_ctx.cryptDataLength = crde->crd_len;
+ /* Finalize MAC */
+ axf->Final(aalg, &ctx);
+
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
+ uaalg);
+ if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
+ return (EBADMSG);
+ } else {
+ /* Inject the authentication data */
+ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
}
+ return (0);
+}
+
+static int
+swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
+{
+ uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
+ u_char *blk = (u_char *)blkbuf;
+ u_char aalg[AALG_MAX_RESULT_LEN];
+ u_char uaalg[AALG_MAX_RESULT_LEN];
+ u_char iv[EALG_MAX_BLOCK_LEN];
+ union authctx ctx;
+ struct swcr_auth *swa;
+ struct swcr_encdec *swe;
+ struct auth_hash *axf;
+ struct enc_xform *exf;
+ int blksz, i, ivlen, len, r;
+
+ swa = &ses->swcr_auth;
+ axf = swa->sw_axf;
+
+ bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
+ blksz = axf->blocksize;
+
+ swe = &ses->swcr_encdec;
+ exf = swe->sw_exf;
+
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
+ return (EINVAL);
+
+ /* Initialize the IV */
+ ivlen = AES_CCM_IV_LEN;
+ bcopy(crp->crp_iv, iv, ivlen);
+
+ /*
+ * AES CCM-CBC-MAC needs to know the length of both the auth
+ * data and payload data before doing the auth computation.
+ */
+ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
+ ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;
+
/* Supply MAC with IV */
- if (axf->Reinit)
- axf->Reinit(&ctx, iv, ivlen);
+ axf->Reinit(&ctx, iv, ivlen);
/* Supply MAC with AAD */
- aadlen = crda->crd_len;
-
- for (i = iskip; i < crda->crd_len; i += blksz) {
- len = MIN(crda->crd_len - i, blksz - oskip);
- crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
- blk + oskip);
- bzero(blk + len + oskip, blksz - len - oskip);
+ for (i = 0; i < crp->crp_aad_length; i += blksz) {
+ len = MIN(crp->crp_aad_length - i, blksz);
+ crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
+ bzero(blk + len, blksz - len);
axf->Update(&ctx, blk, blksz);
- oskip = 0; /* reset initial output offset */
}
- if (exf->reinit)
- exf->reinit(swe->sw_kschedule, iv);
+ exf->reinit(swe->sw_kschedule, iv);
/* Do encryption/decryption with MAC */
- for (i = 0; i < crde->crd_len; i += len) {
- if (exf->encrypt_multi != NULL) {
- len = rounddown(crde->crd_len - i, blksz);
- if (len == 0)
- len = blksz;
- else
- len = MIN(len, sizeof(blkbuf));
- } else
- len = blksz;
- len = MIN(crde->crd_len - i, len);
+ for (i = 0; i < crp->crp_payload_length; i += len) {
+ len = MIN(crp->crp_payload_length - i, blksz);
if (len < blksz)
bzero(blk, blksz);
- crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
- blk);
- /*
- * One of the problems with CCM+CBC is that the authentication
- * is done on the unecncrypted data. As a result, we have
- * to do the authentication update at different times,
- * depending on whether it's CCM or not.
- */
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- if (isccm)
- axf->Update(&ctx, blk, len);
- if (exf->encrypt_multi != NULL)
- exf->encrypt_multi(swe->sw_kschedule, blk,
- len);
- else
- exf->encrypt(swe->sw_kschedule, blk);
- if (!isccm)
- axf->Update(&ctx, blk, len);
- crypto_copyback(crp->crp_flags, buf,
- crde->crd_skip + i, len, blk);
+ crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ axf->Update(&ctx, blk, len);
+ exf->encrypt(swe->sw_kschedule, blk);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ blk);
} else {
- if (isccm) {
- KASSERT(exf->encrypt_multi == NULL,
- ("assume CCM is single-block only"));
- exf->decrypt(swe->sw_kschedule, blk);
- }
+ /*
+ * One of the problems with CCM+CBC is that
+ * the authentication is done on the
+ * unencrypted data. As a result, we have to
+ * decrypt the data twice: once to generate
+ * the tag and a second time after the tag is
+ * verified.
+ */
+ exf->decrypt(swe->sw_kschedule, blk);
axf->Update(&ctx, blk, len);
}
}
- /* Do any required special finalization */
- switch (crda->crd_alg) {
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- /* length block */
- bzero(blk, blksz);
- blkp = (uint32_t *)blk + 1;
- *blkp = htobe32(aadlen * 8);
- blkp = (uint32_t *)blk + 3;
- *blkp = htobe32(crde->crd_len * 8);
- axf->Update(&ctx, blk, blksz);
- break;
- }
-
/* Finalize MAC */
axf->Final(aalg, &ctx);
/* Validate tag */
- if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
- crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
- axf->hashsize, uaalg);
-
- r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
- if (r == 0) {
- /* tag matches, decrypt data */
- if (isccm) {
- KASSERT(exf->reinit != NULL,
- ("AES-CCM reinit function must be set"));
- exf->reinit(swe->sw_kschedule, iv);
- }
- for (i = 0; i < crde->crd_len; i += blksz) {
- len = MIN(crde->crd_len - i, blksz);
- if (len < blksz)
- bzero(blk, blksz);
- crypto_copydata(crp->crp_flags, buf,
- crde->crd_skip + i, len, blk);
- exf->decrypt(swe->sw_kschedule, blk);
- crypto_copyback(crp->crp_flags, buf,
- crde->crd_skip + i, len, blk);
- }
- } else
+ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
+ uaalg);
+
+ r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
+ if (r != 0)
return (EBADMSG);
+
+ /* tag matches, decrypt data */
+ exf->reinit(swe->sw_kschedule, iv);
+ for (i = 0; i < crp->crp_payload_length; i += blksz) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ if (len < blksz)
+ bzero(blk, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len,
+ blk);
+ exf->decrypt(swe->sw_kschedule, blk);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ blk);
+ }
} else {
/* Inject the authentication data */
- crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
- axf->hashsize, aalg);
+ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
+ aalg);
}
return (0);
}
/*
+ * Apply a cipher and a digest to perform EtA.
+ */
+static int
+swcr_eta(struct swcr_session *ses, struct cryptop *crp)
+{
+ int error;
+
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ error = swcr_encdec(ses, crp);
+ if (error == 0)
+ error = swcr_authcompute(ses, crp);
+ } else {
+ error = swcr_authcompute(ses, crp);
+ if (error == 0)
+ error = swcr_encdec(ses, crp);
+ }
+ return (error);
+}
+
+/*
* Apply a compression/decompression algorithm
*/
static int
-swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
- caddr_t buf, int flags)
+swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
u_int8_t *data, *out;
struct comp_algo *cxf;
int adj;
u_int32_t result;
- cxf = sw->sw_cxf;
+ cxf = ses->swcr_compdec.sw_cxf;
/* We must handle the whole buffer of data in one time
* then if there is not all the data in the mbuf, we must
* copy in a buffer.
*/
- data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
+ data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
if (data == NULL)
return (EINVAL);
- crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
+ crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
+ data);
- if (crd->crd_flags & CRD_F_COMP)
- result = cxf->compress(data, crd->crd_len, &out);
+ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
+ result = cxf->compress(data, crp->crp_payload_length, &out);
else
- result = cxf->decompress(data, crd->crd_len, &out);
+ result = cxf->decompress(data, crp->crp_payload_length, &out);
free(data, M_CRYPTO_DATA);
if (result == 0)
- return EINVAL;
+ return (EINVAL);
+ crp->crp_olen = result;
- /* Copy back the (de)compressed data. m_copyback is
- * extending the mbuf as necessary.
- */
- sw->sw_size = result;
/* Check the compressed size when doing compression */
- if (crd->crd_flags & CRD_F_COMP) {
- if (result >= crd->crd_len) {
+ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
+ if (result >= crp->crp_payload_length) {
/* Compression was useless, we lost time */
free(out, M_CRYPTO_DATA);
- return 0;
+ return (0);
}
}
- crypto_copyback(flags, buf, crd->crd_skip, result, out);
- if (result < crd->crd_len) {
- adj = result - crd->crd_len;
- if (flags & CRYPTO_F_IMBUF) {
- adj = result - crd->crd_len;
- m_adj((struct mbuf *)buf, adj);
- } else if (flags & CRYPTO_F_IOV) {
- struct uio *uio = (struct uio *)buf;
+ /* Copy back the (de)compressed data. m_copyback is
+ * extending the mbuf as necessary.
+ */
+ crypto_copyback(crp, crp->crp_payload_start, result, out);
+ if (result < crp->crp_payload_length) {
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ adj = result - crp->crp_payload_length;
+ m_adj(crp->crp_mbuf, adj);
+ break;
+ case CRYPTO_BUF_UIO: {
+ struct uio *uio = crp->crp_uio;
int ind;
- adj = crd->crd_len - result;
+ adj = crp->crp_payload_length - result;
ind = uio->uio_iovcnt - 1;
while (adj > 0 && ind >= 0) {
@@ -796,512 +909,542 @@ swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
ind--;
uio->uio_iovcnt--;
}
+ }
+ break;
}
}
free(out, M_CRYPTO_DATA);
return 0;
}
-/*
- * Generate a new software session.
- */
static int
-swcr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+swcr_setup_encdec(struct swcr_session *ses,
+ const struct crypto_session_params *csp)
{
- struct swcr_session *ses;
- struct swcr_data *swd;
- struct auth_hash *axf;
+ struct swcr_encdec *swe;
struct enc_xform *txf;
- struct comp_algo *cxf;
- size_t i;
- int len;
int error;
- if (cses == NULL || cri == NULL)
- return EINVAL;
-
- ses = crypto_get_driver_session(cses);
- mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);
+ swe = &ses->swcr_encdec;
+ txf = crypto_cipher(csp);
+ MPASS(txf->ivsize == csp->csp_ivlen);
+ if (csp->csp_cipher_key != NULL) {
+ error = txf->setkey(&swe->sw_kschedule,
+ csp->csp_cipher_key, csp->csp_cipher_klen);
+ if (error)
+ return (error);
+ }
+ swe->sw_exf = txf;
+ return (0);
+}
- for (i = 0; cri != NULL && i < nitems(ses->swcr_algorithms); i++) {
- swd = &ses->swcr_algorithms[i];
-
- switch (cri->cri_alg) {
- case CRYPTO_DES_CBC:
- txf = &enc_xform_des;
- goto enccommon;
- case CRYPTO_3DES_CBC:
- txf = &enc_xform_3des;
- goto enccommon;
- case CRYPTO_BLF_CBC:
- txf = &enc_xform_blf;
- goto enccommon;
- case CRYPTO_CAST_CBC:
- txf = &enc_xform_cast5;
- goto enccommon;
- case CRYPTO_SKIPJACK_CBC:
- txf = &enc_xform_skipjack;
- goto enccommon;
- case CRYPTO_RIJNDAEL128_CBC:
- txf = &enc_xform_rijndael128;
- goto enccommon;
- case CRYPTO_AES_XTS:
- txf = &enc_xform_aes_xts;
- goto enccommon;
- case CRYPTO_AES_ICM:
- txf = &enc_xform_aes_icm;
- goto enccommon;
- case CRYPTO_AES_NIST_GCM_16:
- txf = &enc_xform_aes_nist_gcm;
- goto enccommon;
- case CRYPTO_AES_CCM_16:
- txf = &enc_xform_ccm;
- goto enccommon;
- case CRYPTO_AES_NIST_GMAC:
- txf = &enc_xform_aes_nist_gmac;
- swd->sw_exf = txf;
- break;
- case CRYPTO_CAMELLIA_CBC:
- txf = &enc_xform_camellia;
- goto enccommon;
- case CRYPTO_NULL_CBC:
- txf = &enc_xform_null;
- goto enccommon;
- case CRYPTO_CHACHA20:
- txf = &enc_xform_chacha20;
- goto enccommon;
- enccommon:
- if (cri->cri_key != NULL) {
- error = txf->setkey(&swd->sw_kschedule,
- cri->cri_key, cri->cri_klen / 8);
- if (error) {
- swcr_freesession(dev, cses);
- return error;
- }
- }
- swd->sw_exf = txf;
- break;
-
- case CRYPTO_MD5_HMAC:
- axf = &auth_hash_hmac_md5;
- goto authcommon;
- case CRYPTO_SHA1_HMAC:
- axf = &auth_hash_hmac_sha1;
- goto authcommon;
- case CRYPTO_SHA2_224_HMAC:
- axf = &auth_hash_hmac_sha2_224;
- goto authcommon;
- case CRYPTO_SHA2_256_HMAC:
- axf = &auth_hash_hmac_sha2_256;
- goto authcommon;
- case CRYPTO_SHA2_384_HMAC:
- axf = &auth_hash_hmac_sha2_384;
- goto authcommon;
- case CRYPTO_SHA2_512_HMAC:
- axf = &auth_hash_hmac_sha2_512;
- goto authcommon;
- case CRYPTO_NULL_HMAC:
- axf = &auth_hash_null;
- goto authcommon;
- case CRYPTO_RIPEMD160_HMAC:
- axf = &auth_hash_hmac_ripemd_160;
- authcommon:
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
-
- swd->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_octx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
+static int
+swcr_setup_auth(struct swcr_session *ses,
+ const struct crypto_session_params *csp)
+{
+ struct swcr_auth *swa;
+ struct auth_hash *axf;
- if (cri->cri_key != NULL) {
- error = swcr_authprepare(axf, swd,
- cri->cri_key, cri->cri_klen);
- if (error != 0) {
- swcr_freesession(dev, cses);
- return error;
- }
- }
+ swa = &ses->swcr_auth;
- swd->sw_mlen = cri->cri_mlen;
- swd->sw_axf = axf;
- break;
-
- case CRYPTO_MD5_KPDK:
- axf = &auth_hash_key_md5;
- goto auth2common;
-
- case CRYPTO_SHA1_KPDK:
- axf = &auth_hash_key_sha1;
- auth2common:
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
+ axf = crypto_auth_hash(csp);
+ swa->sw_axf = axf;
+ if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
+ return (EINVAL);
+ if (csp->csp_auth_mlen == 0)
+ swa->sw_mlen = axf->hashsize;
+ else
+ swa->sw_mlen = csp->csp_auth_mlen;
+ swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
+ if (swa->sw_ictx == NULL)
+ return (ENOBUFS);
- swd->sw_octx = malloc(cri->cri_klen / 8,
- M_CRYPTO_DATA, M_NOWAIT);
- if (swd->sw_octx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_224_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_NULL_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ swa->sw_octx_len = axf->ctxsize;
+ swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if (swa->sw_octx == NULL)
+ return (ENOBUFS);
+
+ if (csp->csp_auth_key != NULL) {
+ swcr_authprepare(axf, swa, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ }
- /* Store the key so we can "append" it to the payload */
- if (cri->cri_key != NULL) {
- error = swcr_authprepare(axf, swd,
- cri->cri_key, cri->cri_klen);
- if (error != 0) {
- swcr_freesession(dev, cses);
- return error;
- }
- }
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_authcompute;
+ break;
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ swa->sw_octx_len = csp->csp_auth_klen;
+ swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if (swa->sw_octx == NULL)
+ return (ENOBUFS);
+
+ /* Store the key so we can "append" it to the payload */
+ if (csp->csp_auth_key != NULL) {
+ swcr_authprepare(axf, swa, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ }
- swd->sw_mlen = cri->cri_mlen;
- swd->sw_axf = axf;
- break;
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_authcompute;
+ break;
#ifdef notdef
- case CRYPTO_MD5:
- axf = &auth_hash_md5;
- goto auth3common;
+ case CRYPTO_MD5:
#endif
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA2_224:
+ case CRYPTO_SHA2_256:
+ case CRYPTO_SHA2_384:
+ case CRYPTO_SHA2_512:
+ axf->Init(swa->sw_ictx);
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_authcompute;
+ break;
+ case CRYPTO_AES_NIST_GMAC:
+ axf->Init(swa->sw_ictx);
+ axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_gmac;
+ break;
+ case CRYPTO_POLY1305:
+ case CRYPTO_BLAKE2B:
+ case CRYPTO_BLAKE2S:
+ /*
+ * Blake2b and Blake2s support an optional key but do
+ * not require one.
+ */
+ if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
+ axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ axf->Init(swa->sw_ictx);
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_authcompute;
+ break;
+ case CRYPTO_AES_CCM_CBC_MAC:
+ axf->Init(swa->sw_ictx);
+ axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_ccm_cbc_mac;
+ break;
+ }
- case CRYPTO_SHA1:
- axf = &auth_hash_sha1;
- goto auth3common;
- case CRYPTO_SHA2_224:
- axf = &auth_hash_sha2_224;
- goto auth3common;
- case CRYPTO_SHA2_256:
- axf = &auth_hash_sha2_256;
- goto auth3common;
- case CRYPTO_SHA2_384:
- axf = &auth_hash_sha2_384;
- goto auth3common;
- case CRYPTO_SHA2_512:
- axf = &auth_hash_sha2_512;
-
- auth3common:
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
-
- axf->Init(swd->sw_ictx);
- swd->sw_mlen = cri->cri_mlen;
- swd->sw_axf = axf;
- break;
-
- case CRYPTO_AES_CCM_CBC_MAC:
- switch (cri->cri_klen) {
- case 128:
- axf = &auth_hash_ccm_cbc_mac_128;
- break;
- case 192:
- axf = &auth_hash_ccm_cbc_mac_192;
- break;
- case 256:
- axf = &auth_hash_ccm_cbc_mac_256;
- break;
- default:
- swcr_freesession(dev, cses);
- return EINVAL;
- }
- goto auth4common;
- case CRYPTO_AES_128_NIST_GMAC:
- axf = &auth_hash_nist_gmac_aes_128;
- goto auth4common;
-
- case CRYPTO_AES_192_NIST_GMAC:
- axf = &auth_hash_nist_gmac_aes_192;
- goto auth4common;
-
- case CRYPTO_AES_256_NIST_GMAC:
- axf = &auth_hash_nist_gmac_aes_256;
- auth4common:
- len = cri->cri_klen / 8;
- if (len != 16 && len != 24 && len != 32) {
- swcr_freesession(dev, cses);
- return EINVAL;
- }
+ return (0);
+}
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
- axf->Init(swd->sw_ictx);
- axf->Setkey(swd->sw_ictx, cri->cri_key, len);
- swd->sw_axf = axf;
- break;
+static int
+swcr_setup_gcm(struct swcr_session *ses,
+ const struct crypto_session_params *csp)
+{
+ struct swcr_encdec *swe;
+ struct swcr_auth *swa;
+ struct enc_xform *txf;
+ struct auth_hash *axf;
+ int error;
- case CRYPTO_BLAKE2B:
- axf = &auth_hash_blake2b;
- goto auth5common;
- case CRYPTO_BLAKE2S:
- axf = &auth_hash_blake2s;
- goto auth5common;
- case CRYPTO_POLY1305:
- axf = &auth_hash_poly1305;
- auth5common:
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
- axf->Setkey(swd->sw_ictx, cri->cri_key,
- cri->cri_klen / 8);
- axf->Init(swd->sw_ictx);
- swd->sw_axf = axf;
- break;
+ if (csp->csp_ivlen != AES_GCM_IV_LEN)
+ return (EINVAL);
- case CRYPTO_DEFLATE_COMP:
- cxf = &comp_algo_deflate;
- swd->sw_cxf = cxf;
- break;
- default:
- swcr_freesession(dev, cses);
- return EINVAL;
- }
-
- swd->sw_alg = cri->cri_alg;
- cri = cri->cri_next;
- ses->swcr_nalgs++;
+ /* First, setup the auth side. */
+ swa = &ses->swcr_auth;
+ switch (csp->csp_cipher_klen * 8) {
+ case 128:
+ axf = &auth_hash_nist_gmac_aes_128;
+ break;
+ case 192:
+ axf = &auth_hash_nist_gmac_aes_192;
+ break;
+ case 256:
+ axf = &auth_hash_nist_gmac_aes_256;
+ break;
+ default:
+ return (EINVAL);
}
-
- if (cri != NULL) {
- CRYPTDEB("Bogus session request for three or more algorithms");
- return EINVAL;
+ swa->sw_axf = axf;
+ if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
+ return (EINVAL);
+ if (csp->csp_auth_mlen == 0)
+ swa->sw_mlen = axf->hashsize;
+ else
+ swa->sw_mlen = csp->csp_auth_mlen;
+ swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
+ if (swa->sw_ictx == NULL)
+ return (ENOBUFS);
+ axf->Init(swa->sw_ictx);
+ if (csp->csp_cipher_key != NULL)
+ axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
+ csp->csp_cipher_klen);
+
+ /* Second, setup the cipher side. */
+ swe = &ses->swcr_encdec;
+ txf = &enc_xform_aes_nist_gcm;
+ if (csp->csp_cipher_key != NULL) {
+ error = txf->setkey(&swe->sw_kschedule,
+ csp->csp_cipher_key, csp->csp_cipher_klen);
+ if (error)
+ return (error);
}
- return 0;
+ swe->sw_exf = txf;
+
+ return (0);
}
-static void
-swcr_freesession(device_t dev, crypto_session_t cses)
+static int
+swcr_setup_ccm(struct swcr_session *ses,
+ const struct crypto_session_params *csp)
{
- struct swcr_session *ses;
- struct swcr_data *swd;
+ struct swcr_encdec *swe;
+ struct swcr_auth *swa;
struct enc_xform *txf;
struct auth_hash *axf;
- size_t i;
+ int error;
- ses = crypto_get_driver_session(cses);
+ if (csp->csp_ivlen != AES_CCM_IV_LEN)
+ return (EINVAL);
- mtx_destroy(&ses->swcr_lock);
- for (i = 0; i < nitems(ses->swcr_algorithms); i++) {
- swd = &ses->swcr_algorithms[i];
-
- switch (swd->sw_alg) {
- case CRYPTO_DES_CBC:
- case CRYPTO_3DES_CBC:
- case CRYPTO_BLF_CBC:
- case CRYPTO_CAST_CBC:
- case CRYPTO_SKIPJACK_CBC:
- case CRYPTO_RIJNDAEL128_CBC:
- case CRYPTO_AES_XTS:
- case CRYPTO_AES_ICM:
- case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_NIST_GMAC:
- case CRYPTO_CAMELLIA_CBC:
- case CRYPTO_NULL_CBC:
- case CRYPTO_CHACHA20:
- case CRYPTO_AES_CCM_16:
- txf = swd->sw_exf;
+ /* First, setup the auth side. */
+ swa = &ses->swcr_auth;
+ switch (csp->csp_cipher_klen * 8) {
+ case 128:
+ axf = &auth_hash_ccm_cbc_mac_128;
+ break;
+ case 192:
+ axf = &auth_hash_ccm_cbc_mac_192;
+ break;
+ case 256:
+ axf = &auth_hash_ccm_cbc_mac_256;
+ break;
+ default:
+ return (EINVAL);
+ }
+ swa->sw_axf = axf;
+ if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
+ return (EINVAL);
+ if (csp->csp_auth_mlen == 0)
+ swa->sw_mlen = axf->hashsize;
+ else
+ swa->sw_mlen = csp->csp_auth_mlen;
+ swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
+ if (swa->sw_ictx == NULL)
+ return (ENOBUFS);
+ axf->Init(swa->sw_ictx);
+ if (csp->csp_cipher_key != NULL)
+ axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
+ csp->csp_cipher_klen);
+
+ /* Second, setup the cipher side. */
+ swe = &ses->swcr_encdec;
+ txf = &enc_xform_ccm;
+ if (csp->csp_cipher_key != NULL) {
+ error = txf->setkey(&swe->sw_kschedule,
+ csp->csp_cipher_key, csp->csp_cipher_klen);
+ if (error)
+ return (error);
+ }
+ swe->sw_exf = txf;
- if (swd->sw_kschedule)
- txf->zerokey(&(swd->sw_kschedule));
- break;
+ return (0);
+}
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_224_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- case CRYPTO_RIPEMD160_HMAC:
- case CRYPTO_NULL_HMAC:
- case CRYPTO_AES_CCM_CBC_MAC:
- axf = swd->sw_axf;
+static bool
+swcr_auth_supported(const struct crypto_session_params *csp)
+{
+ struct auth_hash *axf;
- if (swd->sw_ictx) {
- bzero(swd->sw_ictx, axf->ctxsize);
- free(swd->sw_ictx, M_CRYPTO_DATA);
- }
- if (swd->sw_octx) {
- bzero(swd->sw_octx, axf->ctxsize);
- free(swd->sw_octx, M_CRYPTO_DATA);
- }
+ axf = crypto_auth_hash(csp);
+ if (axf == NULL)
+ return (false);
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_224_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_NULL_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ break;
+ case CRYPTO_AES_NIST_GMAC:
+ switch (csp->csp_auth_klen * 8) {
+ case 128:
+ case 192:
+ case 256:
break;
+ default:
+ return (false);
+ }
+ if (csp->csp_auth_key == NULL)
+ return (false);
+ if (csp->csp_ivlen != AES_GCM_IV_LEN)
+ return (false);
+ break;
+ case CRYPTO_POLY1305:
+ if (csp->csp_auth_klen != POLY1305_KEY_LEN)
+ return (false);
+ break;
+ case CRYPTO_AES_CCM_CBC_MAC:
+ switch (csp->csp_auth_klen * 8) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ return (false);
+ }
+ if (csp->csp_auth_key == NULL)
+ return (false);
+ if (csp->csp_ivlen != AES_CCM_IV_LEN)
+ return (false);
+ break;
+ }
+ return (true);
+}
- case CRYPTO_MD5_KPDK:
- case CRYPTO_SHA1_KPDK:
- axf = swd->sw_axf;
+static bool
+swcr_cipher_supported(const struct crypto_session_params *csp)
+{
+ struct enc_xform *txf;
- if (swd->sw_ictx) {
- bzero(swd->sw_ictx, axf->ctxsize);
- free(swd->sw_ictx, M_CRYPTO_DATA);
- }
- if (swd->sw_octx) {
- bzero(swd->sw_octx, swd->sw_klen);
- free(swd->sw_octx, M_CRYPTO_DATA);
- }
- break;
+ txf = crypto_cipher(csp);
+ if (txf == NULL)
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
+ txf->ivsize != csp->csp_ivlen)
+ return (false);
+ return (true);
+}
- case CRYPTO_BLAKE2B:
- case CRYPTO_BLAKE2S:
- case CRYPTO_MD5:
- case CRYPTO_POLY1305:
- case CRYPTO_SHA1:
- case CRYPTO_SHA2_224:
- case CRYPTO_SHA2_256:
- case CRYPTO_SHA2_384:
- case CRYPTO_SHA2_512:
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- axf = swd->sw_axf;
-
- if (swd->sw_ictx) {
- explicit_bzero(swd->sw_ictx, axf->ctxsize);
- free(swd->sw_ictx, M_CRYPTO_DATA);
- }
- break;
+static int
+swcr_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_COMPRESS:
+ switch (csp->csp_cipher_alg) {
case CRYPTO_DEFLATE_COMP:
- /* Nothing to do */
break;
+ default:
+ return (EINVAL);
+ }
+ break;
+ case CSP_MODE_CIPHER:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ return (EINVAL);
+ default:
+ if (!swcr_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ }
+ break;
+ case CSP_MODE_DIGEST:
+ if (!swcr_auth_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_AEAD:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ break;
+ default:
+ return (EINVAL);
+ }
+ break;
+ case CSP_MODE_ETA:
+ /* AEAD algorithms cannot be used for EtA. */
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ return (EINVAL);
+ }
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_AES_NIST_GMAC:
+ case CRYPTO_AES_CCM_CBC_MAC:
+ return (EINVAL);
}
+
+ if (!swcr_cipher_supported(csp) ||
+ !swcr_auth_supported(csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
}
+
+ return (CRYPTODEV_PROBE_SOFTWARE);
}
/*
- * Process a software request.
+ * Generate a new software session.
*/
static int
-swcr_process(device_t dev, struct cryptop *crp, int hint)
+swcr_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
{
- struct swcr_session *ses = NULL;
- struct cryptodesc *crd;
- struct swcr_data *sw;
- size_t i;
-
- /* Sanity check */
- if (crp == NULL)
- return EINVAL;
-
- if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
- crp->crp_etype = EINVAL;
- goto done;
- }
+ struct swcr_session *ses;
+ struct swcr_encdec *swe;
+ struct swcr_auth *swa;
+ struct comp_algo *cxf;
+ int error;
- ses = crypto_get_driver_session(crp->crp_session);
- mtx_lock(&ses->swcr_lock);
+ ses = crypto_get_driver_session(cses);
+ mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);
- /* Go through crypto descriptors, processing as we go */
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- /*
- * Find the crypto context.
- *
- * XXX Note that the logic here prevents us from having
- * XXX the same algorithm multiple times in a session
- * XXX (or rather, we can but it won't give us the right
- * XXX results). To do that, we'd need some way of differentiating
- * XXX between the various instances of an algorithm (so we can
- * XXX locate the correct crypto context).
- */
- for (i = 0; i < nitems(ses->swcr_algorithms) &&
- ses->swcr_algorithms[i].sw_alg != crd->crd_alg; i++)
- ;
-
- /* No such context ? */
- if (i == nitems(ses->swcr_algorithms)) {
- crp->crp_etype = EINVAL;
- goto done;
- }
- sw = &ses->swcr_algorithms[i];
- switch (sw->sw_alg) {
- case CRYPTO_DES_CBC:
- case CRYPTO_3DES_CBC:
- case CRYPTO_BLF_CBC:
- case CRYPTO_CAST_CBC:
- case CRYPTO_SKIPJACK_CBC:
- case CRYPTO_RIJNDAEL128_CBC:
- case CRYPTO_AES_XTS:
- case CRYPTO_AES_ICM:
- case CRYPTO_CAMELLIA_CBC:
- case CRYPTO_CHACHA20:
- if ((crp->crp_etype = swcr_encdec(crd, sw,
- crp->crp_buf, crp->crp_flags)) != 0)
- goto done;
+ error = 0;
+ swe = &ses->swcr_encdec;
+ swa = &ses->swcr_auth;
+ switch (csp->csp_mode) {
+ case CSP_MODE_COMPRESS:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DEFLATE_COMP:
+ cxf = &comp_algo_deflate;
break;
+#ifdef INVARIANTS
+ default:
+ panic("bad compression algo");
+#endif
+ }
+ ses->swcr_compdec.sw_cxf = cxf;
+ ses->swcr_process = swcr_compdec;
+ break;
+ case CSP_MODE_CIPHER:
+ switch (csp->csp_cipher_alg) {
case CRYPTO_NULL_CBC:
- crp->crp_etype = 0;
+ ses->swcr_process = swcr_null;
break;
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_224_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- case CRYPTO_RIPEMD160_HMAC:
- case CRYPTO_NULL_HMAC:
- case CRYPTO_MD5_KPDK:
- case CRYPTO_SHA1_KPDK:
- case CRYPTO_MD5:
- case CRYPTO_SHA1:
- case CRYPTO_SHA2_224:
- case CRYPTO_SHA2_256:
- case CRYPTO_SHA2_384:
- case CRYPTO_SHA2_512:
- case CRYPTO_BLAKE2B:
- case CRYPTO_BLAKE2S:
- case CRYPTO_POLY1305:
- if ((crp->crp_etype = swcr_authcompute(crd, sw,
- crp->crp_buf, crp->crp_flags)) != 0)
- goto done;
+#ifdef INVARIANTS
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ panic("bad cipher algo");
+#endif
+ default:
+ error = swcr_setup_encdec(ses, csp);
+ if (error == 0)
+ ses->swcr_process = swcr_encdec;
+ }
+ break;
+ case CSP_MODE_DIGEST:
+ error = swcr_setup_auth(ses, csp);
+ break;
+ case CSP_MODE_AEAD:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ error = swcr_setup_gcm(ses, csp);
+ if (error == 0)
+ ses->swcr_process = swcr_gcm;
break;
-
+ case CRYPTO_AES_CCM_16:
+ error = swcr_setup_ccm(ses, csp);
+ if (error == 0)
+ ses->swcr_process = swcr_ccm;
+ break;
+#ifdef INVARIANTS
+ default:
+ panic("bad aead algo");
+#endif
+ }
+ break;
+ case CSP_MODE_ETA:
+#ifdef INVARIANTS
+ switch (csp->csp_cipher_alg) {
case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_NIST_GMAC:
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
case CRYPTO_AES_CCM_16:
+ panic("bad eta cipher algo");
+ }
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_AES_NIST_GMAC:
case CRYPTO_AES_CCM_CBC_MAC:
- crp->crp_etype = swcr_authenc(crp);
- goto done;
+ panic("bad eta auth algo");
+ }
+#endif
- case CRYPTO_DEFLATE_COMP:
- if ((crp->crp_etype = swcr_compdec(crd, sw,
- crp->crp_buf, crp->crp_flags)) != 0)
- goto done;
- else
- crp->crp_olen = (int)sw->sw_size;
+ error = swcr_setup_auth(ses, csp);
+ if (error)
break;
+ if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
+ /* Effectively degrade to digest mode. */
+ ses->swcr_process = swcr_authcompute;
+ break;
+ }
- default:
- /* Unknown/unsupported algorithm */
- crp->crp_etype = EINVAL;
- goto done;
+ error = swcr_setup_encdec(ses, csp);
+ if (error == 0)
+ ses->swcr_process = swcr_eta;
+ break;
+ default:
+ error = EINVAL;
+ }
+
+ if (error)
+ swcr_freesession(dev, cses);
+ return (error);
+}
+
+static void
+swcr_freesession(device_t dev, crypto_session_t cses)
+{
+ struct swcr_session *ses;
+ struct swcr_auth *swa;
+ struct enc_xform *txf;
+ struct auth_hash *axf;
+
+ ses = crypto_get_driver_session(cses);
+
+ mtx_destroy(&ses->swcr_lock);
+
+ txf = ses->swcr_encdec.sw_exf;
+ if (txf != NULL) {
+ if (ses->swcr_encdec.sw_kschedule != NULL)
+ txf->zerokey(&(ses->swcr_encdec.sw_kschedule));
+ }
+
+ axf = ses->swcr_auth.sw_axf;
+ if (axf != NULL) {
+ swa = &ses->swcr_auth;
+ if (swa->sw_ictx != NULL) {
+ explicit_bzero(swa->sw_ictx, axf->ctxsize);
+ free(swa->sw_ictx, M_CRYPTO_DATA);
+ }
+ if (swa->sw_octx != NULL) {
+ explicit_bzero(swa->sw_octx, swa->sw_octx_len);
+ free(swa->sw_octx, M_CRYPTO_DATA);
}
}
+}
-done:
- if (ses)
- mtx_unlock(&ses->swcr_lock);
+/*
+ * Process a software request.
+ */
+static int
+swcr_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct swcr_session *ses;
+
+ ses = crypto_get_driver_session(crp->crp_session);
+ mtx_lock(&ses->swcr_lock);
+
+ crp->crp_etype = ses->swcr_process(ses, crp);
+
+ mtx_unlock(&ses->swcr_lock);
crypto_done(crp);
- return 0;
+ return (0);
}
static void
@@ -1323,58 +1466,15 @@ swcr_probe(device_t dev)
static int
swcr_attach(device_t dev)
{
- memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
- memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
if (swcr_id < 0) {
device_printf(dev, "cannot initialize!");
- return ENOMEM;
+ return (ENXIO);
}
-#define REGISTER(alg) \
- crypto_register(swcr_id, alg, 0,0)
- REGISTER(CRYPTO_DES_CBC);
- REGISTER(CRYPTO_3DES_CBC);
- REGISTER(CRYPTO_BLF_CBC);
- REGISTER(CRYPTO_CAST_CBC);
- REGISTER(CRYPTO_SKIPJACK_CBC);
- REGISTER(CRYPTO_NULL_CBC);
- REGISTER(CRYPTO_MD5_HMAC);
- REGISTER(CRYPTO_SHA1_HMAC);
- REGISTER(CRYPTO_SHA2_224_HMAC);
- REGISTER(CRYPTO_SHA2_256_HMAC);
- REGISTER(CRYPTO_SHA2_384_HMAC);
- REGISTER(CRYPTO_SHA2_512_HMAC);
- REGISTER(CRYPTO_RIPEMD160_HMAC);
- REGISTER(CRYPTO_NULL_HMAC);
- REGISTER(CRYPTO_MD5_KPDK);
- REGISTER(CRYPTO_SHA1_KPDK);
- REGISTER(CRYPTO_MD5);
- REGISTER(CRYPTO_SHA1);
- REGISTER(CRYPTO_SHA2_224);
- REGISTER(CRYPTO_SHA2_256);
- REGISTER(CRYPTO_SHA2_384);
- REGISTER(CRYPTO_SHA2_512);
- REGISTER(CRYPTO_RIJNDAEL128_CBC);
- REGISTER(CRYPTO_AES_XTS);
- REGISTER(CRYPTO_AES_ICM);
- REGISTER(CRYPTO_AES_NIST_GCM_16);
- REGISTER(CRYPTO_AES_NIST_GMAC);
- REGISTER(CRYPTO_AES_128_NIST_GMAC);
- REGISTER(CRYPTO_AES_192_NIST_GMAC);
- REGISTER(CRYPTO_AES_256_NIST_GMAC);
- REGISTER(CRYPTO_CAMELLIA_CBC);
- REGISTER(CRYPTO_DEFLATE_COMP);
- REGISTER(CRYPTO_BLAKE2B);
- REGISTER(CRYPTO_BLAKE2S);
- REGISTER(CRYPTO_CHACHA20);
- REGISTER(CRYPTO_AES_CCM_16);
- REGISTER(CRYPTO_AES_CCM_CBC_MAC);
- REGISTER(CRYPTO_POLY1305);
-#undef REGISTER
- return 0;
+ return (0);
}
static int
@@ -1390,6 +1490,7 @@ static device_method_t swcr_methods[] = {
DEVMETHOD(device_attach, swcr_attach),
DEVMETHOD(device_detach, swcr_detach),
+ DEVMETHOD(cryptodev_probesession, swcr_probesession),
DEVMETHOD(cryptodev_newsession, swcr_newsession),
DEVMETHOD(cryptodev_freesession,swcr_freesession),
DEVMETHOD(cryptodev_process, swcr_process),
diff --git a/sys/opencrypto/cryptosoft.h b/sys/opencrypto/cryptosoft.h
deleted file mode 100644
index d787dc243ae6..000000000000
--- a/sys/opencrypto/cryptosoft.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* $FreeBSD$ */
-/* $OpenBSD: cryptosoft.h,v 1.10 2002/04/22 23:10:09 deraadt Exp $ */
-
-/*-
- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
- *
- * This code was written by Angelos D. Keromytis in Athens, Greece, in
- * February 2000. Network Security Technologies Inc. (NSTI) kindly
- * supported the development of this code.
- *
- * Copyright (c) 2000 Angelos D. Keromytis
- *
- * Permission to use, copy, and modify this software with or without fee
- * is hereby granted, provided that this entire notice is included in
- * all source code copies of any software which is or includes a copy or
- * modification of this software.
- *
- * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
- * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
- * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
- * PURPOSE.
- */
-
-#ifndef _CRYPTO_CRYPTOSOFT_H_
-#define _CRYPTO_CRYPTOSOFT_H_
-
-/* Software session entry */
-struct swcr_data {
- int sw_alg; /* Algorithm */
- union {
- struct {
- u_int8_t *SW_ictx;
- u_int8_t *SW_octx;
- u_int16_t SW_klen;
- u_int16_t SW_mlen;
- struct auth_hash *SW_axf;
- } SWCR_AUTH;
- struct {
- u_int8_t *SW_kschedule;
- struct enc_xform *SW_exf;
- } SWCR_ENC;
- struct {
- u_int32_t SW_size;
- struct comp_algo *SW_cxf;
- } SWCR_COMP;
- } SWCR_UN;
-
-#define sw_ictx SWCR_UN.SWCR_AUTH.SW_ictx
-#define sw_octx SWCR_UN.SWCR_AUTH.SW_octx
-#define sw_klen SWCR_UN.SWCR_AUTH.SW_klen
-#define sw_mlen SWCR_UN.SWCR_AUTH.SW_mlen
-#define sw_axf SWCR_UN.SWCR_AUTH.SW_axf
-#define sw_kschedule SWCR_UN.SWCR_ENC.SW_kschedule
-#define sw_exf SWCR_UN.SWCR_ENC.SW_exf
-#define sw_size SWCR_UN.SWCR_COMP.SW_size
-#define sw_cxf SWCR_UN.SWCR_COMP.SW_cxf
-};
-
-struct swcr_session {
- struct mtx swcr_lock;
- struct swcr_data swcr_algorithms[2];
- unsigned swcr_nalgs;
-};
-
-#ifdef _KERNEL
-extern u_int8_t hmac_ipad_buffer[];
-extern u_int8_t hmac_opad_buffer[];
-#endif /* _KERNEL */
-
-#endif /* _CRYPTO_CRYPTO_H_ */
diff --git a/sys/opencrypto/ktls_ocf.c b/sys/opencrypto/ktls_ocf.c
index 0f04e1268e2c..b607f2eead3d 100644
--- a/sys/opencrypto/ktls_ocf.c
+++ b/sys/opencrypto/ktls_ocf.c
@@ -45,7 +45,6 @@ __FBSDID("$FreeBSD$");
struct ocf_session {
crypto_session_t sid;
- int crda_alg;
struct mtx lock;
};
@@ -100,8 +99,6 @@ ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
{
struct uio uio;
struct tls_aead_data ad;
- struct tls_nonce_data nd;
- struct cryptodesc *crde, *crda;
struct cryptop *crp;
struct ocf_session *os;
struct ocf_operation *oo;
@@ -116,19 +113,15 @@ ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
oo->os = os;
iov = oo->iov;
- crp = crypto_getreq(2);
- if (crp == NULL) {
- free(oo, M_KTLS_OCF);
- return (ENOMEM);
- }
+ crp = crypto_getreq(os->sid, M_WAITOK);
/* Setup the IV. */
- memcpy(nd.fixed, tls->params.iv, TLS_AEAD_GCM_LEN);
- memcpy(&nd.seq, hdr + 1, sizeof(nd.seq));
+ memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
+ memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
/* Setup the AAD. */
tls_comp_len = ntohs(hdr->tls_length) -
- (AES_GMAC_HASH_LEN + sizeof(nd.seq));
+ (AES_GMAC_HASH_LEN + sizeof(uint64_t));
ad.seq = htobe64(seqno);
ad.type = hdr->tls_type;
ad.tls_vmajor = hdr->tls_vmajor;
@@ -160,26 +153,20 @@ ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_td = curthread;
- crp->crp_session = os->sid;
- crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM;
+ crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
+ crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
+ crp->crp_buf_type = CRYPTO_BUF_UIO;
crp->crp_uio = &uio;
crp->crp_ilen = uio.uio_resid;
crp->crp_opaque = oo;
crp->crp_callback = ktls_ocf_callback;
- crde = crp->crp_desc;
- crda = crde->crd_next;
-
- crda->crd_alg = os->crda_alg;
- crda->crd_skip = 0;
- crda->crd_len = sizeof(ad);
- crda->crd_inject = crp->crp_ilen - AES_GMAC_HASH_LEN;
-
- crde->crd_alg = CRYPTO_AES_NIST_GCM_16;
- crde->crd_skip = sizeof(ad);
- crde->crd_len = crp->crp_ilen - (sizeof(ad) + AES_GMAC_HASH_LEN);
- crde->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- memcpy(crde->crd_iv, &nd, sizeof(nd));
+ crp->crp_aad_start = 0;
+ crp->crp_aad_length = sizeof(ad);
+ crp->crp_payload_start = sizeof(ad);
+ crp->crp_payload_length = crp->crp_ilen -
+ (sizeof(ad) + AES_GMAC_HASH_LEN);
+ crp->crp_digest_start = crp->crp_ilen - AES_GMAC_HASH_LEN;
counter_u64_add(ocf_tls12_gcm_crypts, 1);
for (;;) {
@@ -216,7 +203,6 @@ ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
struct uio uio;
struct tls_aead_data_13 ad;
char nonce[12];
- struct cryptodesc *crde, *crda;
struct cryptop *crp;
struct ocf_session *os;
struct ocf_operation *oo;
@@ -230,11 +216,7 @@ ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
oo->os = os;
iov = oo->iov;
- crp = crypto_getreq(2);
- if (crp == NULL) {
- free(oo, M_KTLS_OCF);
- return (ENOMEM);
- }
+ crp = crypto_getreq(os->sid, M_WAITOK);
/* Setup the nonce. */
memcpy(nonce, tls->params.iv, tls->params.iv_len);
@@ -272,26 +254,21 @@ ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_td = curthread;
- crp->crp_session = os->sid;
- crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM;
+ crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
+ crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
+ crp->crp_buf_type = CRYPTO_BUF_UIO;
crp->crp_uio = &uio;
crp->crp_ilen = uio.uio_resid;
crp->crp_opaque = oo;
crp->crp_callback = ktls_ocf_callback;
- crde = crp->crp_desc;
- crda = crde->crd_next;
-
- crda->crd_alg = os->crda_alg;
- crda->crd_skip = 0;
- crda->crd_len = sizeof(ad);
- crda->crd_inject = crp->crp_ilen - AES_GMAC_HASH_LEN;
-
- crde->crd_alg = CRYPTO_AES_NIST_GCM_16;
- crde->crd_skip = sizeof(ad);
- crde->crd_len = crp->crp_ilen - (sizeof(ad) + AES_GMAC_HASH_LEN);
- crde->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- memcpy(crde->crd_iv, nonce, sizeof(nonce));
+ crp->crp_aad_start = 0;
+ crp->crp_aad_length = sizeof(ad);
+ crp->crp_payload_start = sizeof(ad);
+ crp->crp_payload_length = crp->crp_ilen -
+ (sizeof(ad) + AES_GMAC_HASH_LEN);
+ crp->crp_digest_start = crp->crp_ilen - AES_GMAC_HASH_LEN;
+ memcpy(crp->crp_iv, nonce, sizeof(nonce));
counter_u64_add(ocf_tls13_gcm_crypts, 1);
for (;;) {
@@ -326,6 +303,7 @@ ktls_ocf_free(struct ktls_session *tls)
struct ocf_session *os;
os = tls->cipher;
+ crypto_freesession(os->sid);
mtx_destroy(&os->lock);
explicit_bzero(os, sizeof(*os));
free(os, M_KTLS_OCF);
@@ -334,27 +312,26 @@ ktls_ocf_free(struct ktls_session *tls)
static int
ktls_ocf_try(struct socket *so, struct ktls_session *tls)
{
- struct cryptoini cria, crie;
+ struct crypto_session_params csp;
struct ocf_session *os;
int error;
- memset(&cria, 0, sizeof(cria));
- memset(&crie, 0, sizeof(crie));
+ memset(&csp, 0, sizeof(csp));
switch (tls->params.cipher_algorithm) {
case CRYPTO_AES_NIST_GCM_16:
switch (tls->params.cipher_key_len) {
case 128 / 8:
- cria.cri_alg = CRYPTO_AES_128_NIST_GMAC;
- break;
case 256 / 8:
- cria.cri_alg = CRYPTO_AES_256_NIST_GMAC;
break;
default:
return (EINVAL);
}
- cria.cri_key = tls->params.cipher_key;
- cria.cri_klen = tls->params.cipher_key_len * 8;
+ csp.csp_mode = CSP_MODE_AEAD;
+ csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
+ csp.csp_cipher_key = tls->params.cipher_key;
+ csp.csp_cipher_klen = tls->params.cipher_key_len;
+ csp.csp_ivlen = AES_GCM_IV_LEN;
break;
default:
return (EPROTONOSUPPORT);
@@ -370,19 +347,13 @@ ktls_ocf_try(struct socket *so, struct ktls_session *tls)
if (os == NULL)
return (ENOMEM);
- crie.cri_alg = tls->params.cipher_algorithm;
- crie.cri_key = tls->params.cipher_key;
- crie.cri_klen = tls->params.cipher_key_len * 8;
-
- crie.cri_next = &cria;
- error = crypto_newsession(&os->sid, &crie,
+ error = crypto_newsession(&os->sid, &csp,
CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
if (error) {
free(os, M_KTLS_OCF);
return (error);
}
- os->crda_alg = cria.cri_alg;
mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
tls->cipher = os;
if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
diff --git a/sys/opencrypto/xform_gmac.c b/sys/opencrypto/xform_gmac.c
index 156ed7f13443..7359e014e5d5 100644
--- a/sys/opencrypto/xform_gmac.c
+++ b/sys/opencrypto/xform_gmac.c
@@ -66,7 +66,7 @@ struct enc_xform enc_xform_aes_nist_gmac = {
/* Authentication instances */
struct auth_hash auth_hash_nist_gmac_aes_128 = {
- CRYPTO_AES_128_NIST_GMAC, "GMAC-AES-128",
+ CRYPTO_AES_NIST_GMAC, "GMAC-AES-128",
AES_128_GMAC_KEY_LEN, AES_GMAC_HASH_LEN, sizeof(struct aes_gmac_ctx),
GMAC_BLOCK_LEN,
(void (*)(void *)) AES_GMAC_Init,
@@ -77,7 +77,7 @@ struct auth_hash auth_hash_nist_gmac_aes_128 = {
};
struct auth_hash auth_hash_nist_gmac_aes_192 = {
- CRYPTO_AES_192_NIST_GMAC, "GMAC-AES-192",
+ CRYPTO_AES_NIST_GMAC, "GMAC-AES-192",
AES_192_GMAC_KEY_LEN, AES_GMAC_HASH_LEN, sizeof(struct aes_gmac_ctx),
GMAC_BLOCK_LEN,
(void (*)(void *)) AES_GMAC_Init,
@@ -88,7 +88,7 @@ struct auth_hash auth_hash_nist_gmac_aes_192 = {
};
struct auth_hash auth_hash_nist_gmac_aes_256 = {
- CRYPTO_AES_256_NIST_GMAC, "GMAC-AES-256",
+ CRYPTO_AES_NIST_GMAC, "GMAC-AES-256",
AES_256_GMAC_KEY_LEN, AES_GMAC_HASH_LEN, sizeof(struct aes_gmac_ctx),
GMAC_BLOCK_LEN,
(void (*)(void *)) AES_GMAC_Init,