path: root/sys/kern/uipc_ktls.c
author	John Baldwin <jhb@FreeBSD.org>	2021-05-25 23:59:19 +0000
committer	John Baldwin <jhb@FreeBSD.org>	2021-05-25 23:59:19 +0000
commit	21e3c1fbe2460f144f6d4dfd61c3346b2de59667 (patch)
tree	910ad4329d2e53a49de497176702d4af23d0e820 /sys/kern/uipc_ktls.c
parent	4a92afae7fcbb0a8453712dfec5de086aaf5cba4 (diff)
Assume OCF is the only KTLS software backend.
This removes support for loadable software backends. The KTLS OCF
support is now always included in kernels with KERN_TLS and the
ktls_ocf.ko module has been removed.

The software encryption routines now take an mbuf directly and use
the TLS mbuf as the crypto buffer when possible.

Bump __FreeBSD_version for software backends in ports.

Reviewed by:	gallatin, markj
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D30138
Diffstat (limited to 'sys/kern/uipc_ktls.c')
-rw-r--r--	sys/kern/uipc_ktls.c	232
1 file changed, 51 insertions(+), 181 deletions(-)
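The heart of the change shows up in ktls_encrypt() below: the software
encryption hook no longer takes a pre-decomposed record (header,
trailer, source/destination iovecs, sequence number, record type) but
the ext_pgs mbuf itself. A before/after sketch of the hook's shape,
reconstructed from the two call sites in this diff; parameter names and
exact types are illustrative, not the verbatim ktls.h declaration:

	/* Before: the caller decomposed each TLS record. */
	int	(*sw_encrypt)(struct ktls_session *tls,
		    const struct tls_record_layer *hdr, uint8_t *trailer,
		    struct iovec *src, struct iovec *dst, int src_cnt,
		    int dst_cnt, uint64_t seqno, uint8_t record_type);

	/* After: the mbuf carries the record; dst_iov is NULL when the
	 * mbuf's pages are writable and encryption happens in place. */
	int	(*sw_encrypt)(struct ktls_session *tls, struct mbuf *m,
		    struct iovec *dst_iov, int dst_iovcnt);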
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
index 1fd1eca39b59..5ca677a26e2c 100644
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -93,8 +93,6 @@ struct ktls_domain_info {
struct ktls_domain_info ktls_domains[MAXMEMDOM];
static struct ktls_wq *ktls_wq;
static struct proc *ktls_proc;
-LIST_HEAD(, ktls_crypto_backend) ktls_backends;
-static struct rmlock ktls_backends_lock;
static uma_zone_t ktls_session_zone;
static uma_zone_t ktls_buffer_zone;
static uint16_t ktls_cpuid_lookup[MAXCPU];
@@ -104,10 +102,6 @@ SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"Kernel TLS offload stats");
-static int ktls_allow_unload;
-SYSCTL_INT(_kern_ipc_tls, OID_AUTO, allow_unload, CTLFLAG_RDTUN,
- &ktls_allow_unload, 0, "Allow software crypto modules to unload");
-
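The allow_unload knob removed above was a CTLFLAG_RDTUN sysctl,
i.e. read-only at runtime and only settable as a boot-time tunable.
Before this commit it would have been enabled roughly like this (usage
sketch; no longer applicable after this change):

	# /boot/loader.conf
	kern.ipc.tls.allow_unload=1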
#ifdef RSS
static int ktls_bind_threads = 1;
#else
@@ -270,76 +264,6 @@ static void ktls_reset_send_tag(void *context, int pending);
#endif
static void ktls_work_thread(void *ctx);
-int
-ktls_crypto_backend_register(struct ktls_crypto_backend *be)
-{
- struct ktls_crypto_backend *curr_be, *tmp;
-
- if (be->api_version != KTLS_API_VERSION) {
- printf("KTLS: API version mismatch (%d vs %d) for %s\n",
- be->api_version, KTLS_API_VERSION,
- be->name);
- return (EINVAL);
- }
-
- rm_wlock(&ktls_backends_lock);
- printf("KTLS: Registering crypto method %s with prio %d\n",
- be->name, be->prio);
- if (LIST_EMPTY(&ktls_backends)) {
- LIST_INSERT_HEAD(&ktls_backends, be, next);
- } else {
- LIST_FOREACH_SAFE(curr_be, &ktls_backends, next, tmp) {
- if (curr_be->prio < be->prio) {
- LIST_INSERT_BEFORE(curr_be, be, next);
- break;
- }
- if (LIST_NEXT(curr_be, next) == NULL) {
- LIST_INSERT_AFTER(curr_be, be, next);
- break;
- }
- }
- }
- rm_wunlock(&ktls_backends_lock);
- return (0);
-}
-
-int
-ktls_crypto_backend_deregister(struct ktls_crypto_backend *be)
-{
- struct ktls_crypto_backend *tmp;
-
- /*
- * Don't error if the backend isn't registered. This permits
- * MOD_UNLOAD handlers to use this function unconditionally.
- */
- rm_wlock(&ktls_backends_lock);
- LIST_FOREACH(tmp, &ktls_backends, next) {
- if (tmp == be)
- break;
- }
- if (tmp == NULL) {
- rm_wunlock(&ktls_backends_lock);
- return (0);
- }
-
- if (!ktls_allow_unload) {
- rm_wunlock(&ktls_backends_lock);
- printf(
- "KTLS: Deregistering crypto method %s is not supported\n",
- be->name);
- return (EBUSY);
- }
-
- if (be->use_count) {
- rm_wunlock(&ktls_backends_lock);
- return (EBUSY);
- }
-
- LIST_REMOVE(be, next);
- rm_wunlock(&ktls_backends_lock);
- return (0);
-}
-
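Every field the removed functions touch (next, api_version, name,
prio, try, use_count) belongs to the now-dead backend descriptor. A
sketch of that structure, reconstructed purely from the uses above;
member order and any members not referenced in this diff are
assumptions, not the verbatim header definition:

	struct ktls_crypto_backend {
		LIST_ENTRY(ktls_crypto_backend) next;
		int	(*try)(struct socket *so, struct ktls_session *tls,
			    int direction);
		int	api_version;
		int	prio;
		int	use_count;
		const char *name;
	};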
#if defined(INET) || defined(INET6)
static u_int
ktls_get_cpu(struct socket *so)
@@ -358,7 +282,7 @@ ktls_get_cpu(struct socket *so)
#endif
/*
* Just use the flowid to shard connections in a repeatable
- * fashion. Note that some crypto backends rely on the
+ * fashion. Note that TLS 1.0 sessions rely on the
* serialization provided by having the same connection use
* the same queue.
*/
@@ -426,9 +350,6 @@ ktls_init(void *dummy __unused)
cpuset_t mask;
int count, domain, error, i;
- rm_init(&ktls_backends_lock, "ktls backends");
- LIST_INIT(&ktls_backends);
-
ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
M_WAITOK | M_ZERO);
@@ -761,7 +682,6 @@ ktls_cleanup(struct ktls_session *tls)
counter_u64_add(ktls_offload_active, -1);
switch (tls->mode) {
case TCP_TLS_MODE_SW:
- MPASS(tls->be != NULL);
switch (tls->params.cipher_algorithm) {
case CRYPTO_AES_CBC:
counter_u64_add(ktls_sw_cbc, -1);
@@ -773,7 +693,7 @@ ktls_cleanup(struct ktls_session *tls)
counter_u64_add(ktls_sw_chacha20, -1);
break;
}
- tls->free(tls);
+ ktls_ocf_free(tls);
break;
case TCP_TLS_MODE_IFNET:
switch (tls->params.cipher_algorithm) {
@@ -996,33 +916,11 @@ ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
{
- struct rm_priotracker prio;
- struct ktls_crypto_backend *be;
+ int error;
- /*
- * Choose the best software crypto backend. Backends are
- * stored in sorted priority order (largest value == most
- * important at the head of the list), so this just stops on
- * the first backend that claims the session by returning
- * success.
- */
- if (ktls_allow_unload)
- rm_rlock(&ktls_backends_lock, &prio);
- LIST_FOREACH(be, &ktls_backends, next) {
- if (be->try(so, tls, direction) == 0)
- break;
- KASSERT(tls->cipher == NULL,
- ("ktls backend leaked a cipher pointer"));
- }
- if (be != NULL) {
- if (ktls_allow_unload)
- be->use_count++;
- tls->be = be;
- }
- if (ktls_allow_unload)
- rm_runlock(&ktls_backends_lock, &prio);
- if (be == NULL)
- return (EOPNOTSUPP);
+ error = ktls_ocf_try(so, tls, direction);
+ if (error)
+ return (error);
tls->mode = TCP_TLS_MODE_SW;
switch (tls->params.cipher_algorithm) {
case CRYPTO_AES_CBC:
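With the backend list gone, ktls_try_sw() collapses to a single call
into OCF. The companion entry points are sketched below as this file
now uses them; the exact prototypes and their header location are
assumptions inferred from the call sites ktls_ocf_try(so, tls,
direction) and ktls_ocf_free(tls) in this diff:

	int	ktls_ocf_try(struct socket *so, struct ktls_session *tls,
		    int direction);
	void	ktls_ocf_free(struct ktls_session *tls);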
@@ -1531,14 +1429,8 @@ ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate)
void
ktls_destroy(struct ktls_session *tls)
{
- struct rm_priotracker prio;
ktls_cleanup(tls);
- if (tls->be != NULL && ktls_allow_unload) {
- rm_rlock(&ktls_backends_lock, &prio);
- tls->be->use_count--;
- rm_runlock(&ktls_backends_lock, &prio);
- }
uma_zfree(ktls_session_zone, tls);
}
@@ -1621,7 +1513,7 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
*
* Compute the final trailer length assuming
* at most one block of padding.
- * tls->params.sb_tls_tlen is the maximum
+ * tls->params.tls_tlen is the maximum
* possible trailer length (padding + digest).
* delta holds the number of excess padding
* bytes if the maximum were used. Those
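The comment's arithmetic is easy to check with a concrete CBC case. A
standalone sketch, assuming AES-CBC with HMAC-SHA256; the cipher
parameters are illustrative stand-ins for what the kernel keeps in
tls->params:

	#include <stdio.h>

	int
	main(void)
	{
		int digest = 32;	/* HMAC-SHA256 MAC length */
		int blksz = 16;		/* AES block size */
		int max_tlen = digest + blksz;	/* tls_tlen: MAC + one block */
		int plen = 100;		/* plaintext payload bytes */

		/* CBC pads (payload + MAC) out to a block boundary; the
		 * pad bytes include the pad-length byte itself. */
		int pad = blksz - (plen + digest) % blksz;
		int delta = max_tlen - (digest + pad);	/* excess pad bytes */

		printf("pad %d, trailer %d, delta %d\n",
		    pad, digest + pad, delta);
		return (0);
	}

With plen = 100: (100 + 32) % 16 = 4, so pad = 12, the real trailer is
44 bytes, and delta = 4 of the assumed-maximum 48 bytes go unused.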
@@ -2096,12 +1988,10 @@ ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
struct socket *so;
struct mbuf *m;
vm_paddr_t parray[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
- struct iovec src_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
struct iovec dst_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
vm_page_t pg;
void *cbuf;
int error, i, len, npages, off, total_pages;
- bool is_anon;
so = top->m_epg_so;
tls = top->m_epg_tls;
@@ -2146,75 +2036,45 @@ ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
m->m_epg_npgs, ktls_maxlen));
/*
- * Generate source and destination iovecs to pass to
- * the SW encryption backend. For writable mbufs, the
- * destination iovec is a copy of the source and
- * encryption is done in place. For file-backed mbufs
- * (from sendfile), anonymous wired pages are
- * allocated and assigned to the destination iovec.
+ * For anonymous mbufs, encryption is done in place.
+ * For file-backed mbufs (from sendfile), anonymous
+ * wired pages are allocated and used as the
+ * encryption destination.
*/
- is_anon = (m->m_epg_flags & EPG_FLAG_ANON) != 0;
-
- off = m->m_epg_1st_off;
- for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
- len = m_epg_pagelen(m, i, off);
- src_iov[i].iov_len = len;
- src_iov[i].iov_base =
- (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]) + off;
- }
-
- if (is_anon) {
- memcpy(dst_iov, src_iov, i * sizeof(struct iovec));
- } else if ((cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
- len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len -
- m->m_epg_1st_off;
- dst_iov[0].iov_base = (char *)cbuf + m->m_epg_1st_off;
- dst_iov[0].iov_len = len;
- parray[0] = DMAP_TO_PHYS((vm_offset_t)cbuf);
- i = 1;
+ if ((m->m_epg_flags & EPG_FLAG_ANON) != 0) {
+ error = (*tls->sw_encrypt)(tls, m, NULL, 0);
} else {
- cbuf = NULL;
- off = m->m_epg_1st_off;
- for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
- do {
- pg = vm_page_alloc(NULL, 0,
- VM_ALLOC_NORMAL |
- VM_ALLOC_NOOBJ |
- VM_ALLOC_NODUMP |
- VM_ALLOC_WIRED |
- VM_ALLOC_WAITFAIL);
- } while (pg == NULL);
-
- len = m_epg_pagelen(m, i, off);
- parray[i] = VM_PAGE_TO_PHYS(pg);
- dst_iov[i].iov_base =
- (char *)(void *)PHYS_TO_DMAP(
- parray[i]) + off;
- dst_iov[i].iov_len = len;
+ if ((cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
+ len = ptoa(m->m_epg_npgs - 1) +
+ m->m_epg_last_len - m->m_epg_1st_off;
+ dst_iov[0].iov_base = (char *)cbuf +
+ m->m_epg_1st_off;
+ dst_iov[0].iov_len = len;
+ parray[0] = DMAP_TO_PHYS((vm_offset_t)cbuf);
+ i = 1;
+ } else {
+ off = m->m_epg_1st_off;
+ for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
+ do {
+ pg = vm_page_alloc(NULL, 0,
+ VM_ALLOC_NORMAL |
+ VM_ALLOC_NOOBJ |
+ VM_ALLOC_NODUMP |
+ VM_ALLOC_WIRED |
+ VM_ALLOC_WAITFAIL);
+ } while (pg == NULL);
+
+ len = m_epg_pagelen(m, i, off);
+ parray[i] = VM_PAGE_TO_PHYS(pg);
+ dst_iov[i].iov_base =
+ (char *)(void *)PHYS_TO_DMAP(
+ parray[i]) + off;
+ dst_iov[i].iov_len = len;
+ }
}
- }
- if (__predict_false(m->m_epg_npgs == 0)) {
- /* TLS 1.0 empty fragment. */
- npages++;
- } else
- npages += m->m_epg_npgs;
+ error = (*tls->sw_encrypt)(tls, m, dst_iov, i);
- error = (*tls->sw_encrypt)(tls,
- (const struct tls_record_layer *)m->m_epg_hdr,
- m->m_epg_trail, src_iov, dst_iov, m->m_epg_npgs, i,
- m->m_epg_seqno, m->m_epg_record_type);
- if (error) {
- counter_u64_add(ktls_offload_failed_crypto, 1);
- break;
- }
-
- /*
- * For file-backed mbufs, release the file-backed
- * pages and replace them in the ext_pgs array with
- * the anonymous wired pages allocated above.
- */
- if (!is_anon) {
/* Free the old pages. */
m->m_ext.ext_free(m);
@@ -2236,6 +2096,16 @@ ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
/* Pages are now writable. */
m->m_epg_flags |= EPG_FLAG_ANON;
}
+ if (error) {
+ counter_u64_add(ktls_offload_failed_crypto, 1);
+ break;
+ }
+
+ if (__predict_false(m->m_epg_npgs == 0)) {
+ /* TLS 1.0 empty fragment. */
+ npages++;
+ } else
+ npages += m->m_epg_npgs;
/*
* Drop a reference to the session now that it is no