-rw-r--r--  sys/kern/uipc_ktls.c | 113
-rw-r--r--  sys/sys/ktls.h       |   5
2 files changed, 115 insertions(+), 3 deletions(-)
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
index 12bb02876083..1e778dbf113a 100644
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -162,6 +162,11 @@ static COUNTER_U64_DEFINE_EARLY(ktls_tasks_active);
 SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
     &ktls_tasks_active, "Number of active tasks");
 
+static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_pending);
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_pending, CTLFLAG_RD,
+    &ktls_cnt_tx_pending,
+    "Number of TLS 1.0 records waiting for earlier TLS records");
+
 static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_queued);
 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD,
     &ktls_cnt_tx_queued,
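
As a usage note, not part of the commit: assuming the _kern_ipc_tls_stats parent above resolves to the kern.ipc.tls.stats sysctl node, the new counter should be readable from userland with "sysctl kern.ipc.tls.stats.sw_tx_pending" or via sysctlbyname(3). A minimal sketch under that assumption:

/* Illustrative sketch only: sample the new counter from userland. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t pending;
	size_t len = sizeof(pending);

	/* OID path inferred from the _kern_ipc_tls_stats parent above. */
	if (sysctlbyname("kern.ipc.tls.stats.sw_tx_pending", &pending, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("TLS 1.0 records deferred for ordering: %ju\n",
	    (uintmax_t)pending);
	return (0);
}
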
@@ -612,6 +617,9 @@ ktls_create_session(struct socket *so, struct tls_enable *en,
 		case CRYPTO_SHA1_HMAC:
 			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
 				/* Implicit IV, no nonce. */
+				tls->sequential_records = true;
+				tls->next_seqno = be64dec(en->rec_seq);
+				STAILQ_INIT(&tls->pending_records);
 			} else {
 				tls->params.tls_hlen += AES_BLOCK_LEN;
 			}
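
This seeds tls->next_seqno from the 8-byte record sequence number userland supplies in en->rec_seq, converting it from network (big-endian) to host byte order with be64dec(9). A standalone sketch of that decoding step, using a made-up rec_seq value:

/* Illustrative sketch only: decoding an initial TLS record sequence number. */
#include <sys/endian.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical 8-byte big-endian sequence number from userland. */
	const uint8_t rec_seq[8] = { 0, 0, 0, 0, 0, 0, 0, 5 };
	uint64_t seqno;

	seqno = be64dec(rec_seq);	/* host order: 5 */
	printf("next_seqno starts at %ju\n", (uintmax_t)seqno);
	return (0);
}
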
@@ -1490,6 +1498,20 @@ void
 ktls_destroy(struct ktls_session *tls)
 {
+	if (tls->sequential_records) {
+		struct mbuf *m, *n;
+		int page_count;
+
+		STAILQ_FOREACH_SAFE(m, &tls->pending_records, m_epg_stailq, n) {
+			page_count = m->m_epg_enc_cnt;
+			while (page_count > 0) {
+				KASSERT(page_count >= m->m_epg_nrdy,
+				    ("%s: too few pages", __func__));
+				page_count -= m->m_epg_nrdy;
+				m = m_free(m);
+			}
+		}
+	}
 	ktls_cleanup(tls);
 	uma_zfree(ktls_session_zone, tls);
 }
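
The teardown walks tls->pending_records with STAILQ_FOREACH_SAFE because each record it visits is freed during the walk: the head mbuf's m_epg_enc_cnt holds the page count for the whole batch and m_epg_nrdy the pages per mbuf, which bounds the inner m_free() loop. A minimal userspace sketch of the iterate-while-freeing idiom, using a made-up struct record instead of mbufs:

/* Illustrative sketch only: freeing every element of a STAILQ while iterating. */
#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct record {
	uint64_t seqno;
	STAILQ_ENTRY(record) link;
};

int
main(void)
{
	STAILQ_HEAD(, record) pending = STAILQ_HEAD_INITIALIZER(pending);
	struct record *r, *tmp;

	for (uint64_t i = 0; i < 4; i++) {
		r = malloc(sizeof(*r));
		if (r == NULL)
			return (1);
		r->seqno = i;
		STAILQ_INSERT_TAIL(&pending, r, link);
	}

	/*
	 * The _SAFE variant saves the next pointer in 'tmp' before the
	 * body runs, so freeing 'r' cannot break the traversal.
	 */
	STAILQ_FOREACH_SAFE(r, &pending, link, tmp) {
		printf("freeing record %ju\n", (uintmax_t)r->seqno);
		free(r);
	}
	return (0);
}
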
@@ -2092,10 +2114,29 @@ ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m,
 	return (error);
 }
 
+/* Number of TLS records in a batch passed to ktls_enqueue(). */
+static u_int
+ktls_batched_records(struct mbuf *m)
+{
+	int page_count, records;
+
+	records = 0;
+	page_count = m->m_epg_enc_cnt;
+	while (page_count > 0) {
+		records++;
+		page_count -= m->m_epg_nrdy;
+		m = m->m_next;
+	}
+	KASSERT(page_count == 0, ("%s: mismatched page count", __func__));
+	return (records);
+}
+
 void
 ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
 {
+	struct ktls_session *tls;
 	struct ktls_wq *wq;
+	int queued;
 	bool running;
 
 	KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
@@ -2113,14 +2154,80 @@ ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
 	 */
 	m->m_epg_so = so;
 
-	wq = &ktls_wq[m->m_epg_tls->wq_index];
+	queued = 1;
+	tls = m->m_epg_tls;
+	wq = &ktls_wq[tls->wq_index];
 	mtx_lock(&wq->mtx);
-	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
+	if (__predict_false(tls->sequential_records)) {
+		/*
+		 * For TLS 1.0, records must be encrypted
+		 * sequentially.  For a given connection, all records
+		 * queued to the associated work queue are processed
+		 * sequentially.  However, sendfile(2) might complete
+		 * I/O requests spanning multiple TLS records out of
+		 * order.  Here we ensure TLS records are enqueued to
+		 * the work queue in FIFO order.
+		 *
+		 * tls->next_seqno holds the sequence number of the
+		 * next TLS record that should be enqueued to the work
+		 * queue.  If this next record is not tls->next_seqno,
+		 * it must be a future record, so insert it, sorted by
+		 * TLS sequence number, into tls->pending_records and
+		 * return.
+		 *
+		 * If this TLS record matches tls->next_seqno, place
+		 * it in the work queue and then check
+		 * tls->pending_records to see if any
+		 * previously-queued records are now ready for
+		 * encryption.
+		 */
+		if (m->m_epg_seqno != tls->next_seqno) {
+			struct mbuf *n, *p;
+
+			p = NULL;
+			STAILQ_FOREACH(n, &tls->pending_records, m_epg_stailq) {
+				if (n->m_epg_seqno > m->m_epg_seqno)
+					break;
+				p = n;
+			}
+			if (n == NULL)
+				STAILQ_INSERT_TAIL(&tls->pending_records, m,
+				    m_epg_stailq);
+			else if (p == NULL)
+				STAILQ_INSERT_HEAD(&tls->pending_records, m,
+				    m_epg_stailq);
+			else
+				STAILQ_INSERT_AFTER(&tls->pending_records, p, m,
+				    m_epg_stailq);
+			mtx_unlock(&wq->mtx);
+			counter_u64_add(ktls_cnt_tx_pending, 1);
+			return;
+		}
+
+		tls->next_seqno += ktls_batched_records(m);
+		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
+
+		while (!STAILQ_EMPTY(&tls->pending_records)) {
+			struct mbuf *n;
+
+			n = STAILQ_FIRST(&tls->pending_records);
+			if (n->m_epg_seqno != tls->next_seqno)
+				break;
+
+			queued++;
+			STAILQ_REMOVE_HEAD(&tls->pending_records, m_epg_stailq);
+			tls->next_seqno += ktls_batched_records(n);
+			STAILQ_INSERT_TAIL(&wq->m_head, n, m_epg_stailq);
+		}
+		counter_u64_add(ktls_cnt_tx_pending, -(queued - 1));
+	} else
+		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
+
 	running = wq->running;
 	mtx_unlock(&wq->mtx);
 	if (!running)
 		wakeup(wq);
-	counter_u64_add(ktls_cnt_tx_queued, 1);
+	counter_u64_add(ktls_cnt_tx_queued, queued);
 }
 
 /*
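
Stripped of mbuf and work-queue details, the enqueue logic above reduces to: if the arriving record is not the expected sequence number, insert it into a pending list kept sorted by sequence number; otherwise queue it and then drain any pending records that have become contiguous. The sketch below models that with a made-up struct record and advances next_seqno by one per record, whereas the kernel code advances it by ktls_batched_records() because one mbuf chain can carry several records; it is an illustration under those simplifications, not the committed code:

/* Illustrative sketch only: a simplified model of the FIFO ordering above. */
#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct record {
	uint64_t seqno;
	STAILQ_ENTRY(record) link;
};

static STAILQ_HEAD(, record) pending = STAILQ_HEAD_INITIALIZER(pending);
static STAILQ_HEAD(, record) workq = STAILQ_HEAD_INITIALIZER(workq);
static uint64_t next_seqno;

static void
enqueue(struct record *r)
{
	struct record *n, *p;

	if (r->seqno != next_seqno) {
		/* Future record: insert sorted by sequence number. */
		p = NULL;
		STAILQ_FOREACH(n, &pending, link) {
			if (n->seqno > r->seqno)
				break;
			p = n;
		}
		if (p == NULL)
			STAILQ_INSERT_HEAD(&pending, r, link);
		else
			STAILQ_INSERT_AFTER(&pending, p, r, link);
		return;
	}

	/* Expected record: queue it, then drain newly contiguous records. */
	STAILQ_INSERT_TAIL(&workq, r, link);
	next_seqno++;
	while ((n = STAILQ_FIRST(&pending)) != NULL &&
	    n->seqno == next_seqno) {
		STAILQ_REMOVE_HEAD(&pending, link);
		STAILQ_INSERT_TAIL(&workq, n, link);
		next_seqno++;
	}
}

int
main(void)
{
	/* Records complete out of order: 2, 0, 1. */
	const uint64_t order[] = { 2, 0, 1 };
	struct record *r;

	for (size_t i = 0; i < 3; i++) {
		r = malloc(sizeof(*r));
		if (r == NULL)
			return (1);
		r->seqno = order[i];
		enqueue(r);
	}

	/* The work queue now holds 0, 1, 2. */
	STAILQ_FOREACH(r, &workq, link)
		printf("%ju\n", (uintmax_t)r->seqno);
	return (0);
}
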
diff --git a/sys/sys/ktls.h b/sys/sys/ktls.h
index 71d55ee1b3d8..cd0a786bb345 100644
--- a/sys/sys/ktls.h
+++ b/sys/sys/ktls.h
@@ -198,6 +198,11 @@ struct ktls_session {
 	bool reset_pending;
 	bool disable_ifnet_pending;
 	bool sync_dispatch;
+	bool sequential_records;
+
+	/* Only used for TLS 1.0. */
+	uint64_t next_seqno;
+	STAILQ_HEAD(, mbuf) pending_records;
 } __aligned(CACHE_LINE_SIZE);
 
 extern unsigned int ktls_ifnet_max_rexmit_pct;