author     Hans Petter Selasky <hselasky@FreeBSD.org>   2021-03-31 10:36:36 +0000
committer  Hans Petter Selasky <hselasky@FreeBSD.org>   2021-06-02 11:55:45 +0000
commit     e31579b8558db508dfc3f8fc276611a7c3c93aa1 (patch)
tree       8d3887610e40354d6a37a095f312768fa72b0f10
parent     28d95cc69fee30b160418d83bebaf7fddae6058e (diff)
Add TCP LRO support for VLAN and VxLAN.
This change makes the TCP LRO code more generic and flexible with
regards to supporting multiple different TCP encapsulation protocols,
and in general lays the ground for broader TCP LRO support.

The main job of the TCP LRO code is to merge TCP packets for the same
flow, to reduce the number of calls to upper layers. This reduces CPU
usage and increases performance, due to being able to send larger TSO
offloaded data chunks at a time. Basically the TCP LRO code makes it
possible to avoid per-packet interaction by the host CPU.

Because the current TCP LRO code was tightly bound to and optimized
for TCP/IP over Ethernet only, several larger changes were needed.
Also a minor bug was fixed in the flushing mechanism for inactive
entries, where the expire time, "le->mtime", was not always properly
set.

To avoid having to re-run time-consuming regression tests for every
change, it was chosen to squash the following list of changes into a
single commit:
- Refactor parsing of all address information into the "lro_parser"
  structure. This easily allows the parsing code to be reused for
  inner headers.
- Speed up header data comparison. Don't compare field by field, but
  instead use an unsigned long array, where the fields get packed
  (see the sketch after the diffstat below).
- Refactor the IPv4/TCP/UDP checksum computations, so that they may
  be computed recursively, only applying deltas as the result of
  updating payload data (a sketch of this delta technique follows
  right after this message).
- Make smaller inline functions doing one operation at a time instead
  of big functions having repeated code.
- Refactor the TCP ACK compression code to only execute once per TCP
  LRO flush. This gives a minor performance improvement and keeps the
  code simple.
- Use sbintime() for all time-keeping. This change also fixes
  flushing of inactive entries.
- Try to shrink the size of the LRO entry, because it is frequently
  zeroed.
- Remove unused TCP LRO macros.
- Clean up unused TCP LRO statistics counters while at it.
- Try to use __predict_true() and __predict_false() to optimise CPU
  branch predictions.

Bump the __FreeBSD_version due to adding a new member to the
"lro_ctrl" structure.

Tested by:      Netflix
Reviewed by:    rrs (transport)
Differential Revision:  https://reviews.freebsd.org/D29564
Sponsored by:   Mellanox Technologies // NVIDIA Networking

(cherry picked from commit 9ca874cf740ee68c5742df8b5f9e20910085c011)
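To make the delta-checksum bullet above concrete, here is a minimal
stand-alone C sketch of the technique. This is not the kernel code:
the helper names csum_fold() and assign_and_delta16() are invented
for illustration, and the kernel's equivalent of the latter in the
diff below is tcp_lro_assign_and_checksum_16().

#include <stdint.h>
#include <stdio.h>

/*
 * Fold a 32-bit ones'-complement accumulator back into 16 bits;
 * the same folding loop appears throughout tcp_lro_update_checksum().
 */
static uint16_t
csum_fold(uint32_t csum)
{
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);
	return ((uint16_t)csum);
}

/*
 * Store "value" into a 16-bit header field and return the ones'-
 * complement delta (~old + new, folded) which that change
 * contributes to any checksum covering the field.
 */
static uint16_t
assign_and_delta16(uint16_t *field, uint16_t value)
{
	uint32_t csum = 0xffff - *field + value;

	*field = value;
	return (csum_fold(csum));
}

int
main(void)
{
	uint16_t ip_len = 0x0040;	/* example old header field */
	uint16_t delta;

	delta = assign_and_delta16(&ip_len, 0x0588);
	/* "delta" can now be folded into, e.g., the IPv4 header checksum. */
	printf("new len=0x%04x delta=0x%04x\n", ip_len, delta);
	return (0);
}

The point of propagating deltas this way is that growing an LRO entry
only touches the handful of header fields that changed; the merged
TCP payload is never summed again.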
-rw-r--r--sys/netinet/in_pcb.h4
-rw-r--r--sys/netinet/tcp_lro.c1829
-rw-r--r--sys/netinet/tcp_lro.h151
-rw-r--r--sys/netinet/tcp_subr.c2
-rw-r--r--sys/netinet/tcp_var.h2
-rw-r--r--sys/sys/mbuf.h6
-rw-r--r--sys/sys/param.h2
7 files changed, 1244 insertions, 752 deletions
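Before the diff itself, a short stand-alone C sketch of the packed
header comparison mentioned in the commit message. The field names
and layout below are assumptions for illustration only; the real
packed structures live in the reworked sys/netinet/tcp_lro.h, and the
same raw[] view also feeds the bucket hash in tcp_lro_rx_get_bucket()
further down.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical flow-identifying fields; not the kernel's exact layout. */
struct flow_fields {
	uint16_t lro_type;
	uint16_t vlan_id;
	uint32_t vxlan_vni;
	uint16_t s_port;
	uint16_t d_port;
	uint8_t	 s_addr[16];	/* IPv6-sized; IPv4 uses the first 4 bytes */
	uint8_t	 d_addr[16];
};

/* Number of unsigned longs needed to span the fields, rounded up. */
#define RAW_WORDS \
	((sizeof(struct flow_fields) + sizeof(unsigned long) - 1) / \
	 sizeof(unsigned long))

struct flow_data {
	union {
		struct flow_fields fields;
		unsigned long raw[RAW_WORDS];	/* packed view of all fields */
	};
};

static void
flow_init(struct flow_data *p)
{
	/* Zero everything, padding included, so raw[] compares are safe. */
	memset(p, 0, sizeof(*p));
}

/* Compare two flows word by word instead of field by field. */
static bool
flow_equal(const struct flow_data *a, const struct flow_data *b)
{
	for (unsigned i = 0; i != RAW_WORDS; i++)
		if (a->raw[i] != b->raw[i])
			return (false);
	return (true);
}

/* The raw[] view can also seed a hash, as tcp_lro_rx_get_bucket() does. */
static unsigned long
flow_hash(const struct flow_data *p)
{
	unsigned long hash = 0;

	for (unsigned i = 0; i != RAW_WORDS; i++)
		hash += p->raw[i];
	return (hash);
}

int
main(void)
{
	struct flow_data a, b;

	flow_init(&a);
	flow_init(&b);
	a.fields.s_port = b.fields.s_port = 0x1234;
	return ((flow_equal(&a, &b) && flow_hash(&a) == flow_hash(&b)) ? 0 : 1);
}

A single multi-word comparison loop replaces a chain of per-field
compares, which is why the new parser code zeroes the whole structure
up front whenever update_data is set.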
diff --git a/sys/netinet/in_pcb.h b/sys/netinet/in_pcb.h
index 080d07cc7218..9604a837cfb4 100644
--- a/sys/netinet/in_pcb.h
+++ b/sys/netinet/in_pcb.h
@@ -731,8 +731,8 @@ int inp_so_options(const struct inpcb *inp);
/*
* Flags for inp_flags2.
*/
-#define INP_2UNUSED1 0x00000001
-#define INP_2UNUSED2 0x00000002
+#define INP_MBUF_L_ACKS 0x00000001 /* We need large mbufs for ack compression */
+#define INP_MBUF_ACKCMP 0x00000002 /* TCP mbuf ack compression ok */
#define INP_PCBGROUPWILD 0x00000004 /* in pcbgroup wildcard list */
#define INP_REUSEPORT 0x00000008 /* SO_REUSEPORT option is set */
#define INP_FREED 0x00000010 /* inp itself is not valid */
diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c
index baa2fe522e90..09fc024c3d73 100644
--- a/sys/netinet/tcp_lro.c
+++ b/sys/netinet/tcp_lro.c
@@ -4,7 +4,7 @@
* Copyright (c) 2007, Myricom Inc.
* Copyright (c) 2008, Intel Corporation.
* Copyright (c) 2012 The FreeBSD Foundation
- * Copyright (c) 2016 Mellanox Technologies.
+ * Copyright (c) 2016-2021 Mellanox Technologies.
* All rights reserved.
*
* Portions of this software were developed by Bjoern Zeeb
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
+#include <net/bpf.h>
#include <net/vnet.h>
#include <netinet/in_systm.h>
@@ -64,54 +65,64 @@ __FBSDID("$FreeBSD$");
#include <netinet/tcp_seq.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>
+#include <netinet/tcpip.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>
+#include <netinet/udp.h>
#include <netinet6/ip6_var.h>
#include <machine/in_cksum.h>
static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");
-#define TCP_LRO_UPDATE_CSUM 1
-#ifndef TCP_LRO_UPDATE_CSUM
-#define TCP_LRO_INVALID_CSUM 0x0000
-#endif
+#define TCP_LRO_TS_OPTION \
+ ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
+ (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
static void tcp_lro_rx_done(struct lro_ctrl *lc);
-static int tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m,
- uint32_t csum, int use_hash);
+static int tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m,
+ uint32_t csum, bool use_hash);
+
+#ifdef TCPHPTS
+static bool do_bpf_strip_and_compress(struct inpcb *, struct lro_ctrl *,
+ struct lro_entry *, struct mbuf **, struct mbuf **, struct mbuf **, bool *, bool);
+
+#endif
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"TCP LRO");
-static long tcplro_stacks_wanting_mbufq = 0;
+static long tcplro_stacks_wanting_mbufq;
counter_u64_t tcp_inp_lro_direct_queue;
counter_u64_t tcp_inp_lro_wokeup_queue;
counter_u64_t tcp_inp_lro_compressed;
-counter_u64_t tcp_inp_lro_single_push;
counter_u64_t tcp_inp_lro_locks_taken;
-counter_u64_t tcp_inp_lro_sack_wake;
+counter_u64_t tcp_extra_mbuf;
+counter_u64_t tcp_would_have_but;
+counter_u64_t tcp_comp_total;
+counter_u64_t tcp_uncomp_total;
static unsigned tcp_lro_entries = TCP_LRO_ENTRIES;
-static int32_t hold_lock_over_compress = 0;
-SYSCTL_INT(_net_inet_tcp_lro, OID_AUTO, hold_lock, CTLFLAG_RW,
- &hold_lock_over_compress, 0,
- "Do we hold the lock over the compress of mbufs?");
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
"default number of LRO entries");
+
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD,
&tcp_inp_lro_direct_queue, "Number of lro's fully queued to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD,
&tcp_inp_lro_wokeup_queue, "Number of lro's where we woke up transport via hpts");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD,
&tcp_inp_lro_compressed, "Number of lro's compressed and sent to transport");
-SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, single, CTLFLAG_RD,
- &tcp_inp_lro_single_push, "Number of lro's sent with single segment");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD,
&tcp_inp_lro_locks_taken, "Number of lro's inp_wlocks taken");
-SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, sackwakeups, CTLFLAG_RD,
- &tcp_inp_lro_sack_wake, "Number of wakeups caused by sack/fin");
+SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, extra_mbuf, CTLFLAG_RD,
+ &tcp_extra_mbuf, "Number of times we had an extra compressed ack dropped into the tp");
+SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, would_have_but, CTLFLAG_RD,
+ &tcp_would_have_but, "Number of times we would have had an extra compressed, but mget failed");
+SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, with_m_ackcmp, CTLFLAG_RD,
+ &tcp_comp_total, "Number of mbufs queued with M_ACKCMP flags set");
+SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, without_m_ackcmp, CTLFLAG_RD,
+ &tcp_uncomp_total, "Number of mbufs queued without M_ACKCMP");
void
tcp_lro_reg_mbufq(void)
@@ -203,34 +214,243 @@ tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
return (0);
}
-static struct tcphdr *
-tcp_lro_get_th(struct lro_entry *le, struct mbuf *m)
+struct vxlan_header {
+ uint32_t vxlh_flags;
+ uint32_t vxlh_vni;
+};
+
+static inline void *
+tcp_lro_low_level_parser(void *ptr, struct lro_parser *parser, bool update_data, bool is_vxlan)
{
- struct ether_header *eh;
- struct tcphdr *th = NULL;
-#ifdef INET6
- struct ip6_hdr *ip6 = NULL; /* Keep compiler happy. */
-#endif
+ const struct ether_vlan_header *eh;
+ void *old;
+ uint16_t eth_type;
+
+ if (update_data)
+ memset(parser, 0, sizeof(*parser));
+
+ old = ptr;
+
+ if (is_vxlan) {
+ const struct vxlan_header *vxh;
+ vxh = ptr;
+ ptr = (uint8_t *)ptr + sizeof(*vxh);
+ if (update_data) {
+ parser->data.vxlan_vni =
+ vxh->vxlh_vni & htonl(0xffffff00);
+ }
+ }
+
+ eh = ptr;
+ if (__predict_false(eh->evl_encap_proto == htons(ETHERTYPE_VLAN))) {
+ eth_type = eh->evl_proto;
+ if (update_data) {
+ /* strip priority and keep VLAN ID only */
+ parser->data.vlan_id = eh->evl_tag & htons(EVL_VLID_MASK);
+ }
+ /* advance to next header */
+ ptr = (uint8_t *)ptr + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ } else {
+ eth_type = eh->evl_encap_proto;
+ /* advance to next header */
+ ptr = (uint8_t *)ptr + ETHER_HDR_LEN;
+ }
+
+ switch (eth_type) {
#ifdef INET
- struct ip *ip4 = NULL; /* Keep compiler happy. */
+ case htons(ETHERTYPE_IP):
+ parser->ip4 = ptr;
+ /* Ensure there are no IPv4 options. */
+ if ((parser->ip4->ip_hl << 2) != sizeof(*parser->ip4))
+ break;
+ /* .. and the packet is not fragmented. */
+ if (parser->ip4->ip_off & htons(IP_MF|IP_OFFMASK))
+ break;
+ ptr = (uint8_t *)ptr + (parser->ip4->ip_hl << 2);
+ if (update_data) {
+ parser->data.s_addr.v4 = parser->ip4->ip_src;
+ parser->data.d_addr.v4 = parser->ip4->ip_dst;
+ }
+ switch (parser->ip4->ip_p) {
+ case IPPROTO_UDP:
+ parser->udp = ptr;
+ if (update_data) {
+ parser->data.lro_type = LRO_TYPE_IPV4_UDP;
+ parser->data.s_port = parser->udp->uh_sport;
+ parser->data.d_port = parser->udp->uh_dport;
+ } else {
+ MPASS(parser->data.lro_type == LRO_TYPE_IPV4_UDP);
+ }
+ ptr = ((uint8_t *)ptr + sizeof(*parser->udp));
+ parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
+ return (ptr);
+ case IPPROTO_TCP:
+ parser->tcp = ptr;
+ if (update_data) {
+ parser->data.lro_type = LRO_TYPE_IPV4_TCP;
+ parser->data.s_port = parser->tcp->th_sport;
+ parser->data.d_port = parser->tcp->th_dport;
+ } else {
+ MPASS(parser->data.lro_type == LRO_TYPE_IPV4_TCP);
+ }
+ ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
+ parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
+ return (ptr);
+ default:
+ break;
+ }
+ break;
#endif
-
- eh = mtod(m, struct ether_header *);
- switch (le->eh_type) {
#ifdef INET6
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(eh + 1);
- th = (struct tcphdr *)(ip6 + 1);
+ case htons(ETHERTYPE_IPV6):
+ parser->ip6 = ptr;
+ ptr = (uint8_t *)ptr + sizeof(*parser->ip6);
+ if (update_data) {
+ parser->data.s_addr.v6 = parser->ip6->ip6_src;
+ parser->data.d_addr.v6 = parser->ip6->ip6_dst;
+ }
+ switch (parser->ip6->ip6_nxt) {
+ case IPPROTO_UDP:
+ parser->udp = ptr;
+ if (update_data) {
+ parser->data.lro_type = LRO_TYPE_IPV6_UDP;
+ parser->data.s_port = parser->udp->uh_sport;
+ parser->data.d_port = parser->udp->uh_dport;
+ } else {
+ MPASS(parser->data.lro_type == LRO_TYPE_IPV6_UDP);
+ }
+ ptr = (uint8_t *)ptr + sizeof(*parser->udp);
+ parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
+ return (ptr);
+ case IPPROTO_TCP:
+ parser->tcp = ptr;
+ if (update_data) {
+ parser->data.lro_type = LRO_TYPE_IPV6_TCP;
+ parser->data.s_port = parser->tcp->th_sport;
+ parser->data.d_port = parser->tcp->th_dport;
+ } else {
+ MPASS(parser->data.lro_type == LRO_TYPE_IPV6_TCP);
+ }
+ ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
+ parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
+ return (ptr);
+ default:
+ break;
+ }
break;
#endif
+ default:
+ break;
+ }
+ /* Invalid packet - cannot parse */
+ return (NULL);
+}
+
+static const int vxlan_csum = CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
+ CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;
+
+static inline struct lro_parser *
+tcp_lro_parser(struct mbuf *m, struct lro_parser *po, struct lro_parser *pi, bool update_data)
+{
+ void *data_ptr;
+
+ /* Try to parse outer headers first. */
+ data_ptr = tcp_lro_low_level_parser(m->m_data, po, update_data, false);
+ if (data_ptr == NULL || po->total_hdr_len > m->m_len)
+ return (NULL);
+
+ if (update_data) {
+ /* Store VLAN ID, if any. */
+ if (__predict_false(m->m_flags & M_VLANTAG)) {
+ po->data.vlan_id =
+ htons(m->m_pkthdr.ether_vtag) & htons(EVL_VLID_MASK);
+ }
+ }
+
+ switch (po->data.lro_type) {
+ case LRO_TYPE_IPV4_UDP:
+ case LRO_TYPE_IPV6_UDP:
+ /* Check for VXLAN headers. */
+ if ((m->m_pkthdr.csum_flags & vxlan_csum) != vxlan_csum)
+ break;
+
+ /* Try to parse inner headers. */
+ data_ptr = tcp_lro_low_level_parser(data_ptr, pi, update_data, true);
+ if (data_ptr == NULL || pi->total_hdr_len > m->m_len)
+ break;
+
+ /* Verify supported header types. */
+ switch (pi->data.lro_type) {
+ case LRO_TYPE_IPV4_TCP:
+ case LRO_TYPE_IPV6_TCP:
+ return (pi);
+ default:
+ break;
+ }
+ break;
+ case LRO_TYPE_IPV4_TCP:
+ case LRO_TYPE_IPV6_TCP:
+ if (update_data)
+ memset(pi, 0, sizeof(*pi));
+ return (po);
+ default:
+ break;
+ }
+ return (NULL);
+}
+
+static inline int
+tcp_lro_trim_mbuf_chain(struct mbuf *m, const struct lro_parser *po)
+{
+ int len;
+
+ switch (po->data.lro_type) {
#ifdef INET
- case ETHERTYPE_IP:
- ip4 = (struct ip *)(eh + 1);
- th = (struct tcphdr *)(ip4 + 1);
+ case LRO_TYPE_IPV4_TCP:
+ len = ((uint8_t *)po->ip4 - (uint8_t *)m->m_data) +
+ ntohs(po->ip4->ip_len);
+ break;
+#endif
+#ifdef INET6
+ case LRO_TYPE_IPV6_TCP:
+ len = ((uint8_t *)po->ip6 - (uint8_t *)m->m_data) +
+ ntohs(po->ip6->ip6_plen) + sizeof(*po->ip6);
break;
#endif
+ default:
+ return (TCP_LRO_CANNOT);
+ }
+
+ /*
+ * If the frame is padded beyond the end of the IP packet,
+ * then trim the extra bytes off:
+ */
+ if (__predict_true(m->m_pkthdr.len == len)) {
+ return (0);
+ } else if (m->m_pkthdr.len > len) {
+ m_adj(m, len - m->m_pkthdr.len);
+ return (0);
+ }
+ return (TCP_LRO_CANNOT);
+}
+
+static struct tcphdr *
+tcp_lro_get_th(struct mbuf *m)
+{
+ return ((struct tcphdr *)((uint8_t *)m->m_data + m->m_pkthdr.lro_tcp_h_off));
+}
+
+static void
+lro_free_mbuf_chain(struct mbuf *m)
+{
+ struct mbuf *save;
+
+ while (m) {
+ save = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ m_freem(m);
+ m = save;
}
- return (th);
}
void
@@ -245,7 +465,7 @@ tcp_lro_free(struct lro_ctrl *lc)
/* free active mbufs, if any */
while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
tcp_lro_active_remove(le);
- m_freem(le->m_head);
+ lro_free_mbuf_chain(le->m_head);
}
/* free hash table */
@@ -264,86 +484,67 @@ tcp_lro_free(struct lro_ctrl *lc)
}
static uint16_t
-tcp_lro_csum_th(struct tcphdr *th)
+tcp_lro_rx_csum_tcphdr(const struct tcphdr *th)
{
- uint32_t ch;
- uint16_t *p, l;
-
- ch = th->th_sum = 0x0000;
- l = th->th_off;
- p = (uint16_t *)th;
- while (l > 0) {
- ch += *p;
- p++;
- ch += *p;
- p++;
- l--;
+ const uint16_t *ptr;
+ uint32_t csum;
+ uint16_t len;
+
+ csum = -th->th_sum; /* exclude checksum field */
+ len = th->th_off;
+ ptr = (const uint16_t *)th;
+ while (len--) {
+ csum += *ptr;
+ ptr++;
+ csum += *ptr;
+ ptr++;
}
- while (ch > 0xffff)
- ch = (ch >> 16) + (ch & 0xffff);
+ while (csum > 0xffff)
+ csum = (csum >> 16) + (csum & 0xffff);
- return (ch & 0xffff);
+ return (csum);
}
static uint16_t
-tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
- uint16_t tcp_data_len, uint16_t csum)
+tcp_lro_rx_csum_data(const struct lro_parser *pa, uint16_t tcp_csum)
{
uint32_t c;
uint16_t cs;
- c = csum;
+ c = tcp_csum;
- /* Remove length from checksum. */
- switch (le->eh_type) {
+ switch (pa->data.lro_type) {
#ifdef INET6
- case ETHERTYPE_IPV6:
- {
- struct ip6_hdr *ip6;
-
- ip6 = (struct ip6_hdr *)l3hdr;
- if (le->append_cnt == 0)
- cs = ip6->ip6_plen;
- else {
- uint32_t cx;
-
- cx = ntohs(ip6->ip6_plen);
- cs = in6_cksum_pseudo(ip6, cx, ip6->ip6_nxt, 0);
- }
+ case LRO_TYPE_IPV6_TCP:
+ /* Compute full pseudo IPv6 header checksum. */
+ cs = in6_cksum_pseudo(pa->ip6, ntohs(pa->ip6->ip6_plen), pa->ip6->ip6_nxt, 0);
break;
- }
#endif
#ifdef INET
- case ETHERTYPE_IP:
- {
- struct ip *ip4;
-
- ip4 = (struct ip *)l3hdr;
- if (le->append_cnt == 0)
- cs = ip4->ip_len;
- else {
- cs = in_addword(ntohs(ip4->ip_len) - sizeof(*ip4),
- IPPROTO_TCP);
- cs = in_pseudo(ip4->ip_src.s_addr, ip4->ip_dst.s_addr,
- htons(cs));
- }
+ case LRO_TYPE_IPV4_TCP:
+ /* Compute full pseudo IPv4 header checksum. */
+ cs = in_addword(ntohs(pa->ip4->ip_len) - sizeof(*pa->ip4), IPPROTO_TCP);
+ cs = in_pseudo(pa->ip4->ip_src.s_addr, pa->ip4->ip_dst.s_addr, htons(cs));
break;
- }
#endif
default:
cs = 0; /* Keep compiler happy. */
+ break;
}
+ /* Complement checksum. */
cs = ~cs;
c += cs;
- /* Remove TCP header csum. */
- cs = ~tcp_lro_csum_th(th);
+ /* Remove TCP header checksum. */
+ cs = ~tcp_lro_rx_csum_tcphdr(pa->tcp);
c += cs;
+
+ /* Compute checksum remainder. */
while (c > 0xffff)
c = (c >> 16) + (c & 0xffff);
- return (c & 0xffff);
+ return (c);
}
static void
@@ -361,83 +562,51 @@ void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
struct lro_entry *le, *le_tmp;
- struct timeval tv;
+ sbintime_t sbt;
if (LIST_EMPTY(&lc->lro_active))
return;
- getmicrouptime(&tv);
- timevalsub(&tv, timeout);
+ /* get timeout time */
+ sbt = getsbinuptime() - tvtosbt(*timeout);
+
LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
- if (timevalcmp(&tv, &le->mtime, >=)) {
+ if (sbt >= le->alloc_time) {
tcp_lro_active_remove(le);
tcp_lro_flush(lc, le);
}
}
}
-#ifdef INET6
-static int
-tcp_lro_rx_ipv6(struct lro_ctrl *lc, struct mbuf *m, struct ip6_hdr *ip6,
- struct tcphdr **th)
-{
-
- /* XXX-BZ we should check the flow-label. */
-
- /* XXX-BZ We do not yet support ext. hdrs. */
- if (ip6->ip6_nxt != IPPROTO_TCP)
- return (TCP_LRO_NOT_SUPPORTED);
-
- /* Find the TCP header. */
- *th = (struct tcphdr *)(ip6 + 1);
-
- return (0);
-}
-#endif
-
#ifdef INET
static int
-tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4,
- struct tcphdr **th)
+tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4)
{
- int csum_flags;
uint16_t csum;
- if (ip4->ip_p != IPPROTO_TCP)
- return (TCP_LRO_NOT_SUPPORTED);
-
- /* Ensure there are no options. */
- if ((ip4->ip_hl << 2) != sizeof (*ip4))
- return (TCP_LRO_CANNOT);
-
- /* .. and the packet is not fragmented. */
- if (ip4->ip_off & htons(IP_MF|IP_OFFMASK))
- return (TCP_LRO_CANNOT);
-
/* Legacy IP has a header checksum that needs to be correct. */
- csum_flags = m->m_pkthdr.csum_flags;
- if (csum_flags & CSUM_IP_CHECKED) {
- if (__predict_false((csum_flags & CSUM_IP_VALID) == 0)) {
+ if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
+ if (__predict_false((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)) {
lc->lro_bad_csum++;
return (TCP_LRO_CANNOT);
}
} else {
csum = in_cksum_hdr(ip4);
- if (__predict_false((csum) != 0)) {
+ if (__predict_false(csum != 0)) {
lc->lro_bad_csum++;
return (TCP_LRO_CANNOT);
}
}
- /* Find the TCP header (we assured there are no IP options). */
- *th = (struct tcphdr *)(ip4 + 1);
return (0);
}
#endif
+#ifdef TCPHPTS
static void
-tcp_lro_log(struct tcpcb *tp, struct lro_ctrl *lc,
- struct lro_entry *le, struct mbuf *m, int frm, int32_t tcp_data_len,
- uint32_t th_seq , uint32_t th_ack, uint16_t th_win)
+tcp_lro_log(struct tcpcb *tp, const struct lro_ctrl *lc,
+ const struct lro_entry *le, const struct mbuf *m,
+ int frm, int32_t tcp_data_len, uint32_t th_seq,
+ uint32_t th_ack, uint16_t th_win)
{
if (tp->t_logstate != TCP_LOG_STATE_OFF) {
union tcp_log_stackspecific log;
@@ -452,13 +621,13 @@ tcp_lro_log(struct tcpcb *tp, struct lro_ctrl *lc,
log.u_bbr.flex2 = m->m_pkthdr.len;
else
log.u_bbr.flex2 = 0;
- log.u_bbr.flex3 = le->append_cnt;
- log.u_bbr.flex4 = le->p_len;
- log.u_bbr.flex5 = le->m_head->m_pkthdr.len;
- log.u_bbr.delRate = le->m_head->m_flags;
- log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp;
- log.u_bbr.flex6 = lc->lro_length_lim;
- log.u_bbr.flex7 = lc->lro_ackcnt_lim;
+ if (le->m_head != NULL) {
+ log.u_bbr.flex3 = le->m_head->m_pkthdr.lro_nsegs;
+ log.u_bbr.flex4 = le->m_head->m_pkthdr.lro_tcp_d_len;
+ log.u_bbr.flex5 = le->m_head->m_pkthdr.len;
+ log.u_bbr.delRate = le->m_head->m_flags;
+ log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp;
+ }
log.u_bbr.inflight = th_seq;
log.u_bbr.timeStamp = cts;
log.u_bbr.epoch = le->next_seq;
@@ -468,9 +637,13 @@ tcp_lro_log(struct tcpcb *tp, struct lro_ctrl *lc,
log.u_bbr.cwnd_gain = le->window;
log.u_bbr.cur_del_rate = (uintptr_t)m;
log.u_bbr.bw_inuse = (uintptr_t)le->m_head;
- log.u_bbr.pkts_out = le->mbuf_cnt; /* Total mbufs added */
- log.u_bbr.applimited = le->ulp_csum;
- log.u_bbr.lost = le->mbuf_appended;
+ log.u_bbr.flex6 = sbttous(lc->lro_last_queue_time);
+ log.u_bbr.flex7 = le->compressed;
+ log.u_bbr.pacing_gain = le->uncompressed;
+ if (in_epoch(net_epoch_preempt))
+ log.u_bbr.inhpts = 1;
+ else
+ log.u_bbr.inhpts = 0;
TCP_LOG_EVENTP(tp, NULL,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd,
@@ -478,205 +651,294 @@ tcp_lro_log(struct tcpcb *tp, struct lro_ctrl *lc,
0, &log, false, &tv);
}
}
+#endif
-static void
-tcp_flush_out_le(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, int locked)
+static inline void
+tcp_lro_assign_and_checksum_16(uint16_t *ptr, uint16_t value, uint16_t *psum)
{
- if (le->append_cnt > 1) {
- struct tcphdr *th;
- uint16_t p_len;
+ uint32_t csum;
- p_len = htons(le->p_len);
- switch (le->eh_type) {
-#ifdef INET6
- case ETHERTYPE_IPV6:
- {
- struct ip6_hdr *ip6;
+ csum = 0xffff - *ptr + value;
+ while (csum > 0xffff)
+ csum = (csum >> 16) + (csum & 0xffff);
+ *ptr = value;
+ *psum = csum;
+}
+
+static uint16_t
+tcp_lro_update_checksum(const struct lro_parser *pa, const struct lro_entry *le,
+ uint16_t payload_len, uint16_t delta_sum)
+{
+ uint32_t csum;
+ uint16_t tlen;
+ uint16_t temp[5] = {};
+
+ switch (pa->data.lro_type) {
+ case LRO_TYPE_IPV4_TCP:
+ /* Compute new IPv4 length. */
+ tlen = (pa->ip4->ip_hl << 2) + (pa->tcp->th_off << 2) + payload_len;
+ tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);
+
+ /* Subtract delta from current IPv4 checksum. */
+ csum = pa->ip4->ip_sum + 0xffff - temp[0];
+ while (csum > 0xffff)
+ csum = (csum >> 16) + (csum & 0xffff);
+ tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
+ goto update_tcp_header;
+
+ case LRO_TYPE_IPV6_TCP:
+ /* Compute new IPv6 length. */
+ tlen = (pa->tcp->th_off << 2) + payload_len;
+ tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
+ goto update_tcp_header;
+
+ case LRO_TYPE_IPV4_UDP:
+ /* Compute new IPv4 length. */
+ tlen = (pa->ip4->ip_hl << 2) + sizeof(*pa->udp) + payload_len;
+ tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);
+
+ /* Subtract delta from current IPv4 checksum. */
+ csum = pa->ip4->ip_sum + 0xffff - temp[0];
+ while (csum > 0xffff)
+ csum = (csum >> 16) + (csum & 0xffff);
+ tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
+ goto update_udp_header;
+
+ case LRO_TYPE_IPV6_UDP:
+ /* Compute new IPv6 length. */
+ tlen = sizeof(*pa->udp) + payload_len;
+ tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
+ goto update_udp_header;
+
+ default:
+ return (0);
+ }
+
+update_tcp_header:
+ /* Compute current TCP header checksum. */
+ temp[2] = tcp_lro_rx_csum_tcphdr(pa->tcp);
+
+ /* Incorporate the latest ACK into the TCP header. */
+ pa->tcp->th_ack = le->ack_seq;
+ pa->tcp->th_win = le->window;
- ip6 = le->le_ip6;
- ip6->ip6_plen = p_len;
- th = (struct tcphdr *)(ip6 + 1);
+ /* Incorporate latest timestamp into the TCP header. */
+ if (le->timestamp != 0) {
+ uint32_t *ts_ptr;
+
+ ts_ptr = (uint32_t *)(pa->tcp + 1);
+ ts_ptr[1] = htonl(le->tsval);
+ ts_ptr[2] = le->tsecr;
+ }
+
+ /* Compute new TCP header checksum. */
+ temp[3] = tcp_lro_rx_csum_tcphdr(pa->tcp);
+
+ /* Compute new TCP checksum. */
+ csum = pa->tcp->th_sum + 0xffff - delta_sum +
+ 0xffff - temp[0] + 0xffff - temp[3] + temp[2];
+ while (csum > 0xffff)
+ csum = (csum >> 16) + (csum & 0xffff);
+
+ /* Assign new TCP checksum. */
+ tcp_lro_assign_and_checksum_16(&pa->tcp->th_sum, csum, &temp[4]);
+
+ /* Compute all modifications affecting next checksum. */
+ csum = temp[0] + temp[1] + 0xffff - temp[2] +
+ temp[3] + temp[4] + delta_sum;
+ while (csum > 0xffff)
+ csum = (csum >> 16) + (csum & 0xffff);
+
+ /* Return delta checksum to next stage, if any. */
+ return (csum);
+
+update_udp_header:
+ tlen = sizeof(*pa->udp) + payload_len;
+ /* Assign new UDP length and compute checksum delta. */
+ tcp_lro_assign_and_checksum_16(&pa->udp->uh_ulen, htons(tlen), &temp[2]);
+
+ /* Check if there is a UDP checksum. */
+ if (__predict_false(pa->udp->uh_sum != 0)) {
+ /* Compute new UDP checksum. */
+ csum = pa->udp->uh_sum + 0xffff - delta_sum +
+ 0xffff - temp[0] + 0xffff - temp[2];
+ while (csum > 0xffff)
+ csum = (csum >> 16) + (csum & 0xffff);
+ /* Assign new UDP checksum. */
+ tcp_lro_assign_and_checksum_16(&pa->udp->uh_sum, csum, &temp[3]);
+ }
+
+ /* Compute all modifications affecting next checksum. */
+ csum = temp[0] + temp[1] + temp[2] + temp[3] + delta_sum;
+ while (csum > 0xffff)
+ csum = (csum >> 16) + (csum & 0xffff);
+
+ /* Return delta checksum to next stage, if any. */
+ return (csum);
+}
+
+static void
+tcp_flush_out_entry(struct lro_ctrl *lc, struct lro_entry *le)
+{
+ /* Check if we need to recompute any checksums. */
+ if (le->m_head->m_pkthdr.lro_nsegs > 1) {
+ uint16_t csum;
+
+ switch (le->inner.data.lro_type) {
+ case LRO_TYPE_IPV4_TCP:
+ csum = tcp_lro_update_checksum(&le->inner, le,
+ le->m_head->m_pkthdr.lro_tcp_d_len,
+ le->m_head->m_pkthdr.lro_tcp_d_csum);
+ csum = tcp_lro_update_checksum(&le->outer, NULL,
+ le->m_head->m_pkthdr.lro_tcp_d_len +
+ le->inner.total_hdr_len, csum);
le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
- CSUM_PSEUDO_HDR;
- le->p_len += ETHER_HDR_LEN + sizeof(*ip6);
+ CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
+ le->m_head->m_pkthdr.csum_data = 0xffff;
break;
- }
-#endif
-#ifdef INET
- case ETHERTYPE_IP:
- {
- struct ip *ip4;
- uint32_t cl;
- uint16_t c;
-
- ip4 = le->le_ip4;
- /* Fix IP header checksum for new length. */
- c = ~ip4->ip_sum;
- cl = c;
- c = ~ip4->ip_len;
- cl += c + p_len;
- while (cl > 0xffff)
- cl = (cl >> 16) + (cl & 0xffff);
- c = cl;
- ip4->ip_sum = ~c;
- ip4->ip_len = p_len;
- th = (struct tcphdr *)(ip4 + 1);
+ case LRO_TYPE_IPV6_TCP:
+ csum = tcp_lro_update_checksum(&le->inner, le,
+ le->m_head->m_pkthdr.lro_tcp_d_len,
+ le->m_head->m_pkthdr.lro_tcp_d_csum);
+ csum = tcp_lro_update_checksum(&le->outer, NULL,
+ le->m_head->m_pkthdr.lro_tcp_d_len +
+ le->inner.total_hdr_len, csum);
le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
- CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
- le->p_len += ETHER_HDR_LEN;
+ CSUM_PSEUDO_HDR;
+ le->m_head->m_pkthdr.csum_data = 0xffff;
+ break;
+ case LRO_TYPE_NONE:
+ switch (le->outer.data.lro_type) {
+ case LRO_TYPE_IPV4_TCP:
+ csum = tcp_lro_update_checksum(&le->outer, le,
+ le->m_head->m_pkthdr.lro_tcp_d_len,
+ le->m_head->m_pkthdr.lro_tcp_d_csum);
+ le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
+ CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
+ le->m_head->m_pkthdr.csum_data = 0xffff;
+ break;
+ case LRO_TYPE_IPV6_TCP:
+ csum = tcp_lro_update_checksum(&le->outer, le,
+ le->m_head->m_pkthdr.lro_tcp_d_len,
+ le->m_head->m_pkthdr.lro_tcp_d_csum);
+ le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
+ CSUM_PSEUDO_HDR;
+ le->m_head->m_pkthdr.csum_data = 0xffff;
+ break;
+ default:
+ break;
+ }
break;
- }
-#endif
default:
- th = NULL; /* Keep compiler happy. */
- }
- le->m_head->m_pkthdr.csum_data = 0xffff;
- le->m_head->m_pkthdr.len = le->p_len;
-
- /* Incorporate the latest ACK into the TCP header. */
- th->th_ack = le->ack_seq;
- th->th_win = le->window;
- /* Incorporate latest timestamp into the TCP header. */
- if (le->timestamp != 0) {
- uint32_t *ts_ptr;
-
- ts_ptr = (uint32_t *)(th + 1);
- ts_ptr[1] = htonl(le->tsval);
- ts_ptr[2] = le->tsecr;
- }
- /* Update the TCP header checksum. */
- le->ulp_csum += p_len;
- le->ulp_csum += tcp_lro_csum_th(th);
- while (le->ulp_csum > 0xffff)
- le->ulp_csum = (le->ulp_csum >> 16) +
- (le->ulp_csum & 0xffff);
- th->th_sum = (le->ulp_csum & 0xffff);
- th->th_sum = ~th->th_sum;
- if (tp && locked) {
- tcp_lro_log(tp, lc, le, NULL, 7, 0, 0, 0, 0);
+ break;
}
}
+
/*
* Break any chain: in the singleton case m_nextpkt still
* points to m_head; in all other cases push_and_replace
* has already set m_nextpkt to NULL.
*/
le->m_head->m_nextpkt = NULL;
- le->m_head->m_pkthdr.lro_nsegs = le->append_cnt;
- if (tp && locked) {
- tcp_lro_log(tp, lc, le, le->m_head, 8, 0, 0, 0, 0);
- }
+ lc->lro_queued += le->m_head->m_pkthdr.lro_nsegs;
(*lc->ifp->if_input)(lc->ifp, le->m_head);
- lc->lro_queued += le->append_cnt;
}
static void
-tcp_set_le_to_m(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
+tcp_set_entry_to_mbuf(struct lro_ctrl *lc, struct lro_entry *le,
+ struct mbuf *m, struct tcphdr *th)
{
- struct ether_header *eh;
- void *l3hdr = NULL; /* Keep compiler happy. */
- struct tcphdr *th;
-#ifdef INET6
- struct ip6_hdr *ip6 = NULL; /* Keep compiler happy. */
-#endif
-#ifdef INET
- struct ip *ip4 = NULL; /* Keep compiler happy. */
-#endif
uint32_t *ts_ptr;
- int error, l, ts_failed = 0;
uint16_t tcp_data_len;
- uint16_t csum;
+ uint16_t tcp_opt_len;
- error = -1;
- eh = mtod(m, struct ether_header *);
- /*
- * We must reset the other pointers since the mbuf
- * we were pointing too is about to go away.
- */
- switch (le->eh_type) {
-#ifdef INET6
- case ETHERTYPE_IPV6:
- l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
- error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
- le->le_ip6 = ip6;
- le->source_ip6 = ip6->ip6_src;
- le->dest_ip6 = ip6->ip6_dst;
- le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);
- break;
-#endif
-#ifdef INET
- case ETHERTYPE_IP:
- l3hdr = ip4 = (struct ip *)(eh + 1);
- error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
- le->le_ip4 = ip4;
- le->source_ip4 = ip4->ip_src.s_addr;
- le->dest_ip4 = ip4->ip_dst.s_addr;
- le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
- break;
-#endif
- }
- KASSERT(error == 0, ("%s: le=%p tcp_lro_rx_xxx failed\n",
- __func__, le));
ts_ptr = (uint32_t *)(th + 1);
- l = (th->th_off << 2);
- l -= sizeof(*th);
- if (l != 0 &&
- (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
- (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
- TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
- /* We have failed to find a timestamp some other option? */
- ts_failed = 1;
- }
- if ((l != 0) && (ts_failed == 0)) {
+ tcp_opt_len = (th->th_off << 2);
+ tcp_opt_len -= sizeof(*th);
+
+ /* Check if there is a timestamp option. */
+ if (tcp_opt_len == 0 ||
+ __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
+ *ts_ptr != TCP_LRO_TS_OPTION)) {
+ /* We failed to find the timestamp option. */
+ le->timestamp = 0;
+ } else {
le->timestamp = 1;
le->tsval = ntohl(*(ts_ptr + 1));
le->tsecr = *(ts_ptr + 2);
- } else
- le->timestamp = 0;
- le->source_port = th->th_sport;
- le->dest_port = th->th_dport;
- /* Pull out the csum */
- tcp_data_len = m->m_pkthdr.lro_len;
+ }
+
+ tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
+
+ /* Pull out TCP sequence numbers and window size. */
le->next_seq = ntohl(th->th_seq) + tcp_data_len;
le->ack_seq = th->th_ack;
le->window = th->th_win;
- csum = th->th_sum;
- /* Setup the data pointers */
+
+ /* Setup new data pointers. */
le->m_head = m;
le->m_tail = m_last(m);
- le->append_cnt = 0;
- le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
- ~csum);
- le->append_cnt++;
- th->th_sum = csum; /* Restore checksum on first packet. */
}
static void
-tcp_push_and_replace(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m, int locked)
+tcp_push_and_replace(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
{
+ struct lro_parser *pa;
+
/*
- * Push up the stack the current le and replace
- * it with m.
+ * Push the current entry up the stack
+ * and replace it with "m".
*/
struct mbuf *msave;
/* Grab off the next and save it */
msave = le->m_head->m_nextpkt;
le->m_head->m_nextpkt = NULL;
- /* Now push out the old le entry */
- tcp_flush_out_le(tp, lc, le, locked);
+
+ /* Now push out the old entry */
+ tcp_flush_out_entry(lc, le);
+
+ /* Re-parse new header, should not fail. */
+ pa = tcp_lro_parser(m, &le->outer, &le->inner, false);
+ KASSERT(pa != NULL,
+ ("tcp_push_and_replace: LRO parser failed on m=%p\n", m));
+
/*
- * Now to replace the data properly in the le
- * we have to reset the tcp header and
+ * Now to replace the data properly in the entry
+ * we have to reset the TCP header and
* other fields.
*/
- tcp_set_le_to_m(lc, le, m);
+ tcp_set_entry_to_mbuf(lc, le, m, pa->tcp);
+
/* Restore the next list */
m->m_nextpkt = msave;
}
static void
-tcp_lro_condense(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, int locked)
+tcp_lro_mbuf_append_pkthdr(struct mbuf *m, const struct mbuf *p)
+{
+ uint32_t csum;
+
+ if (m->m_pkthdr.lro_nsegs == 1) {
+ /* Compute relative checksum. */
+ csum = p->m_pkthdr.lro_tcp_d_csum;
+ } else {
+ /* Merge TCP data checksums. */
+ csum = (uint32_t)m->m_pkthdr.lro_tcp_d_csum +
+ (uint32_t)p->m_pkthdr.lro_tcp_d_csum;
+ while (csum > 0xffff)
+ csum = (csum >> 16) + (csum & 0xffff);
+ }
+
+ /* Update various counters. */
+ m->m_pkthdr.len += p->m_pkthdr.lro_tcp_d_len;
+ m->m_pkthdr.lro_tcp_d_csum = csum;
+ m->m_pkthdr.lro_tcp_d_len += p->m_pkthdr.lro_tcp_d_len;
+ m->m_pkthdr.lro_nsegs += p->m_pkthdr.lro_nsegs;
+}
+
+static void
+tcp_lro_condense(struct lro_ctrl *lc, struct lro_entry *le)
{
/*
* Walk through the mbuf chain we
@@ -686,8 +948,10 @@ tcp_lro_condense(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, in
uint32_t *ts_ptr;
struct mbuf *m;
struct tcphdr *th;
- uint16_t tcp_data_len, csum_upd;
- int l;
+ uint32_t tcp_data_len_total;
+ uint32_t tcp_data_seg_total;
+ uint16_t tcp_data_len;
+ uint16_t tcp_opt_len;
/*
* First we must check the lead (m_head)
@@ -696,27 +960,25 @@ tcp_lro_condense(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, in
* right away (sack etc).
*/
again:
-
m = le->m_head->m_nextpkt;
if (m == NULL) {
- /* Just the one left */
+ /* Just one left. */
return;
}
- th = tcp_lro_get_th(le, le->m_head);
- KASSERT(th != NULL,
- ("le:%p m:%p th comes back NULL?", le, le->m_head));
- l = (th->th_off << 2);
- l -= sizeof(*th);
+
+ th = tcp_lro_get_th(m);
+ tcp_opt_len = (th->th_off << 2);
+ tcp_opt_len -= sizeof(*th);
ts_ptr = (uint32_t *)(th + 1);
- if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
- (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
- TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
+
+ if (tcp_opt_len != 0 && __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
+ *ts_ptr != TCP_LRO_TS_OPTION)) {
/*
* It's not the timestamp option. We can't
* use this segment as the head.
*/
le->m_head->m_nextpkt = m->m_nextpkt;
- tcp_push_and_replace(tp, lc, le, m, locked);
+ tcp_push_and_replace(lc, le, m);
goto again;
}
if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
@@ -725,7 +987,7 @@ again:
* before this segment, e.g. FIN.
*/
le->m_head->m_nextpkt = m->m_nextpkt;
- tcp_push_and_replace(tp, lc, le, m, locked);
+ tcp_push_and_replace(lc, le, m);
goto again;
}
while((m = le->m_head->m_nextpkt) != NULL) {
@@ -736,34 +998,23 @@ again:
le->m_head->m_nextpkt = m->m_nextpkt;
m->m_nextpkt = NULL;
/* Setup my data */
- tcp_data_len = m->m_pkthdr.lro_len;
- th = tcp_lro_get_th(le, m);
- KASSERT(th != NULL,
- ("le:%p m:%p th comes back NULL?", le, m));
+ tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
+ th = tcp_lro_get_th(m);
ts_ptr = (uint32_t *)(th + 1);
- l = (th->th_off << 2);
- l -= sizeof(*th);
- if (tp && locked) {
- tcp_lro_log(tp, lc, le, m, 1, 0, 0, 0, 0);
- }
- if (le->append_cnt >= lc->lro_ackcnt_lim) {
- if (tp && locked) {
- tcp_lro_log(tp, lc, le, m, 2, 0, 0, 0, 0);
- }
- tcp_push_and_replace(tp, lc, le, m, locked);
- goto again;
- }
- if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
+ tcp_opt_len = (th->th_off << 2);
+ tcp_opt_len -= sizeof(*th);
+ tcp_data_len_total = le->m_head->m_pkthdr.lro_tcp_d_len + tcp_data_len;
+ tcp_data_seg_total = le->m_head->m_pkthdr.lro_nsegs + m->m_pkthdr.lro_nsegs;
+
+ if (tcp_data_seg_total >= lc->lro_ackcnt_lim ||
+ tcp_data_len_total >= lc->lro_length_lim) {
/* Flush now if appending will result in overflow. */
- if (tp && locked) {
- tcp_lro_log(tp, lc, le, m, 3, tcp_data_len, 0, 0, 0);
- }
- tcp_push_and_replace(tp, lc, le, m, locked);
+ tcp_push_and_replace(lc, le, m);
goto again;
}
- if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
- (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
- TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
+ if (tcp_opt_len != 0 &&
+ __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
+ *ts_ptr != TCP_LRO_TS_OPTION)) {
/*
* Maybe a sack in the new one? We need to
* start all over after flushing the
* current le. We will go up to the beginning
* and flush it (calling the replace again possibly
* and flush it (calling the replace again possibly
* or just returning).
*/
- tcp_push_and_replace(tp, lc, le, m, locked);
+ tcp_push_and_replace(lc, le, m);
goto again;
}
if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
- tcp_push_and_replace(tp, lc, le, m, locked);
+ tcp_push_and_replace(lc, le, m);
goto again;
}
- if (l != 0) {
+ if (tcp_opt_len != 0) {
uint32_t tsval = ntohl(*(ts_ptr + 1));
/* Make sure timestamp values are increasing. */
if (TSTMP_GT(le->tsval, tsval)) {
- tcp_push_and_replace(tp, lc, le, m, locked);
+ tcp_push_and_replace(lc, le, m);
goto again;
}
le->tsval = tsval;
@@ -794,51 +1045,32 @@ again:
le->ack_seq == th->th_ack &&
le->window == th->th_win))) {
/* Out of order packet or duplicate ACK. */
- if (tp && locked) {
- tcp_lro_log(tp, lc, le, m, 4, tcp_data_len,
- ntohl(th->th_seq),
- th->th_ack,
- th->th_win);
- }
- tcp_push_and_replace(tp, lc, le, m, locked);
+ tcp_push_and_replace(lc, le, m);
goto again;
}
- if (tcp_data_len || SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
+ if (tcp_data_len != 0 ||
+ SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
le->next_seq += tcp_data_len;
le->ack_seq = th->th_ack;
le->window = th->th_win;
} else if (th->th_ack == le->ack_seq) {
le->window = WIN_MAX(le->window, th->th_win);
}
- csum_upd = m->m_pkthdr.lro_csum;
- le->ulp_csum += csum_upd;
+
if (tcp_data_len == 0) {
- le->append_cnt++;
- le->mbuf_cnt--;
- if (tp && locked) {
- tcp_lro_log(tp, lc, le, m, 5, tcp_data_len,
- ntohl(th->th_seq),
- th->th_ack,
- th->th_win);
- }
m_freem(m);
continue;
}
- le->append_cnt++;
- le->mbuf_appended++;
- le->p_len += tcp_data_len;
+
+ /* Merge TCP data checksum and length to head mbuf. */
+ tcp_lro_mbuf_append_pkthdr(le->m_head, m);
+
/*
* Adjust the mbuf so that m_data points to the first byte of
* the ULP payload. Adjust the mbuf to avoid complications and
* append new segment to existing mbuf chain.
*/
m_adj(m, m->m_pkthdr.len - tcp_data_len);
- if (tp && locked) {
- tcp_lro_log(tp, lc, le, m, 6, tcp_data_len,
- ntohl(th->th_seq),
- th->th_ack,
- th->th_win);
- }
m_demote_pkthdr(m);
le->m_tail->m_next = m;
le->m_tail = m_last(m);
@@ -847,8 +1079,9 @@ again:
#ifdef TCPHPTS
static void
-tcp_queue_pkts(struct tcpcb *tp, struct lro_entry *le)
+tcp_queue_pkts(struct inpcb *inp, struct tcpcb *tp, struct lro_entry *le)
{
+ INP_WLOCK_ASSERT(inp);
if (tp->t_in_pkt == NULL) {
/* Nothing yet there */
tp->t_in_pkt = le->m_head;
@@ -861,128 +1094,250 @@ tcp_queue_pkts(struct tcpcb *tp, struct lro_entry *le)
le->m_head = NULL;
le->m_last_mbuf = NULL;
}
-#endif
-void
-tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
+static struct mbuf *
+tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct lro_entry *le,
+ struct inpcb *inp, int32_t *new_m)
{
- struct tcpcb *tp = NULL;
- int locked = 0;
-#ifdef TCPHPTS
- struct inpcb *inp = NULL;
- int need_wakeup = 0, can_queue = 0;
- struct epoch_tracker et;
+ struct tcpcb *tp;
+ struct mbuf *m;
- /* Now lets lookup the inp first */
- CURVNET_SET(lc->ifp->if_vnet);
- /*
- * XXXRRS Currently the common input handler for
- * mbuf queuing cannot handle VLAN Tagged. This needs
- * to be fixed and the or condition removed (i.e. the
- * common code should do the right lookup for the vlan
- * tag and anything else that the vlan_input() does).
- */
- if ((tcplro_stacks_wanting_mbufq == 0) || (le->m_head->m_flags & M_VLANTAG))
- goto skip_lookup;
- NET_EPOCH_ENTER(et);
- switch (le->eh_type) {
+ tp = intotcpcb(inp);
+ if (__predict_false(tp == NULL))
+ return (NULL);
+
+ /* Look at the last mbuf, if any, in the queue */
+ m = tp->t_tail_pkt;
+ if (m != NULL && (m->m_flags & M_ACKCMP) != 0) {
+ if (M_TRAILINGSPACE(m) >= sizeof(struct tcp_ackent)) {
+ tcp_lro_log(tp, lc, le, NULL, 23, 0, 0, 0, 0);
+ *new_m = 0;
+ counter_u64_add(tcp_extra_mbuf, 1);
+ return (m);
+ } else {
+ /* Mark that we ran out of space */
+ inp->inp_flags2 |= INP_MBUF_L_ACKS;
+ }
+ }
+ /* Decide mbuf size. */
+ if (inp->inp_flags2 & INP_MBUF_L_ACKS)
+ m = m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR);
+ else
+ m = m_gethdr(M_NOWAIT, MT_DATA);
+
+ if (__predict_false(m == NULL)) {
+ counter_u64_add(tcp_would_have_but, 1);
+ return (NULL);
+ }
+ counter_u64_add(tcp_comp_total, 1);
+ m->m_flags |= M_ACKCMP;
+ *new_m = 1;
+ return (m);
+}
+
+static struct inpcb *
+tcp_lro_lookup(struct ifnet *ifp, struct lro_parser *pa)
+{
+ struct inpcb *inp;
+
+ NET_EPOCH_ASSERT();
+
+ switch (pa->data.lro_type) {
#ifdef INET6
- case ETHERTYPE_IPV6:
- inp = in6_pcblookup(&V_tcbinfo, &le->source_ip6,
- le->source_port, &le->dest_ip6,le->dest_port,
- INPLOOKUP_WLOCKPCB,
- lc->ifp);
+ case LRO_TYPE_IPV6_TCP:
+ inp = in6_pcblookup(&V_tcbinfo,
+ &pa->data.s_addr.v6,
+ pa->data.s_port,
+ &pa->data.d_addr.v6,
+ pa->data.d_port,
+ INPLOOKUP_WLOCKPCB,
+ ifp);
break;
#endif
#ifdef INET
- case ETHERTYPE_IP:
- inp = in_pcblookup(&V_tcbinfo, le->le_ip4->ip_src,
- le->source_port, le->le_ip4->ip_dst, le->dest_port,
- INPLOOKUP_WLOCKPCB,
- lc->ifp);
+ case LRO_TYPE_IPV4_TCP:
+ inp = in_pcblookup(&V_tcbinfo,
+ pa->data.s_addr.v4,
+ pa->data.s_port,
+ pa->data.d_addr.v4,
+ pa->data.d_port,
+ INPLOOKUP_WLOCKPCB,
+ ifp);
break;
#endif
- }
- NET_EPOCH_EXIT(et);
- if (inp && ((inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) ||
- (inp->inp_flags2 & INP_FREED))) {
- /* We don't want this guy */
- INP_WUNLOCK(inp);
+ default:
inp = NULL;
+ break;
}
- if (inp && (inp->inp_flags2 & INP_SUPPORTS_MBUFQ)) {
- /* The transport supports mbuf queuing */
- can_queue = 1;
- if (le->need_wakeup ||
- ((inp->inp_in_input == 0) &&
- ((inp->inp_flags2 & INP_MBUF_QUEUE_READY) == 0))) {
- /*
- * Either the transport is off on a keep-alive
- * (it has the queue_ready flag clear and its
- * not already been woken) or the entry has
- * some urgent thing (FIN or possibly SACK blocks).
- * This means we need to wake the transport up by
- * putting it on the input pacer.
- */
- need_wakeup = 1;
- if ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) &&
- (le->need_wakeup != 1)) {
- /*
- * Prohibited from a sack wakeup.
- */
- need_wakeup = 0;
- }
+ return (inp);
+}
+
+static inline bool
+tcp_lro_ack_valid(struct mbuf *m, struct tcphdr *th, uint32_t **ppts, bool *other_opts)
+{
+ /*
+ * This function returns two bits of valuable information.
+ * a) Is what is present capable of being ack-compressed?
+ * We can ack-compress if there are no options or just
+ * a timestamp option, and of course the th_flags must
+ * be correct as well.
+ * b) Are other options present, such as SACK? This is
+ * used to determine if we want to wake up or not.
+ */
+ bool ret = true;
+
+ switch (th->th_off << 2) {
+ case (sizeof(*th) + TCPOLEN_TSTAMP_APPA):
+ *ppts = (uint32_t *)(th + 1);
+ /* Check if we have only one timestamp option. */
+ if (**ppts == TCP_LRO_TS_OPTION)
+ *other_opts = false;
+ else {
+ *other_opts = true;
+ ret = false;
}
- /* Do we need to be awoken due to lots of data or acks? */
- if ((le->tcp_tot_p_len >= lc->lro_length_lim) ||
- (le->mbuf_cnt >= lc->lro_ackcnt_lim))
- need_wakeup = 1;
+ break;
+ case (sizeof(*th)):
+ /* No options. */
+ *ppts = NULL;
+ *other_opts = false;
+ break;
+ default:
+ *ppts = NULL;
+ *other_opts = true;
+ ret = false;
+ break;
}
- if (inp) {
- tp = intotcpcb(inp);
- locked = 1;
- } else
- tp = NULL;
- if (can_queue) {
- counter_u64_add(tcp_inp_lro_direct_queue, 1);
- tcp_lro_log(tp, lc, le, NULL, 22, need_wakeup,
- inp->inp_flags2, inp->inp_in_input, le->need_wakeup);
- tcp_queue_pkts(tp, le);
- if (need_wakeup) {
- /*
- * We must get the guy to wakeup via
- * hpts.
- */
- counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
- if (le->need_wakeup)
- counter_u64_add(tcp_inp_lro_sack_wake, 1);
- tcp_queue_to_input(inp);
- }
+ /* For ACKCMP we only accept ACK, PUSH, ECE and CWR. */
+ if ((th->th_flags & ~(TH_ACK | TH_PUSH | TH_ECE | TH_CWR)) != 0)
+ ret = false;
+ /* If it has data on it we cannot compress it */
+ if (m->m_pkthdr.lro_tcp_d_len)
+ ret = false;
+
+ /* ACK flag must be set. */
+ if (!(th->th_flags & TH_ACK))
+ ret = false;
+ return (ret);
+}
+
+static int
+tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct lro_entry *le)
+{
+ struct inpcb *inp;
+ struct tcpcb *tp;
+ struct mbuf **pp, *cmp, *mv_to;
+ bool bpf_req, should_wake;
+
+ /* Check if the packet doesn't belong to our network interface. */
+ if ((tcplro_stacks_wanting_mbufq == 0) ||
+ (le->outer.data.vlan_id != 0) ||
+ (le->inner.data.lro_type != LRO_TYPE_NONE))
+ return (TCP_LRO_CANNOT);
+
+#ifdef INET6
+ /*
+ * Be proactive about unspecified IPv6 address in source. As
+ * we use all-zero to indicate unbounded/unconnected pcb,
+ * unspecified IPv6 address can be used to confuse us.
+ *
+ * Note that packets with an unspecified IPv6 destination are
+ * already dropped in ip6_input.
+ */
+ if (__predict_false(le->outer.data.lro_type == LRO_TYPE_IPV6_TCP &&
+ IN6_IS_ADDR_UNSPECIFIED(&le->outer.data.s_addr.v6)))
+ return (TCP_LRO_CANNOT);
+
+ if (__predict_false(le->inner.data.lro_type == LRO_TYPE_IPV6_TCP &&
+ IN6_IS_ADDR_UNSPECIFIED(&le->inner.data.s_addr.v6)))
+ return (TCP_LRO_CANNOT);
+#endif
+ /* Lookup inp, if any. */
+ inp = tcp_lro_lookup(lc->ifp,
+ (le->inner.data.lro_type == LRO_TYPE_NONE) ? &le->outer : &le->inner);
+ if (inp == NULL)
+ return (TCP_LRO_CANNOT);
+
+ counter_u64_add(tcp_inp_lro_locks_taken, 1);
+
+ /* Get TCP control structure. */
+ tp = intotcpcb(inp);
+
+ /* Check if the inp is dead, Jim. */
+ if (tp == NULL ||
+ (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
+ (inp->inp_flags2 & INP_FREED)) {
+ INP_WUNLOCK(inp);
+ return (TCP_LRO_CANNOT);
}
- if (inp && (hold_lock_over_compress == 0)) {
- /* Unlock it */
- locked = 0;
- tp = NULL;
- counter_u64_add(tcp_inp_lro_locks_taken, 1);
+
+ /* Check if the transport doesn't support the needed optimizations. */
+ if ((inp->inp_flags2 & (INP_SUPPORTS_MBUFQ | INP_MBUF_ACKCMP)) == 0) {
INP_WUNLOCK(inp);
+ return (TCP_LRO_CANNOT);
}
- if (can_queue == 0) {
-skip_lookup:
-#endif /* TCPHPTS */
- /* Old fashioned lro method */
- if (le->m_head != le->m_last_mbuf) {
- counter_u64_add(tcp_inp_lro_compressed, 1);
- tcp_lro_condense(tp, lc, le, locked);
- } else
- counter_u64_add(tcp_inp_lro_single_push, 1);
- tcp_flush_out_le(tp, lc, le, locked);
-#ifdef TCPHPTS
+
+ if (inp->inp_flags2 & INP_MBUF_QUEUE_READY)
+ should_wake = false;
+ else
+ should_wake = true;
+ /* Check if packets should be tapped to BPF. */
+ bpf_req = bpf_peers_present(lc->ifp->if_bpf);
+
+ /* Strip and compress all the incoming packets. */
+ cmp = NULL;
+ for (pp = &le->m_head; *pp != NULL; ) {
+ mv_to = NULL;
+ if (do_bpf_strip_and_compress(inp, lc, le, pp,
+ &cmp, &mv_to, &should_wake, bpf_req) == false) {
+ /* Advance to next mbuf. */
+ pp = &(*pp)->m_nextpkt;
+ } else if (mv_to != NULL) {
+ /* We are asked to move pp up */
+ pp = &mv_to->m_nextpkt;
+ }
}
- if (inp && locked) {
- counter_u64_add(tcp_inp_lro_locks_taken, 1);
- INP_WUNLOCK(inp);
+ /* Update "m_last_mbuf", if any. */
+ if (pp == &le->m_head)
+ le->m_last_mbuf = *pp;
+ else
+ le->m_last_mbuf = __containerof(pp, struct mbuf, m_nextpkt);
+
+ /* Check if any data mbufs left. */
+ if (le->m_head != NULL) {
+ counter_u64_add(tcp_inp_lro_direct_queue, 1);
+ tcp_lro_log(tp, lc, le, NULL, 22, 1,
+ inp->inp_flags2, inp->inp_in_input, 1);
+ tcp_queue_pkts(inp, tp, le);
+ }
+ if (should_wake) {
+ /* Wakeup */
+ counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
+ if ((*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0))
+ inp = NULL;
}
+ if (inp != NULL)
+ INP_WUNLOCK(inp);
+ return (0); /* Success. */
+}
+#endif
+
+void
+tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
+{
+ /* Only optimise if there are multiple packets waiting. */
+#ifdef TCPHPTS
+ int error;
+
+ CURVNET_SET(lc->ifp->if_vnet);
+ error = tcp_lro_flush_tcphpts(lc, le);
CURVNET_RESTORE();
+ if (error != 0) {
+#endif
+ tcp_lro_condense(lc, le);
+ tcp_flush_out_entry(lc, le);
+#ifdef TCPHPTS
+ }
#endif
lc->lro_flushed++;
bzero(le, sizeof(*le));
@@ -1088,6 +1443,11 @@ tcp_lro_flush_all(struct lro_ctrl *lc)
if (lc->lro_mbuf_count == 0)
goto done;
+ CURVNET_SET(lc->ifp->if_vnet);
+
+ /* get current time */
+ lc->lro_last_queue_time = getsbinuptime();
+
/* sort all mbufs according to stream */
tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);
@@ -1111,13 +1471,14 @@ tcp_lro_flush_all(struct lro_ctrl *lc)
}
/* add packet to LRO engine */
- if (tcp_lro_rx2(lc, mb, 0, 0) != 0) {
+ if (tcp_lro_rx_common(lc, mb, 0, false) != 0) {
/* input packet to network layer */
(*lc->ifp->if_input)(lc->ifp, mb);
lc->lro_queued++;
lc->lro_flushed++;
}
}
+ CURVNET_RESTORE();
done:
/* flush active streams */
tcp_lro_rx_done(lc);
@@ -1125,213 +1486,324 @@ done:
lc->lro_mbuf_count = 0;
}
+#ifdef TCPHPTS
static void
-lro_set_mtime(struct timeval *tv, struct timespec *ts)
+build_ack_entry(struct tcp_ackent *ae, struct tcphdr *th, struct mbuf *m,
+ uint32_t *ts_ptr, uint16_t iptos)
{
- tv->tv_sec = ts->tv_sec;
- tv->tv_usec = ts->tv_nsec / 1000;
+ /*
+ * Given a TCP ACK, summarize it down into the small TCP ACK
+ * entry.
+ */
+ ae->timestamp = m->m_pkthdr.rcv_tstmp;
+ if (m->m_flags & M_TSTMP_LRO)
+ ae->flags = TSTMP_LRO;
+ else if (m->m_flags & M_TSTMP)
+ ae->flags = TSTMP_HDWR;
+ ae->seq = ntohl(th->th_seq);
+ ae->ack = ntohl(th->th_ack);
+ ae->flags |= th->th_flags;
+ if (ts_ptr != NULL) {
+ ae->ts_value = ntohl(ts_ptr[1]);
+ ae->ts_echo = ntohl(ts_ptr[2]);
+ ae->flags |= HAS_TSTMP;
+ }
+ ae->win = ntohs(th->th_win);
+ ae->codepoint = iptos;
}
-static int
-tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
+/*
+ * Do BPF tap for either ACK_CMP packets or MBUF QUEUE type packets
+ * and strip all, but the IPv4/IPv6 header.
+ */
+static bool
+do_bpf_strip_and_compress(struct inpcb *inp, struct lro_ctrl *lc,
+ struct lro_entry *le, struct mbuf **pp, struct mbuf **cmp, struct mbuf **mv_to,
+ bool *should_wake, bool bpf_req)
{
- struct lro_entry *le;
- struct ether_header *eh;
-#ifdef INET6
- struct ip6_hdr *ip6 = NULL; /* Keep compiler happy. */
-#endif
-#ifdef INET
- struct ip *ip4 = NULL; /* Keep compiler happy. */
-#endif
+ union {
+ void *ptr;
+ struct ip *ip4;
+ struct ip6_hdr *ip6;
+ } l3;
+ struct mbuf *m;
+ struct mbuf *nm;
struct tcphdr *th;
- void *l3hdr = NULL; /* Keep compiler happy. */
+ struct tcp_ackent *ack_ent;
uint32_t *ts_ptr;
- tcp_seq seq;
- int error, ip_len, l;
- uint16_t eh_type, tcp_data_len, need_flush;
- struct lro_head *bucket;
- struct timespec arrv;
+ int32_t n_mbuf;
+ bool other_opts, can_compress;
+ uint16_t lro_type;
+ uint16_t iptos;
+ int tcp_hdr_offset;
+ int idx;
+
+ /* Get current mbuf. */
+ m = *pp;
+
+ /* Let the BPF see the packet */
+ if (__predict_false(bpf_req))
+ ETHER_BPF_MTAP(lc->ifp, m);
+
+ tcp_hdr_offset = m->m_pkthdr.lro_tcp_h_off;
+ lro_type = le->inner.data.lro_type;
+ switch (lro_type) {
+ case LRO_TYPE_NONE:
+ lro_type = le->outer.data.lro_type;
+ switch (lro_type) {
+ case LRO_TYPE_IPV4_TCP:
+ tcp_hdr_offset -= sizeof(*le->outer.ip4);
+ m->m_pkthdr.lro_etype = ETHERTYPE_IP;
+ break;
+ case LRO_TYPE_IPV6_TCP:
+ tcp_hdr_offset -= sizeof(*le->outer.ip6);
+ m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
+ break;
+ default:
+ goto compressed;
+ }
+ break;
+ case LRO_TYPE_IPV4_TCP:
+ tcp_hdr_offset -= sizeof(*le->outer.ip4);
+ m->m_pkthdr.lro_etype = ETHERTYPE_IP;
+ break;
+ case LRO_TYPE_IPV6_TCP:
+ tcp_hdr_offset -= sizeof(*le->outer.ip6);
+ m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
+ break;
+ default:
+ goto compressed;
+ }
- /* We expect a contiguous header [eh, ip, tcp]. */
- if ((m->m_flags & (M_TSTMP_LRO|M_TSTMP)) == 0) {
- /* If no hardware or arrival stamp on the packet add arrival */
- nanouptime(&arrv);
- m->m_pkthdr.rcv_tstmp = (arrv.tv_sec * 1000000000) + arrv.tv_nsec;
- m->m_flags |= M_TSTMP_LRO;
+ MPASS(tcp_hdr_offset >= 0);
+
+ m_adj(m, tcp_hdr_offset);
+ m->m_flags |= M_LRO_EHDRSTRP;
+ m->m_flags &= ~M_ACKCMP;
+ m->m_pkthdr.lro_tcp_h_off -= tcp_hdr_offset;
+
+ th = tcp_lro_get_th(m);
+
+ th->th_sum = 0; /* TCP checksum is valid. */
+
+ /* Check if ACK can be compressed */
+ can_compress = tcp_lro_ack_valid(m, th, &ts_ptr, &other_opts);
+
+ /* Now let's look at the should-wake state */
+ if ((other_opts == true) &&
+ ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) == 0)) {
+ /*
+ * If there are other options (SACK?) and the
+ * tcp endpoint has not expressly told us it does
+ * not care about SACKS, then we should wake up.
+ */
+ *should_wake = true;
}
- eh = mtod(m, struct ether_header *);
- eh_type = ntohs(eh->ether_type);
- switch (eh_type) {
-#ifdef INET6
- case ETHERTYPE_IPV6:
- {
- CURVNET_SET(lc->ifp->if_vnet);
- if (V_ip6_forwarding != 0) {
- /* XXX-BZ stats but changing lro_ctrl is a problem. */
- CURVNET_RESTORE();
- return (TCP_LRO_CANNOT);
- }
- CURVNET_RESTORE();
- l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
- error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
- if (error != 0)
- return (error);
- tcp_data_len = ntohs(ip6->ip6_plen);
- ip_len = sizeof(*ip6) + tcp_data_len;
+ /* Is the ACK compressible? */
+ if (can_compress == false)
+ goto done;
+ /* Does the TCP endpoint support ACK compression? */
+ if ((inp->inp_flags2 & INP_MBUF_ACKCMP) == 0)
+ goto done;
+
+ /* Let's get the TOS/traffic class field */
+ l3.ptr = mtod(m, void *);
+ switch (lro_type) {
+ case LRO_TYPE_IPV4_TCP:
+ iptos = l3.ip4->ip_tos;
break;
+ case LRO_TYPE_IPV6_TCP:
+ iptos = IPV6_TRAFFIC_CLASS(l3.ip6);
+ break;
+ default:
+ iptos = 0; /* Keep compiler happy. */
+ break;
+ }
+ /* Now let's get space if we don't have some already */
+ if (*cmp == NULL) {
+new_one:
+ nm = tcp_lro_get_last_if_ackcmp(lc, le, inp, &n_mbuf);
+ if (__predict_false(nm == NULL))
+ goto done;
+ *cmp = nm;
+ if (n_mbuf) {
+ /*
+ * Link in the new cmp ack to our in-order place,
+ * first set our cmp ack's next to where we are.
+ */
+ nm->m_nextpkt = m;
+ (*pp) = nm;
+ /*
+ * Set it up so mv_to is advanced to our
+ * compressed ack. This way the caller can
+ * advance pp to the right place.
+ */
+ *mv_to = nm;
+ /*
+ * Advance it here locally as well.
+ */
+ pp = &nm->m_nextpkt;
+ }
+ } else {
+ /* We have one already we are working on */
+ nm = *cmp;
+ if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) {
+ /* We ran out of space */
+ inp->inp_flags2 |= INP_MBUF_L_ACKS;
+ goto new_one;
+ }
}
+ MPASS(M_TRAILINGSPACE(nm) >= sizeof(struct tcp_ackent));
+ counter_u64_add(tcp_inp_lro_compressed, 1);
+ le->compressed++;
+ /* We can add in to the one on the tail */
+ ack_ent = mtod(nm, struct tcp_ackent *);
+ idx = (nm->m_len / sizeof(struct tcp_ackent));
+ build_ack_entry(&ack_ent[idx], th, m, ts_ptr, iptos);
+
+ /* Bump the size of both pkt-hdr and len */
+ nm->m_len += sizeof(struct tcp_ackent);
+ nm->m_pkthdr.len += sizeof(struct tcp_ackent);
+compressed:
+ /* Advance to next mbuf before freeing. */
+ *pp = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ m_freem(m);
+ return (true);
+done:
+ counter_u64_add(tcp_uncomp_total, 1);
+ le->uncompressed++;
+ return (false);
+}
#endif
+
+static struct lro_head *
+tcp_lro_rx_get_bucket(struct lro_ctrl *lc, struct mbuf *m, struct lro_parser *parser)
+{
+ u_long hash;
+
+ if (M_HASHTYPE_ISHASH(m)) {
+ hash = m->m_pkthdr.flowid;
+ } else {
+ for (unsigned i = hash = 0; i != LRO_RAW_ADDRESS_MAX; i++)
+ hash += parser->data.raw[i];
+ }
+ return (&lc->lro_hash[hash % lc->lro_hashsz]);
+}
+
+static int
+tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, bool use_hash)
+{
+ struct lro_parser pi; /* inner address data */
+ struct lro_parser po; /* outer address data */
+ struct lro_parser *pa; /* current parser for TCP stream */
+ struct lro_entry *le;
+ struct lro_head *bucket;
+ struct tcphdr *th;
+ int tcp_data_len;
+ int tcp_opt_len;
+ int error;
+ uint16_t tcp_data_sum;
+
#ifdef INET
- case ETHERTYPE_IP:
- {
- CURVNET_SET(lc->ifp->if_vnet);
- if (V_ipforwarding != 0) {
- /* XXX-BZ stats but changing lro_ctrl is a problem. */
- CURVNET_RESTORE();
- return (TCP_LRO_CANNOT);
- }
- CURVNET_RESTORE();
- l3hdr = ip4 = (struct ip *)(eh + 1);
- error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
- if (error != 0)
+ /* Quickly decide if packet cannot be LRO'ed */
+ if (__predict_false(V_ipforwarding != 0))
+ return (TCP_LRO_CANNOT);
+#endif
+#ifdef INET6
+ /* Quickly decide if packet cannot be LRO'ed */
+ if (__predict_false(V_ip6_forwarding != 0))
+ return (TCP_LRO_CANNOT);
+#endif
+
+ /* We expect a contiguous header [eh, ip, tcp]. */
+ pa = tcp_lro_parser(m, &po, &pi, true);
+ if (__predict_false(pa == NULL))
+ return (TCP_LRO_NOT_SUPPORTED);
+
+ /* We don't expect any padding. */
+ error = tcp_lro_trim_mbuf_chain(m, pa);
+ if (__predict_false(error != 0))
+ return (error);
+
+#ifdef INET
+ switch (pa->data.lro_type) {
+ case LRO_TYPE_IPV4_TCP:
+ error = tcp_lro_rx_ipv4(lc, m, pa->ip4);
+ if (__predict_false(error != 0))
return (error);
- ip_len = ntohs(ip4->ip_len);
- tcp_data_len = ip_len - sizeof(*ip4);
+ break;
+ default:
break;
}
#endif
- /* XXX-BZ what happens in case of VLAN(s)? */
- default:
- return (TCP_LRO_NOT_SUPPORTED);
+ /* If there is no hardware or arrival timestamp on the packet, add one */
+ if ((m->m_flags & (M_TSTMP_LRO | M_TSTMP)) == 0) {
+ m->m_pkthdr.rcv_tstmp = sbttons(lc->lro_last_queue_time);
+ m->m_flags |= M_TSTMP_LRO;
}
- /*
- * If the frame is padded beyond the end of the IP packet, then we must
- * trim the extra bytes off.
- */
- l = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len);
- if (l != 0) {
- if (l < 0)
- /* Truncated packet. */
- return (TCP_LRO_CANNOT);
+ /* Get pointer to TCP header. */
+ th = pa->tcp;
- m_adj(m, -l);
- }
- /*
- * Check TCP header constraints.
- */
- if (th->th_flags & TH_SYN)
+ /* Don't process SYN packets. */
+ if (__predict_false(th->th_flags & TH_SYN))
+ return (TCP_LRO_CANNOT);
+
+ /* Get total TCP header length and compute payload length. */
+ tcp_opt_len = (th->th_off << 2);
+ tcp_data_len = m->m_pkthdr.len - ((uint8_t *)th -
+ (uint8_t *)m->m_data) - tcp_opt_len;
+ tcp_opt_len -= sizeof(*th);
+
+ /* Don't process invalid TCP headers. */
+ if (__predict_false(tcp_opt_len < 0 || tcp_data_len < 0))
return (TCP_LRO_CANNOT);
- if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
- need_flush = 1;
+
+ /* Compute TCP data-only checksum. */
+ if (tcp_data_len == 0)
+ tcp_data_sum = 0; /* no data, no checksum */
+ else if (__predict_false(csum != 0))
+ tcp_data_sum = tcp_lro_rx_csum_data(pa, ~csum);
else
- need_flush = 0;
- l = (th->th_off << 2);
- ts_ptr = (uint32_t *)(th + 1);
- tcp_data_len -= l;
- l -= sizeof(*th);
- if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
- (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
- TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
- /*
- * We have an option besides Timestamps, maybe
- * it is a sack (most likely) which means we
- * will probably need to wake up a sleeper (if
- * the guy does queueing).
- */
- need_flush = 2;
- }
+ tcp_data_sum = tcp_lro_rx_csum_data(pa, ~th->th_sum);
- /* If the driver did not pass in the checksum, set it now. */
- if (csum == 0x0000)
- csum = th->th_sum;
- seq = ntohl(th->th_seq);
+ /* Save TCP info in mbuf. */
+ m->m_nextpkt = NULL;
+ m->m_pkthdr.rcvif = lc->ifp;
+ m->m_pkthdr.lro_tcp_d_csum = tcp_data_sum;
+ m->m_pkthdr.lro_tcp_d_len = tcp_data_len;
+ m->m_pkthdr.lro_tcp_h_off = ((uint8_t *)th - (uint8_t *)m->m_data);
+ m->m_pkthdr.lro_nsegs = 1;
+
+ /* Get hash bucket. */
if (!use_hash) {
bucket = &lc->lro_hash[0];
- } else if (M_HASHTYPE_ISHASH(m)) {
- bucket = &lc->lro_hash[m->m_pkthdr.flowid % lc->lro_hashsz];
} else {
- uint32_t hash;
-
- switch (eh_type) {
-#ifdef INET
- case ETHERTYPE_IP:
- hash = ip4->ip_src.s_addr + ip4->ip_dst.s_addr;
- break;
-#endif
-#ifdef INET6
- case ETHERTYPE_IPV6:
- hash = ip6->ip6_src.s6_addr32[0] +
- ip6->ip6_dst.s6_addr32[0];
- hash += ip6->ip6_src.s6_addr32[1] +
- ip6->ip6_dst.s6_addr32[1];
- hash += ip6->ip6_src.s6_addr32[2] +
- ip6->ip6_dst.s6_addr32[2];
- hash += ip6->ip6_src.s6_addr32[3] +
- ip6->ip6_dst.s6_addr32[3];
- break;
-#endif
- default:
- hash = 0;
- break;
- }
- hash += th->th_sport + th->th_dport;
- bucket = &lc->lro_hash[hash % lc->lro_hashsz];
+ bucket = tcp_lro_rx_get_bucket(lc, m, pa);
}
/* Try to find a matching previous segment. */
LIST_FOREACH(le, bucket, hash_next) {
- if (le->eh_type != eh_type)
- continue;
- if (le->source_port != th->th_sport ||
- le->dest_port != th->th_dport)
+ /* Compare addresses and ports. */
+ if (lro_address_compare(&po.data, &le->outer.data) == false ||
+ lro_address_compare(&pi.data, &le->inner.data) == false)
continue;
- switch (eh_type) {
-#ifdef INET6
- case ETHERTYPE_IPV6:
- if (bcmp(&le->source_ip6, &ip6->ip6_src,
- sizeof(struct in6_addr)) != 0 ||
- bcmp(&le->dest_ip6, &ip6->ip6_dst,
- sizeof(struct in6_addr)) != 0)
- continue;
- break;
-#endif
-#ifdef INET
- case ETHERTYPE_IP:
- if (le->source_ip4 != ip4->ip_src.s_addr ||
- le->dest_ip4 != ip4->ip_dst.s_addr)
- continue;
- break;
-#endif
- }
- if (tcp_data_len || SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq)) ||
- (th->th_ack == le->ack_seq)) {
- m->m_pkthdr.lro_len = tcp_data_len;
- } else {
- /* no data and old ack */
+
+ /* Check if no data and old ACK. */
+ if (tcp_data_len == 0 &&
+ SEQ_LT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
m_freem(m);
return (0);
}
- if (need_flush)
- le->need_wakeup = need_flush;
- /* Save of the data only csum */
- m->m_pkthdr.rcvif = lc->ifp;
- m->m_pkthdr.lro_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th,
- tcp_data_len, ~csum);
- th->th_sum = csum; /* Restore checksum */
- /* Save off the tail I am appending too (prev) */
- le->m_prev_last = le->m_last_mbuf;
- /* Mark me in the last spot */
+
+ /* Mark "m" in the last spot. */
le->m_last_mbuf->m_nextpkt = m;
- /* Now set the tail to me */
+ /* Now set the tail to "m". */
le->m_last_mbuf = m;
- le->mbuf_cnt++;
- m->m_nextpkt = NULL;
- /* Add to the total size of data */
- le->tcp_tot_p_len += tcp_data_len;
- lro_set_mtime(&le->mtime, &arrv);
return (0);
}
+
/* Try to find an empty slot. */
if (LIST_EMPTY(&lc->lro_free))
return (TCP_LRO_NO_ENTRIES);
@@ -1340,79 +1812,40 @@ tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
le = LIST_FIRST(&lc->lro_free);
LIST_REMOVE(le, next);
tcp_lro_active_insert(lc, bucket, le);
- lro_set_mtime(&le->mtime, &arrv);
- /* Start filling in details. */
- switch (eh_type) {
-#ifdef INET6
- case ETHERTYPE_IPV6:
- le->le_ip6 = ip6;
- le->source_ip6 = ip6->ip6_src;
- le->dest_ip6 = ip6->ip6_dst;
- le->eh_type = eh_type;
- le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);
- break;
-#endif
-#ifdef INET
- case ETHERTYPE_IP:
- le->le_ip4 = ip4;
- le->source_ip4 = ip4->ip_src.s_addr;
- le->dest_ip4 = ip4->ip_dst.s_addr;
- le->eh_type = eh_type;
- le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
- break;
-#endif
- }
- le->source_port = th->th_sport;
- le->dest_port = th->th_dport;
- le->next_seq = seq + tcp_data_len;
- le->ack_seq = th->th_ack;
- le->window = th->th_win;
- if (l != 0) {
- le->timestamp = 1;
- le->tsval = ntohl(*(ts_ptr + 1));
- le->tsecr = *(ts_ptr + 2);
- }
- KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n",
- __func__, le, le->ulp_csum));
-
- le->append_cnt = 0;
- le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
- ~csum);
- le->append_cnt++;
- th->th_sum = csum; /* Restore checksum */
- le->m_head = m;
- m->m_pkthdr.rcvif = lc->ifp;
- le->mbuf_cnt = 1;
- if (need_flush)
- le->need_wakeup = need_flush;
- else
- le->need_wakeup = 0;
- le->m_tail = m_last(m);
+ /* Make sure the headers are set. */
+ le->inner = pi;
+ le->outer = po;
+
+ /* Store time this entry was allocated. */
+ le->alloc_time = lc->lro_last_queue_time;
+
+ tcp_set_entry_to_mbuf(lc, le, m, th);
+
+ /* Now set the tail to "m". */
le->m_last_mbuf = m;
- m->m_nextpkt = NULL;
- le->m_prev_last = NULL;
- /*
- * We keep the total size here for cross checking when we may need
- * to flush/wakeup in the MBUF_QUEUE case.
- */
- le->tcp_tot_p_len = tcp_data_len;
- m->m_pkthdr.lro_len = tcp_data_len;
+
return (0);
}
int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{
+ int error;
- return tcp_lro_rx2(lc, m, csum, 1);
+ /* get current time */
+ lc->lro_last_queue_time = getsbinuptime();
+
+ CURVNET_SET(lc->ifp->if_vnet);
+ error = tcp_lro_rx_common(lc, m, csum, true);
+ CURVNET_RESTORE();
+
+ return (error);
}
void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
- struct timespec arrv;
-
/* sanity checks */
if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
lc->lro_mbuf_max == 0)) {
@@ -1428,15 +1861,7 @@ tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
(*lc->ifp->if_input) (lc->ifp, mb);
return;
}
- /* Arrival Stamp the packet */
-
- if ((mb->m_flags & M_TSTMP) == 0) {
- /* If no hardware or arrival stamp on the packet add arrival */
- nanouptime(&arrv);
- mb->m_pkthdr.rcv_tstmp = ((arrv.tv_sec * 1000000000) +
- arrv.tv_nsec);
- mb->m_flags |= M_TSTMP_LRO;
- }
+
/* create sequence number */
lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
(((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
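
The ACK-compression path above packs fixed-size struct tcp_ackent records back to back into a single mbuf, so m_len doubles as the slot index. Below is a minimal sketch of just that append step, assuming the standard kernel mbuf macros; the helper name ackcmp_append is hypothetical, and the real code additionally falls back to allocating a fresh mbuf (setting INP_MBUF_L_ACKS) when the tail runs out of room.

#include <sys/param.h>
#include <sys/mbuf.h>
#include <netinet/tcp_lro.h>	/* struct tcp_ackent */

/* Hypothetical helper: append one compressed-ACK record to the tail mbuf. */
static bool
ackcmp_append(struct mbuf *nm, const struct tcp_ackent *ent)
{
	struct tcp_ackent *array;
	int idx;

	/* Refuse when there is no room left for another fixed-size record. */
	if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent))
		return (false);

	/* Records are stored back to back, so m_len gives the next slot. */
	array = mtod(nm, struct tcp_ackent *);
	idx = nm->m_len / sizeof(struct tcp_ackent);
	array[idx] = *ent;

	/* Keep the mbuf length and the packet header length in sync. */
	nm->m_len += sizeof(struct tcp_ackent);
	nm->m_pkthdr.len += sizeof(struct tcp_ackent);
	return (true);
}
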
diff --git a/sys/netinet/tcp_lro.h b/sys/netinet/tcp_lro.h
index f2c05ad4aec7..d2220a626b81 100644
--- a/sys/netinet/tcp_lro.h
+++ b/sys/netinet/tcp_lro.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2006, Myricom Inc.
* Copyright (c) 2008, Intel Corporation.
- * Copyright (c) 2016 Mellanox Technologies.
+ * Copyright (c) 2016-2021 Mellanox Technologies.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,59 +40,110 @@
#define TCP_LRO_ENTRIES 8
#endif
+/*
+ * Flags for the ACK entry used by compression.
+ * The bottom 8 bits hold th_flags. LRO itself
+ * adds only the TSTMP flags, to indicate which
+ * type of timestamp is filled in, and the
+ * HAS_TSTMP flag to indicate that the TCP
+ * timestamp option is valid.
+ *
+ * The other 5 flag bits are for processing
+ * by a stack.
+ */
+#define TSTMP_LRO 0x0100
+#define TSTMP_HDWR 0x0200
+#define HAS_TSTMP 0x0400
+
+struct inpcb;
+
+union lro_address {
+ u_long raw[1];
+ struct {
+ uint16_t lro_type; /* internal */
+#define LRO_TYPE_NONE 0
+#define LRO_TYPE_IPV4_TCP 1
+#define LRO_TYPE_IPV6_TCP 2
+#define LRO_TYPE_IPV4_UDP 3
+#define LRO_TYPE_IPV6_UDP 4
+ uint16_t vlan_id; /* VLAN identifier */
+ uint16_t s_port; /* source TCP/UDP port */
+ uint16_t d_port; /* destination TCP/UDP port */
+ uint32_t vxlan_vni; /* VXLAN virtual network identifier */
+ union {
+#ifdef INET
+ struct in_addr v4;
+#endif
+#ifdef INET6
+ struct in6_addr v6;
+#endif
+ } s_addr; /* source IPv4/IPv6 address */
+ union {
+#ifdef INET
+ struct in_addr v4;
+#endif
+#ifdef INET6
+ struct in6_addr v6;
+#endif
+ } d_addr; /* destination IPv4/IPv6 address */
+ };
+} __aligned(sizeof(u_long));
+
+#define LRO_RAW_ADDRESS_MAX \
+ (sizeof(union lro_address) / sizeof(u_long))
+
+/* Optimize address comparison by comparing one unsigned long at a time: */
+
+static inline bool
+lro_address_compare(const union lro_address *pa, const union lro_address *pb)
+{
+ if (pa->lro_type == LRO_TYPE_NONE && pb->lro_type == LRO_TYPE_NONE) {
+ return (true);
+ } else for (unsigned i = 0; i < LRO_RAW_ADDRESS_MAX; i++) {
+ if (pa->raw[i] != pb->raw[i])
+ return (false);
+ }
+ return (true);
+}
+
+struct lro_parser {
+ union lro_address data;
+ union {
+ uint8_t *l3;
+ struct ip *ip4;
+ struct ip6_hdr *ip6;
+ };
+ union {
+ uint8_t *l4;
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ };
+ uint16_t total_hdr_len;
+};
+
+/* This structure is zeroed frequently; try to keep it small. */
struct lro_entry {
LIST_ENTRY(lro_entry) next;
LIST_ENTRY(lro_entry) hash_next;
struct mbuf *m_head;
struct mbuf *m_tail;
struct mbuf *m_last_mbuf;
- struct mbuf *m_prev_last;
- union {
- struct ip *ip4;
- struct ip6_hdr *ip6;
- } leip;
- union {
- in_addr_t s_ip4;
- struct in6_addr s_ip6;
- } lesource;
- union {
- in_addr_t d_ip4;
- struct in6_addr d_ip6;
- } ledest;
- uint16_t source_port;
- uint16_t dest_port;
- uint16_t eh_type; /* EthernetHeader type. */
- uint16_t append_cnt;
- uint32_t p_len; /* IP header payload length. */
- uint32_t ulp_csum; /* TCP, etc. checksum. */
+ struct lro_parser outer;
+ struct lro_parser inner;
uint32_t next_seq; /* tcp_seq */
uint32_t ack_seq; /* tcp_seq */
uint32_t tsval;
uint32_t tsecr;
- uint32_t tcp_tot_p_len; /* TCP payload length of chain */
+ uint16_t compressed;
+ uint16_t uncompressed;
uint16_t window;
uint16_t timestamp; /* flag, not a TCP hdr field. */
- uint16_t need_wakeup;
- uint16_t mbuf_cnt; /* Count of mbufs collected see note */
- uint16_t mbuf_appended;
- struct timeval mtime;
+ sbintime_t alloc_time; /* time when entry was allocated */
};
-/*
- * Note: The mbuf_cnt field tracks our number of mbufs added to the m_next
- * list. Each mbuf counted can have data and of course it will
- * have an ack as well (by defintion any inbound tcp segment will
- * have an ack value. We use this count to tell us how many ACK's
- * are present for our ack-count threshold. If we exceed that or
- * the data threshold we will wake up the endpoint.
- */
-LIST_HEAD(lro_head, lro_entry);
-#define le_ip4 leip.ip4
-#define le_ip6 leip.ip6
-#define source_ip4 lesource.s_ip4
-#define dest_ip4 ledest.d_ip4
-#define source_ip6 lesource.s_ip6
-#define dest_ip6 ledest.d_ip6
+LIST_HEAD(lro_head, lro_entry);
struct lro_mbuf_sort {
uint64_t seq;
@@ -103,6 +154,7 @@ struct lro_mbuf_sort {
struct lro_ctrl {
struct ifnet *ifp;
struct lro_mbuf_sort *lro_mbuf_data;
+ sbintime_t lro_last_queue_time; /* last time data was queued */
uint64_t lro_queued;
uint64_t lro_flushed;
uint64_t lro_bad_csum;
@@ -118,7 +170,24 @@ struct lro_ctrl {
struct lro_head lro_free;
};
-#define TCP_LRO_LENGTH_MAX 65535
+struct tcp_ackent {
+	uint64_t timestamp;	/* hardware or software timestamp, valid if TSTMP_LRO or TSTMP_HDWR set */
+	uint32_t seq;		/* th_seq value */
+	uint32_t ack;		/* th_ack value */
+	uint32_t ts_value;	/* TCP timestamp option value, valid if HAS_TSTMP is set */
+	uint32_t ts_echo;	/* TCP timestamp option echo, valid if HAS_TSTMP is set */
+	uint16_t win;		/* TCP window */
+	uint16_t flags;		/* th_flags plus the TSTMP and HAS_TSTMP flags above */
+	uint8_t codepoint;	/* IP level codepoint including ECN bits */
+	uint8_t ack_val_set;	/* Classification of ack used by the stack */
+	uint8_t pad[2];		/* Pad to a 32 byte boundary */
+};
+
+/* We use two M_PROTO flags on the mbuf */
+#define M_ACKCMP	M_PROTO4 /* Indicates LRO is sending in an ACK-compression mbuf */
+#define M_LRO_EHDRSTRP	M_PROTO6 /* Indicates that LRO has stripped the Ethernet header */
+
+#define TCP_LRO_LENGTH_MAX (65535 - 255) /* safe value with room for outer headers */
#define TCP_LRO_ACKCNT_MAX 65535 /* unlimited */
int tcp_lro_init(struct lro_ctrl *);
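
Given the flag layout documented above (bottom 8 bits hold th_flags, TSTMP_LRO/TSTMP_HDWR mark an arrival stamp, HAS_TSTMP marks a valid TCP timestamp option), a hedged sketch of how a stack-side consumer might pick apart one record follows; the function and its output parameters are illustrative, not part of this patch.

#include <sys/types.h>
#include <netinet/tcp_lro.h>	/* struct tcp_ackent, TSTMP_*, HAS_TSTMP */

/* Hypothetical decoder for one compressed-ACK record. */
static void
ackcmp_parse(const struct tcp_ackent *ae, uint8_t *th_flags,
    bool *has_arrival_stamp, bool *has_ts_option)
{
	/* The bottom 8 bits carry the original TCP header flags. */
	*th_flags = ae->flags & 0xff;
	/* ae->timestamp is valid for either software or hardware stamps. */
	*has_arrival_stamp = (ae->flags & (TSTMP_LRO | TSTMP_HDWR)) != 0;
	/* ae->ts_value and ae->ts_echo are valid only with HAS_TSTMP. */
	*has_ts_option = (ae->flags & HAS_TSTMP) != 0;
}
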
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index c49ff680d201..dff7767cd9cf 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -1233,9 +1233,7 @@ tcp_init(void)
tcp_inp_lro_direct_queue = counter_u64_alloc(M_WAITOK);
tcp_inp_lro_wokeup_queue = counter_u64_alloc(M_WAITOK);
tcp_inp_lro_compressed = counter_u64_alloc(M_WAITOK);
- tcp_inp_lro_single_push = counter_u64_alloc(M_WAITOK);
tcp_inp_lro_locks_taken = counter_u64_alloc(M_WAITOK);
- tcp_inp_lro_sack_wake = counter_u64_alloc(M_WAITOK);
#ifdef TCPPCAP
tcp_pcap_init();
#endif
diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
index 48762e231065..c9b337b3c3d8 100644
--- a/sys/netinet/tcp_var.h
+++ b/sys/netinet/tcp_var.h
@@ -983,9 +983,7 @@ int tcp_default_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb
extern counter_u64_t tcp_inp_lro_direct_queue;
extern counter_u64_t tcp_inp_lro_wokeup_queue;
extern counter_u64_t tcp_inp_lro_compressed;
-extern counter_u64_t tcp_inp_lro_single_push;
extern counter_u64_t tcp_inp_lro_locks_taken;
-extern counter_u64_t tcp_inp_lro_sack_wake;
#ifdef NETFLIX_EXP_DETECTION
/* Various SACK attack thresholds */
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index 9c18ebd4943d..2ed7f6788c39 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -203,8 +203,10 @@ struct pkthdr {
#define tso_segsz PH_per.sixteen[1] /* inbound after LRO */
#define lro_nsegs tso_segsz /* inbound after LRO */
#define csum_data PH_per.thirtytwo[1] /* inbound from hardware up */
-#define lro_len PH_loc.sixteen[0] /* inbound during LRO (no reassembly) */
-#define lro_csum PH_loc.sixteen[1] /* inbound during LRO (no reassembly) */
+#define lro_tcp_d_len PH_loc.sixteen[0] /* inbound during LRO (no reassembly) */
+#define lro_tcp_d_csum PH_loc.sixteen[1] /* inbound during LRO (no reassembly) */
+#define lro_tcp_h_off PH_loc.sixteen[2] /* inbound during LRO (no reassembly) */
+#define lro_etype PH_loc.sixteen[3] /* inbound during LRO (no reassembly) */
/* Note PH_loc is used during IP reassembly (all 8 bytes as a ptr) */
/*
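
The renamed PH_loc fields above are filled in by tcp_lro_rx_common() in the tcp_lro.c hunk earlier: lro_tcp_h_off is the TCP header offset from the start of the mbuf data, while lro_tcp_d_len and lro_tcp_d_csum hold the payload length and the data-only checksum. A hypothetical accessor illustrating how a consumer could recover that state:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <netinet/tcp.h>

/* Hypothetical helper: recover the TCP state LRO stashed in the pkthdr. */
static struct tcphdr *
lro_mbuf_tcp_header(struct mbuf *m, int *data_len, uint16_t *data_csum)
{
	*data_len = m->m_pkthdr.lro_tcp_d_len;
	*data_csum = m->m_pkthdr.lro_tcp_d_csum;
	/* lro_tcp_h_off is relative to the start of the mbuf data. */
	return ((struct tcphdr *)(mtod(m, uint8_t *) +
	    m->m_pkthdr.lro_tcp_h_off));
}
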
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 3d1ab448eae4..127c04becf58 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -60,7 +60,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1300505 /* Master, propagated to newvers */
+#define __FreeBSD_version 1300506 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,