author    Michael Tuexen <tuexen@FreeBSD.org>    2018-12-20 16:05:30 +0000
committer Michael Tuexen <tuexen@FreeBSD.org>    2018-12-20 16:05:30 +0000
commit    09423f72fd97e9ccac645b87ca58135170167bb8 (patch)
tree      fba7595fa469719023d58d374914ebbf0b3ad9aa
parent    eacff37a611fc5ac2d9c6639bde7a835412d3004 (diff)
Fix a regression in the TCP handling of received segments.
When receiving TCP segments, the stack protects itself by limiting the resources allocated for a TCP connection. This patch adds an exception to these limitations for the TCP segment which is the next expected in-sequence segment. Without this patch, TCP connections may stall and finally fail in some cases of packet loss.

Reported by:    jhb@
Reviewed by:    jtl@, rrs@
MFC after:      3 days
Sponsored by:   Netflix, Inc.
Differential Revision:  https://reviews.freebsd.org/D18580
Notes:
    svn path=/head/; revision=342280
-rw-r--r--    sys/netinet/tcp_reass.c    26
1 file changed, 22 insertions(+), 4 deletions(-)
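
The following is a minimal user-space C sketch, not FreeBSD kernel code, illustrating the idea of the fix: the reassembly memory limit is skipped when the arriving segment is exactly the next expected in-sequence segment on an established connection, so such a segment can still be delivered even when out-of-order data has filled the queue. All structure, field, and function names below (struct conn, is_next_in_sequence, accept_segment) are hypothetical illustrations, not the actual kernel identifiers.

/*
 * Sketch of the exception added by this commit: the next expected
 * in-sequence segment bypasses the reassembly resource limit.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct conn {
	uint32_t rcv_nxt;	/* next sequence number expected from the peer */
	bool	 established;	/* connection has reached ESTABLISHED */
	size_t	 queue_bytes;	/* bytes currently held in the reassembly queue */
	size_t	 queue_limit;	/* cap on reassembly queue memory */
};

/* True when the segment starting at seq is the next in-sequence data. */
static bool
is_next_in_sequence(const struct conn *c, uint32_t seq)
{
	return (c->established && seq == c->rcv_nxt);
}

/*
 * Decide whether a segment of 'len' bytes may be accepted.  Before the
 * fix, the memory limit applied to every segment; with the fix, the next
 * expected in-sequence segment is exempt, so a connection cannot stall
 * just because out-of-order data has exhausted the queue budget.
 */
static bool
accept_segment(const struct conn *c, uint32_t seq, size_t len)
{
	if (is_next_in_sequence(c, seq))
		return (true);		/* exception added by the fix */
	return (c->queue_bytes + len <= c->queue_limit);
}

int
main(void)
{
	struct conn c = {
		.rcv_nxt = 1000, .established = true,
		.queue_bytes = 60000, .queue_limit = 65536
	};

	/* Out-of-order segment that would overflow the queue: rejected. */
	printf("ooo segment accepted: %d\n", accept_segment(&c, 5000, 8192));
	/* Next in-sequence segment of the same size: accepted despite limit. */
	printf("in-seq segment accepted: %d\n", accept_segment(&c, 1000, 8192));
	return (0);
}
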
diff --git a/sys/netinet/tcp_reass.c b/sys/netinet/tcp_reass.c
index 12b717bc9549..e59565478e8d 100644
--- a/sys/netinet/tcp_reass.c
+++ b/sys/netinet/tcp_reass.c
@@ -579,7 +579,8 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
*/
lenofoh = tcp_reass_overhead_of_chain(m, &mlast);
sb = &tp->t_inpcb->inp_socket->so_rcv;
- if ((sb->sb_mbcnt + tp->t_segqmbuflen + lenofoh) > sb->sb_mbmax) {
+ if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) &&
+ (sb->sb_mbcnt + tp->t_segqmbuflen + lenofoh) > sb->sb_mbmax) {
/* No room */
TCPSTAT_INC(tcps_rcvreassfull);
#ifdef TCP_REASS_COUNTERS
@@ -588,6 +589,11 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
#ifdef TCP_REASS_LOGGING
tcp_log_reassm(tp, NULL, NULL, th->th_seq, lenofoh, TCP_R_LOG_LIMIT_REACHED, 0);
#endif
+ if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
+ log(LOG_DEBUG, "%s; %s: mbuf count limit reached, "
+ "segment dropped\n", s, __func__);
+ free(s, M_TCPLOG);
+ }
m_freem(m);
*tlenp = 0;
#ifdef TCP_REASS_LOGGING
@@ -936,6 +942,20 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
* is understood.
*/
new_entry:
+ if (th->th_seq == tp->rcv_nxt && TCPS_HAVEESTABLISHED(tp->t_state)) {
+ tp->rcv_nxt += *tlenp;
+ flags = th->th_flags & TH_FIN;
+ TCPSTAT_INC(tcps_rcvoopack);
+ TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ m_freem(m);
+ } else {
+ sbappendstream_locked(&so->so_rcv, m, 0);
+ }
+ sorwakeup_locked(so);
+ return (flags);
+ }
if (tcp_new_limits) {
if ((tp->t_segqlen > tcp_reass_queue_guard) &&
(*tlenp < MSIZE)) {
@@ -960,9 +980,7 @@ new_entry:
return (0);
}
} else {
-
- if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) &&
- tp->t_segqlen >= min((so->so_rcv.sb_hiwat / tp->t_maxseg) + 1,
+ if (tp->t_segqlen >= min((so->so_rcv.sb_hiwat / tp->t_maxseg) + 1,
tcp_reass_maxqueuelen)) {
TCPSTAT_INC(tcps_rcvreassfull);
*tlenp = 0;