Diffstat (limited to 'sys/netinet/tcp_stacks')
-rw-r--r--  sys/netinet/tcp_stacks/bbr.c               98
-rw-r--r--  sys/netinet/tcp_stacks/rack.c             342
-rw-r--r--  sys/netinet/tcp_stacks/rack_bbr_common.c   16
-rw-r--r--  sys/netinet/tcp_stacks/rack_bbr_common.h    4
-rw-r--r--  sys/netinet/tcp_stacks/rack_pcm.c          14
-rw-r--r--  sys/netinet/tcp_stacks/sack_filter.c        8
-rw-r--r--  sys/netinet/tcp_stacks/sack_filter.h        2
-rw-r--r--  sys/netinet/tcp_stacks/tailq_hash.c         2
-rw-r--r--  sys/netinet/tcp_stacks/tcp_bbr.h            2
-rw-r--r--  sys/netinet/tcp_stacks/tcp_rack.h           3
10 files changed, 243 insertions, 248 deletions
diff --git a/sys/netinet/tcp_stacks/bbr.c b/sys/netinet/tcp_stacks/bbr.c
index 17a0744961ce..f2d7867df9b4 100644
--- a/sys/netinet/tcp_stacks/bbr.c
+++ b/sys/netinet/tcp_stacks/bbr.c
@@ -78,8 +78,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
@@ -2173,7 +2171,7 @@ bbr_log_rtt_sample(struct tcp_bbr *bbr, uint32_t rtt, uint32_t tsin)
log.u_bbr.flex3 = bbr->r_ctl.rc_ack_hdwr_delay;
log.u_bbr.flex4 = bbr->rc_tp->ts_offset;
log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state;
- log.u_bbr.pkts_out = tcp_tv_to_mssectick(&bbr->rc_tv);
+ log.u_bbr.pkts_out = tcp_tv_to_msec(&bbr->rc_tv);
log.u_bbr.flex6 = tsin;
log.u_bbr.flex7 = 0;
log.u_bbr.flex8 = bbr->rc_ack_was_delayed;
@@ -2241,13 +2239,13 @@ bbr_log_ack_event(struct tcp_bbr *bbr, struct tcphdr *th, struct tcpopt *to, uin
mbuf_tstmp2timespec(m, &ts);
tv.tv_sec = ts.tv_sec;
tv.tv_usec = ts.tv_nsec / 1000;
- log.u_bbr.lt_epoch = tcp_tv_to_usectick(&tv);
+ log.u_bbr.lt_epoch = tcp_tv_to_usec(&tv);
} else {
log.u_bbr.lt_epoch = 0;
}
if (m->m_flags & M_TSTMP_LRO) {
mbuf_tstmp2timeval(m, &tv);
- log.u_bbr.flex5 = tcp_tv_to_usectick(&tv);
+ log.u_bbr.flex5 = tcp_tv_to_usec(&tv);
} else {
/* No arrival timestamp */
log.u_bbr.flex5 = 0;
@@ -5126,8 +5124,8 @@ bbr_timeout_rxt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
tp->t_maxseg = tp->t_pmtud_saved_maxseg;
if (tp->t_maxseg < V_tcp_mssdflt) {
/*
- * The MSS is so small we should not
- * process incoming SACK's since we are
+ * The MSS is so small we should not
+ * process incoming SACK's since we are
* subject to attack in such a case.
*/
tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT;
@@ -6792,7 +6790,7 @@ bbr_update_rtt(struct tcpcb *tp, struct tcp_bbr *bbr,
(ack_type == BBR_CUM_ACKED) &&
(to->to_flags & TOF_TS) &&
(to->to_tsecr != 0)) {
- t = tcp_tv_to_mssectick(&bbr->rc_tv) - to->to_tsecr;
+ t = tcp_tv_to_msec(&bbr->rc_tv) - to->to_tsecr;
if (t < 1)
t = 1;
t *= MS_IN_USEC;
@@ -7330,7 +7328,7 @@ bbr_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th,
uint32_t ts, now, rtt;
ts = bbr_ts_convert(to->to_tsecr);
- now = bbr_ts_convert(tcp_tv_to_mssectick(&bbr->rc_tv));
+ now = bbr_ts_convert(tcp_tv_to_msec(&bbr->rc_tv));
rtt = now - ts;
if (rtt < 1)
rtt = 1;
@@ -7863,7 +7861,7 @@ nothing_left:
/* tcp_close will kill the inp pre-log the Reset */
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
tp = tcp_close(tp);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
BBR_STAT_INC(bbr_dropped_af_data);
return (1);
}
@@ -8461,7 +8459,7 @@ bbr_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
}
if ((to->to_flags & TOF_TS) != 0 &&
SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
- tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->ts_recent_age = tcp_tv_to_msec(&bbr->rc_tv);
tp->ts_recent = to->to_tsval;
}
/*
@@ -8763,7 +8761,7 @@ bbr_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
(SEQ_LEQ(th->th_ack, tp->iss) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
@@ -8893,7 +8891,7 @@ bbr_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
if ((to->to_flags & TOF_TS) != 0) {
uint32_t t, rtt;
- t = tcp_tv_to_mssectick(&bbr->rc_tv);
+ t = tcp_tv_to_msec(&bbr->rc_tv);
if (TSTMP_GEQ(t, to->to_tsecr)) {
rtt = t - to->to_tsecr;
if (rtt == 0) {
@@ -8965,7 +8963,7 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
(SEQ_LEQ(th->th_ack, tp->snd_una) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if (tp->t_flags & TF_FASTOPEN) {
@@ -8977,7 +8975,7 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
} else if (thflags & TH_SYN) {
/* non-initial SYN is ignored */
@@ -9010,7 +9008,7 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if (SEQ_LT(th->th_seq, tp->irs)) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
@@ -9034,7 +9032,7 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
((thflags & (TH_SYN | TH_FIN)) != 0))) {
- tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->ts_recent_age = tcp_tv_to_msec(&bbr->rc_tv);
tp->ts_recent = to->to_tsval;
}
tp->snd_wnd = tiwin;
@@ -9067,7 +9065,7 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
if ((to->to_flags & TOF_TS) != 0) {
uint32_t t, rtt;
- t = tcp_tv_to_mssectick(&bbr->rc_tv);
+ t = tcp_tv_to_msec(&bbr->rc_tv);
if (TSTMP_GEQ(t, to->to_tsecr)) {
rtt = t - to->to_tsecr;
if (rtt == 0) {
@@ -9258,7 +9256,7 @@ bbr_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
((thflags & (TH_SYN | TH_FIN)) != 0))) {
- tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->ts_recent_age = tcp_tv_to_msec(&bbr->rc_tv);
tp->ts_recent = to->to_tsval;
}
/*
@@ -9288,7 +9286,7 @@ bbr_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9355,7 +9353,7 @@ bbr_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
((thflags & (TH_SYN | TH_FIN)) != 0))) {
- tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->ts_recent_age = tcp_tv_to_msec(&bbr->rc_tv);
tp->ts_recent = to->to_tsval;
}
/*
@@ -9385,7 +9383,7 @@ bbr_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9405,7 +9403,7 @@ close_now:
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
tp = tcp_close(tp);
KMOD_TCPSTAT_INC(tcps_rcvafterclose);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
+ ctf_do_dropwithreset(m, tp, th, *tlen);
return (1);
}
if (sbavail(&so->so_snd) == 0)
@@ -9486,7 +9484,7 @@ bbr_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
((thflags & (TH_SYN | TH_FIN)) != 0))) {
- tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->ts_recent_age = tcp_tv_to_msec(&bbr->rc_tv);
tp->ts_recent = to->to_tsval;
}
/*
@@ -9535,7 +9533,7 @@ bbr_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9602,7 +9600,7 @@ bbr_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
((thflags & (TH_SYN | TH_FIN)) != 0))) {
- tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->ts_recent_age = tcp_tv_to_msec(&bbr->rc_tv);
tp->ts_recent = to->to_tsval;
}
/*
@@ -9637,7 +9635,7 @@ bbr_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9704,7 +9702,7 @@ bbr_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
((thflags & (TH_SYN | TH_FIN)) != 0))) {
- tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->ts_recent_age = tcp_tv_to_msec(&bbr->rc_tv);
tp->ts_recent = to->to_tsval;
}
/*
@@ -9739,7 +9737,7 @@ bbr_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9818,7 +9816,7 @@ bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
((thflags & (TH_SYN | TH_FIN)) != 0))) {
- tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->ts_recent_age = tcp_tv_to_msec(&bbr->rc_tv);
tp->ts_recent = to->to_tsval;
}
/*
@@ -9848,7 +9846,7 @@ bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -10141,7 +10139,7 @@ bbr_init(struct tcpcb *tp, void **ptr)
* flags.
*/
bbr_stop_all_timers(tp, bbr);
- /*
+ /*
* Validate the timers are not in usec, if they are convert.
* BBR should in theory move to USEC and get rid of a
* lot of the TICKS_2 calls.. but for now we stay
@@ -10150,7 +10148,7 @@ bbr_init(struct tcpcb *tp, void **ptr)
tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
TCPT_RANGESET(tp->t_rxtcur,
((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
- tp->t_rttmin, TCPTV_REXMTMAX);
+ tp->t_rttmin, tcp_rexmit_max);
bbr_start_hpts_timer(bbr, tp, cts, 5, 0, 0);
return (0);
}
@@ -11327,7 +11325,7 @@ bbr_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
mbuf_tstmp2timespec(m, &ts);
bbr->rc_tv.tv_sec = ts.tv_sec;
bbr->rc_tv.tv_usec = ts.tv_nsec / 1000;
- bbr->r_ctl.rc_rcvtime = cts = tcp_tv_to_usectick(&bbr->rc_tv);
+ bbr->r_ctl.rc_rcvtime = cts = tcp_tv_to_usec(&bbr->rc_tv);
} else if (m->m_flags & M_TSTMP_LRO) {
/* Next the arrival timestamp */
struct timespec ts;
@@ -11335,7 +11333,7 @@ bbr_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
mbuf_tstmp2timespec(m, &ts);
bbr->rc_tv.tv_sec = ts.tv_sec;
bbr->rc_tv.tv_usec = ts.tv_nsec / 1000;
- bbr->r_ctl.rc_rcvtime = cts = tcp_tv_to_usectick(&bbr->rc_tv);
+ bbr->r_ctl.rc_rcvtime = cts = tcp_tv_to_usec(&bbr->rc_tv);
} else {
/*
* Ok just get the current time.
@@ -11376,7 +11374,7 @@ bbr_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
*/
if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
to.to_tsecr -= tp->ts_offset;
- if (TSTMP_GT(to.to_tsecr, tcp_tv_to_mssectick(&bbr->rc_tv)))
+ if (TSTMP_GT(to.to_tsecr, tcp_tv_to_msec(&bbr->rc_tv)))
to.to_tsecr = 0;
}
/*
@@ -11414,7 +11412,7 @@ bbr_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
(tp->t_flags & TF_REQ_TSTMP)) {
tp->t_flags |= TF_RCVD_TSTMP;
tp->ts_recent = to.to_tsval;
- tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->ts_recent_age = tcp_tv_to_msec(&bbr->rc_tv);
} else
tp->t_flags &= ~TF_REQ_TSTMP;
if (to.to_flags & TOF_MSS)
@@ -11510,7 +11508,7 @@ bbr_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
(SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
if (tiwin > bbr->r_ctl.rc_high_rwnd)
@@ -11544,7 +11542,7 @@ bbr_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
bbr_check_bbr_for_state(bbr, cts, __LINE__, (bbr->r_ctl.rc_lost - lost));
if (nxt_pkt == 0) {
if ((bbr->r_wanted_output != 0) ||
- (tp->t_flags & TF_ACKNOW)) {
+ (tp->t_flags & TF_ACKNOW)) {
bbr->rc_output_starts_timer = 0;
did_out = 1;
@@ -11870,7 +11868,7 @@ bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
/* We take a cache hit here */
memcpy(&bbr->rc_tv, tv, sizeof(struct timeval));
- cts = tcp_tv_to_usectick(&bbr->rc_tv);
+ cts = tcp_tv_to_usec(&bbr->rc_tv);
inp = bbr->rc_inp;
hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS);
tp->t_flags2 &= ~TF2_HPTS_CALLS;
@@ -12885,7 +12883,7 @@ send:
/* Timestamps. */
if ((tp->t_flags & TF_RCVD_TSTMP) ||
((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
- to.to_tsval = tcp_tv_to_mssectick(&bbr->rc_tv) + tp->ts_offset;
+ to.to_tsval = tcp_tv_to_msec(&bbr->rc_tv) + tp->ts_offset;
to.to_tsecr = tp->ts_recent;
to.to_flags |= TOF_TS;
local_options += TCPOLEN_TIMESTAMP + 2;
@@ -12893,7 +12891,7 @@ send:
/* Set receive buffer autosizing timestamp. */
if (tp->rfbuf_ts == 0 &&
(so->so_rcv.sb_flags & SB_AUTOSIZE))
- tp->rfbuf_ts = tcp_tv_to_mssectick(&bbr->rc_tv);
+ tp->rfbuf_ts = tcp_tv_to_msec(&bbr->rc_tv);
/* Selective ACK's. */
if (flags & TH_SYN)
to.to_flags |= TOF_SACKPERM;
@@ -13172,11 +13170,7 @@ send:
mb, moff, &len,
if_hw_tsomaxsegcount,
if_hw_tsomaxsegsize, msb,
- ((rsm == NULL) ? hw_tls : 0)
-#ifdef NETFLIX_COPY_ARGS
- , NULL, NULL
-#endif
- );
+ ((rsm == NULL) ? hw_tls : 0));
if (len <= maxseg) {
/*
* Must have ran out of mbufs for the copy
@@ -13806,8 +13800,8 @@ nomore:
tp->t_maxseg = old_maxseg - 40;
if (tp->t_maxseg < V_tcp_mssdflt) {
/*
- * The MSS is so small we should not
- * process incoming SACK's since we are
+ * The MSS is so small we should not
+ * process incoming SACK's since we are
* subject to attack in such a case.
*/
tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT;
@@ -14127,17 +14121,17 @@ bbr_switch_failed(struct tcpcb *tp)
toval = bbr->rc_pacer_started - cts;
} else {
/* one slot please */
- toval = HPTS_TICKS_PER_SLOT;
+ toval = HPTS_USECS_PER_SLOT;
}
} else if (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
if (TSTMP_GT(bbr->r_ctl.rc_timer_exp, cts)) {
toval = bbr->r_ctl.rc_timer_exp - cts;
} else {
/* one slot please */
- toval = HPTS_TICKS_PER_SLOT;
+ toval = HPTS_USECS_PER_SLOT;
}
} else
- toval = HPTS_TICKS_PER_SLOT;
+ toval = HPTS_USECS_PER_SLOT;
(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval),
__LINE__, &diag);
bbr_log_hpts_diag(bbr, cts, &diag);
diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
index f5bc435890e7..11ef5ba706c5 100644
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -40,7 +40,6 @@
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
-#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h> /* for proc0 declaration */
@@ -78,8 +77,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
@@ -198,7 +195,7 @@ static uint32_t rack_pcm_blast = 0;
static uint32_t rack_pcm_is_enabled = 1;
static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto -> rec */
-static uint32_t rack_gp_gain_req = 1200; /* Amount percent wise required to gain to record a round has "gaining" */
+static uint32_t rack_gp_gain_req = 1200; /* Amount percent wise required to gain to record a round as "gaining" */
static uint32_t rack_rnd_cnt_req = 0x10005; /* Default number of rounds if we are below rack_gp_gain_req where we exit ss */
@@ -605,7 +602,7 @@ rack_get_lt_bw(struct tcp_rack *rack)
/* Include all the current bytes too */
microuptime(&tv);
bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq);
- tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
+ tim += (tcp_tv_to_lusec(&tv) - rack->r_ctl.lt_timemark);
}
if ((bytes != 0) && (tim != 0))
return ((bytes * (uint64_t)1000000) / tim);
@@ -621,7 +618,7 @@ rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
struct tcpcb *tp;
uint32_t old_beta;
uint32_t old_beta_ecn;
- int error, failed = 0;
+ int error = 0, failed = 0;
tp = rack->rc_tp;
if (tp->t_cc == NULL) {
@@ -684,7 +681,7 @@ out:
struct newreno *ptr;
ptr = ((struct newreno *)tp->t_ccv.cc_data);
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex1 = ptr->beta;
log.u_bbr.flex2 = ptr->beta_ecn;
@@ -938,7 +935,7 @@ rack_init_sysctls(void)
SYSCTL_ADD_U32(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_probertt),
OID_AUTO, "time_between", CTLFLAG_RW,
- & rack_time_between_probertt, 96000000,
+ &rack_time_between_probertt, 96000000,
"How many useconds between the lowest rtt falling must past before we enter probertt");
SYSCTL_ADD_U32(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_probertt),
@@ -2246,7 +2243,7 @@ rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped)
ent = rack->r_ctl.rc_last_sft;
microuptime(&tv);
- timenow = tcp_tv_to_lusectick(&tv);
+ timenow = tcp_tv_to_lusec(&tv);
if (timenow >= ent->deadline) {
/* No time left we do DGP only */
rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
@@ -2678,7 +2675,7 @@ rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t
*/
return;
}
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.flex1 = tsused;
log.u_bbr.flex2 = thresh;
log.u_bbr.flex3 = rsm->r_flags;
@@ -2709,7 +2706,7 @@ rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.flex1 = rack->rc_tp->t_srtt;
log.u_bbr.flex2 = to;
log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
@@ -2752,7 +2749,7 @@ rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rs
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex8 = to_num;
log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
@@ -2792,7 +2789,7 @@ rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack,
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.flex8 = flag;
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.cur_del_rate = (uintptr_t)prev;
@@ -2840,7 +2837,7 @@ rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t l
if (tcp_bblogging_on(tp)) {
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex1 = t;
log.u_bbr.flex2 = len;
@@ -2889,7 +2886,7 @@ rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t l
log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
- log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
+ log.u_bbr.bw_inuse = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time);
log.u_bbr.bw_inuse <<= 32;
if (rsm)
log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
@@ -3013,7 +3010,7 @@ rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick,
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex1 = line;
log.u_bbr.flex2 = tick;
@@ -3042,7 +3039,7 @@ rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_
if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex1 = slot;
if (rack->rack_no_prr)
@@ -3149,7 +3146,7 @@ rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, ui
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex1 = slot;
log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
@@ -3185,7 +3182,7 @@ rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32
if (tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex1 = line;
log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
@@ -3230,7 +3227,7 @@ rack_log_alt_to_to_cancel(struct tcp_rack *rack,
/* No you can't use 1, its for the real to cancel */
return;
}
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex1 = flex1;
log.u_bbr.flex2 = flex2;
@@ -3255,7 +3252,7 @@ rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.flex1 = timers;
log.u_bbr.flex2 = ret;
log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
@@ -3285,7 +3282,7 @@ rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line)
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
if (rack->rack_no_prr)
@@ -3480,16 +3477,16 @@ static void
rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
{
if (rsm->r_flags & RACK_APP_LIMITED) {
- if (rack->r_ctl.rc_app_limited_cnt > 0) {
- rack->r_ctl.rc_app_limited_cnt--;
- }
+ KASSERT((rack->r_ctl.rc_app_limited_cnt > 0),
+ ("app_cnt %u, rsm %p", rack->r_ctl.rc_app_limited_cnt, rsm));
+ rack->r_ctl.rc_app_limited_cnt--;
}
if (rsm->r_limit_type) {
/* currently there is only one limit type */
rack->r_ctl.rc_num_split_allocs--;
}
if (rsm == rack->r_ctl.rc_first_appl) {
- rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start);
+ rack->r_ctl.cleared_app_ack_seq = rsm->r_end;
rack->r_ctl.cleared_app_ack = 1;
if (rack->r_ctl.rc_app_limited_cnt == 0)
rack->r_ctl.rc_first_appl = NULL;
@@ -3554,8 +3551,7 @@ rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
* earlier.
*
* So lets calculate the BDP with the "known" b/w using
- * the SRTT has our rtt and then multiply it by the
- * goal.
+ * the SRTT as our rtt and then multiply it by the goal.
*/
bw = rack_get_bw(rack);
srtt = (uint64_t)tp->t_srtt;
@@ -3646,7 +3642,7 @@ rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_
}
/* Now what about time? */
srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
- tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
+ tim = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) {
/*
* We do not allow a measurement if we are in recovery
@@ -4118,7 +4114,7 @@ rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.flex1 = line;
log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
@@ -4864,7 +4860,7 @@ rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, ui
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex1 = add_part;
log.u_bbr.flex2 = sub_part;
@@ -4893,7 +4889,7 @@ rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
uint64_t resid_bw, subpart = 0, addpart = 0, srtt;
int did_add = 0;
- us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
+ us_cts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time);
segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
if (TSTMP_GEQ(us_cts, tp->gput_ts))
tim = us_cts - tp->gput_ts;
@@ -5214,7 +5210,7 @@ rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex1 = rack->r_ctl.current_round;
log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise;
@@ -5250,7 +5246,7 @@ rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex1 = rack->r_ctl.current_round;
log.u_bbr.flex2 = (uint32_t)gp_est;
@@ -5357,7 +5353,7 @@ skip_measurement:
rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
- tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
+ tp->gput_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time);
rack->app_limited_needs_set = 0;
tp->gput_seq = th_ack;
if (rack->in_probe_rtt)
@@ -5492,7 +5488,7 @@ rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint
rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq);
rack->r_ctl.lt_seq = tp->snd_max;
- tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
+ tmark = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time);
if (tmark >= rack->r_ctl.lt_timemark) {
rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
}
@@ -5533,7 +5529,7 @@ rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex1 = th_ack;
log.u_bbr.flex2 = tp->t_ccv.flags;
@@ -5648,7 +5644,7 @@ rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex1 = th_ack;
log.u_bbr.flex2 = tp->t_ccv.flags;
@@ -5793,7 +5789,7 @@ rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line)
tp->t_badrxtwin = 0;
break;
}
- if ((CC_ALGO(tp)->cong_signal != NULL) &&
+ if ((CC_ALGO(tp)->cong_signal != NULL) &&
(type != CC_RTO)){
tp->t_ccv.curack = ack;
CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
@@ -5904,7 +5900,7 @@ rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int li
*
* If reorder-fade is configured, then we track the last time we saw
* re-ordering occur. If we reach the point where enough time as
- * passed we no longer consider reordering has occuring.
+ * passed we no longer consider reordering as occurring.
*
* Or if reorder-face is 0, then once we see reordering we consider
* the connection to alway be subject to reordering and just set lro
@@ -6347,7 +6343,7 @@ activate_tlp:
if (to < rack_tlp_min) {
to = rack_tlp_min;
}
- if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) {
+ if (to > TICKS_2_USEC(tcp_rexmit_max)) {
/*
* If the TLP time works out to larger than the max
* RTO lets not do TLP.. just RTO.
@@ -6392,7 +6388,7 @@ rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_se
rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq);
rack->r_ctl.lt_seq = snd_una;
- tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
+ tmark = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time);
if (tmark >= rack->r_ctl.lt_timemark) {
rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
}
@@ -6481,7 +6477,7 @@ rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.flex1 = diag->p_nxt_slot;
log.u_bbr.flex2 = diag->p_cur_slot;
log.u_bbr.flex3 = diag->slot_req;
@@ -6520,7 +6516,7 @@ rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uin
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.flex1 = sb->sb_flags;
log.u_bbr.flex2 = len;
log.u_bbr.flex3 = sb->sb_state;
@@ -6594,22 +6590,22 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* on the clock. We always have a min
* 10 slots (10 x 10 i.e. 100 usecs).
*/
- if (slot <= HPTS_TICKS_PER_SLOT) {
+ if (slot <= HPTS_USECS_PER_SLOT) {
/* We gain delay */
- rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot);
- slot = HPTS_TICKS_PER_SLOT;
+ rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - slot);
+ slot = HPTS_USECS_PER_SLOT;
} else {
/* We take off some */
- rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT);
- slot = HPTS_TICKS_PER_SLOT;
+ rack->r_ctl.rc_agg_delayed -= (slot - HPTS_USECS_PER_SLOT);
+ slot = HPTS_USECS_PER_SLOT;
}
} else {
slot -= rack->r_ctl.rc_agg_delayed;
rack->r_ctl.rc_agg_delayed = 0;
/* Make sure we have 100 useconds at minimum */
- if (slot < HPTS_TICKS_PER_SLOT) {
- rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot;
- slot = HPTS_TICKS_PER_SLOT;
+ if (slot < HPTS_USECS_PER_SLOT) {
+ rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - slot;
+ slot = HPTS_USECS_PER_SLOT;
}
if (rack->r_ctl.rc_agg_delayed == 0)
rack->r_late = 0;
@@ -7045,6 +7041,9 @@ rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
/* Push bit must go to the right edge as well */
if (rsm->r_flags & RACK_HAD_PUSH)
rsm->r_flags &= ~RACK_HAD_PUSH;
+ /* Update the count if app limited */
+ if (nrsm->r_flags & RACK_APP_LIMITED)
+ rack->r_ctl.rc_app_limited_cnt++;
/* Clone over the state of the hw_tls flag */
nrsm->r_hw_tls = rsm->r_hw_tls;
/*
@@ -7096,7 +7095,7 @@ rack_merge_rsm(struct tcp_rack *rack,
l_rsm->r_flags |= RACK_TLP;
if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
l_rsm->r_flags |= RACK_RWND_COLLAPSED;
- if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
+ if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
/*
* If both are app-limited then let the
@@ -7887,8 +7886,8 @@ drop_it:
tp->t_maxseg = tp->t_pmtud_saved_maxseg;
if (tp->t_maxseg < V_tcp_mssdflt) {
/*
- * The MSS is so small we should not
- * process incoming SACK's since we are
+ * The MSS is so small we should not
+ * process incoming SACK's since we are
* subject to attack in such a case.
*/
tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT;
@@ -8032,6 +8031,7 @@ skip_time_check:
ret = rack_timeout_rack(tp, rack, cts);
} else if (timers & PACE_TMR_TLP) {
rack->r_ctl.rc_tlp_rxt_last_time = cts;
+ rack->r_fast_output = 0;
ret = rack_timeout_tlp(tp, rack, cts, doing_tlp);
} else if (timers & PACE_TMR_RXT) {
rack->r_ctl.rc_tlp_rxt_last_time = cts;
@@ -8136,7 +8136,7 @@ rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
* remove the lost desgination and reduce the
* bytes considered lost.
*/
- rsm->r_flags &= ~RACK_WAS_LOST;
+ rsm->r_flags &= ~RACK_WAS_LOST;
KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
@@ -8778,7 +8778,7 @@ tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
}
stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
#endif
- rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
+ rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_msec(&rack->r_ctl.act_rcv_time);
/*
* the retransmit should happen at rtt + 4 * rttvar. Because of the
* way we do the smoothing, srtt and rttvar will each average +1/2
@@ -8831,7 +8831,7 @@ rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts
val = rack_probertt_lower_within * rack_time_between_probertt;
val /= 100;
- if ((rack->in_probe_rtt == 0) &&
+ if ((rack->in_probe_rtt == 0) &&
(rack->rc_skip_timely == 0) &&
((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
rack_enter_probertt(rack, us_cts);
@@ -8884,8 +8884,8 @@ rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
rack->r_ctl.rc_rack_min_rtt = 1;
}
}
- if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
- us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
+ if (TSTMP_GT(tcp_tv_to_usec(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
+ us_rtt = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
else
us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
if (us_rtt == 0)
@@ -8894,7 +8894,7 @@ rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
/* Kick the RTT to the CC */
CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
}
- rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
+ rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usec(&rack->r_ctl.act_rcv_time));
if (ack_type == SACKED) {
rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
@@ -8989,8 +8989,8 @@ rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
* we retransmitted. This is because
* we match the timestamps.
*/
- if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
- us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
+ if (TSTMP_GT(tcp_tv_to_usec(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
+ us_rtt = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
else
us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i];
CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
@@ -9183,7 +9183,7 @@ rack_need_set_test(struct tcpcb *tp,
seq = tp->gput_seq;
ts = tp->gput_ts;
rack->app_limited_needs_set = 0;
- tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
+ tp->gput_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time);
/* Do we start at a new end? */
if ((use_which == RACK_USE_BEG) &&
SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
@@ -10368,7 +10368,7 @@ more:
* and yet before retransmitting we get an ack
* which can happen due to reordering.
*/
- rsm->r_flags &= ~RACK_WAS_LOST;
+ rsm->r_flags &= ~RACK_WAS_LOST;
KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
@@ -10818,7 +10818,7 @@ rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered
changed = th_ack - rsm->r_start;
if (changed) {
rack_process_to_cumack(tp, rack, th_ack, cts, to,
- tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
+ tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time));
}
if ((to->to_flags & TOF_SACK) == 0) {
/* We are done nothing left and no sack. */
@@ -11064,7 +11064,7 @@ rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack)
* We need to skip anything already set
* to be retransmitted.
*/
- if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
+ if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
(rsm->r_flags & RACK_MUST_RXT)) {
rsm = TAILQ_NEXT(rsm, r_tnext);
continue;
@@ -11696,7 +11696,7 @@ rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
rack_log_hybrid_sends(rack, ent, __LINE__);
/* calculate the time based on the ack arrival */
data = ent->end - ent->start;
- laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
+ laa = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time);
if (ent->flags & TCP_TRK_TRACK_FLG_FSND) {
if (ent->first_send > ent->localtime)
ftim = ent->first_send;
@@ -11842,7 +11842,7 @@ rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
* less than and we have not closed our window.
*/
if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
- rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
+ rack->r_ctl.rc_reorder_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time);
if (rack->r_ctl.rc_reorder_ts == 0)
rack->r_ctl.rc_reorder_ts = 1;
}
@@ -12036,7 +12036,7 @@ rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
/* tcp_close will kill the inp pre-log the Reset */
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
tp = tcp_close(tp);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
}
@@ -12874,7 +12874,7 @@ rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
(SEQ_LEQ(th->th_ack, tp->iss) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
@@ -13088,7 +13088,7 @@ rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
(SEQ_LEQ(th->th_ack, tp->snd_una) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if (tp->t_flags & TF_FASTOPEN) {
@@ -13101,7 +13101,7 @@ rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
} else if (thflags & TH_SYN) {
/* non-initial SYN is ignored */
@@ -13135,7 +13135,7 @@ rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if (SEQ_LT(th->th_seq, tp->irs)) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
@@ -13398,7 +13398,7 @@ rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13494,7 +13494,7 @@ rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13516,7 +13516,7 @@ rack_check_data_after_close(struct mbuf *m,
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
tp = tcp_close(tp);
KMOD_TCPSTAT_INC(tcps_rcvafterclose);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
+ ctf_do_dropwithreset(m, tp, th, *tlen);
return (1);
}
if (sbavail(&so->so_snd) == 0)
@@ -13644,7 +13644,7 @@ rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13745,7 +13745,7 @@ rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13847,7 +13847,7 @@ rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13951,7 +13951,7 @@ rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -14227,7 +14227,7 @@ rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod,
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex8 = mod;
log.u_bbr.flex1 = flex1;
@@ -14366,17 +14366,17 @@ rack_switch_failed(struct tcpcb *tp)
toval = rack->r_ctl.rc_last_output_to - cts;
} else {
/* one slot please */
- toval = HPTS_TICKS_PER_SLOT;
+ toval = HPTS_USECS_PER_SLOT;
}
} else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
toval = rack->r_ctl.rc_timer_exp - cts;
} else {
/* one slot please */
- toval = HPTS_TICKS_PER_SLOT;
+ toval = HPTS_USECS_PER_SLOT;
}
} else
- toval = HPTS_TICKS_PER_SLOT;
+ toval = HPTS_USECS_PER_SLOT;
(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval),
__LINE__, &diag);
rack_log_hpts_diag(rack, cts, &diag, &tv);
@@ -14636,9 +14636,6 @@ rack_init(struct tcpcb *tp, void **ptr)
if (rack->r_ctl.pcm_s == NULL) {
rack->r_ctl.pcm_i.cnt_alloc = 0;
}
-#ifdef NETFLIX_STATS
- rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask;
-#endif
rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss;
rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca;
if (rack_enable_shared_cwnd)
@@ -14744,12 +14741,12 @@ rack_init(struct tcpcb *tp, void **ptr)
rack->r_ctl.rack_per_of_gp_ss = 250;
}
rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
- rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
- rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
+ rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_msec(&rack->r_ctl.act_rcv_time);
+ rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_msec(&rack->r_ctl.act_rcv_time);
setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
rack_probertt_filter_life);
- us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
+ us_cts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time);
rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
rack->r_ctl.rc_time_of_last_probertt = us_cts;
rack->r_ctl.rc_went_idle_time = us_cts;
@@ -14958,7 +14955,7 @@ rack_init(struct tcpcb *tp, void **ptr)
if (TSTMP_GT(qr.timer_pacing_to, us_cts))
tov = qr.timer_pacing_to - us_cts;
else
- tov = HPTS_TICKS_PER_SLOT;
+ tov = HPTS_USECS_PER_SLOT;
}
if (qr.timer_hpts_flags & PACE_TMR_MASK) {
rack->r_ctl.rc_timer_exp = qr.timer_timer_exp;
@@ -14966,7 +14963,7 @@ rack_init(struct tcpcb *tp, void **ptr)
if (TSTMP_GT(qr.timer_timer_exp, us_cts))
tov = qr.timer_timer_exp - us_cts;
else
- tov = HPTS_TICKS_PER_SLOT;
+ tov = HPTS_USECS_PER_SLOT;
}
}
rack_log_chg_info(tp, rack, 4,
@@ -15117,7 +15114,7 @@ rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.flex8 = 10;
log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced;
log.u_bbr.flex2 = rack->rc_free_cnt;
@@ -15361,7 +15358,7 @@ rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent
tcp_req = tcp_req_find_req_for_seq(tp, ae->ack);
}
#endif
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
if (rack->rack_no_prr == 0)
log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
@@ -15386,7 +15383,7 @@ rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent
ts.tv_nsec = ae->timestamp % 1000000000;
ltv.tv_sec = ts.tv_sec;
ltv.tv_usec = ts.tv_nsec / 1000;
- log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
+ log.u_bbr.lt_epoch = tcp_tv_to_usec(&ltv);
} else if (ae->flags & TSTMP_LRO) {
/* Record the LRO the arrival timestamp */
log.u_bbr.flex3 = M_TSTMP_LRO;
@@ -15394,7 +15391,7 @@ rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent
ts.tv_nsec = ae->timestamp % 1000000000;
ltv.tv_sec = ts.tv_sec;
ltv.tv_usec = ts.tv_nsec / 1000;
- log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
+ log.u_bbr.flex5 = tcp_tv_to_usec(&ltv);
}
log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
/* Log the rcv time */
@@ -15562,10 +15559,10 @@ rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2,
if (tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
struct timeval tv;
-
+
(void)tcp_get_usecs(&tv);
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
- log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv);
+ memset(&log, 0, sizeof(log));
+ log.u_bbr.timeStamp = tcp_tv_to_usec(&tv);
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
log.u_bbr.flex8 = mod;
log.u_bbr.flex1 = flex1;
@@ -15647,7 +15644,7 @@ rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq)
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex1 = rack->r_ctl.current_round;
log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise;
@@ -15748,8 +15745,8 @@ rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mb
the_win = tp->snd_wnd;
win_seq = tp->snd_wl1;
win_upd_ack = tp->snd_wl2;
- cts = tcp_tv_to_usectick(tv);
- ms_cts = tcp_tv_to_mssectick(tv);
+ cts = tcp_tv_to_usec(tv);
+ ms_cts = tcp_tv_to_msec(tv);
rack->r_ctl.rc_rcvtime = cts;
segsiz = ctf_fixed_maxseg(tp);
if ((rack->rc_gp_dyn_mul) &&
@@ -15865,7 +15862,7 @@ rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mb
* or it could be a keep-alive or persists
*/
if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
- rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
+ rack->r_ctl.rc_reorder_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time);
if (rack->r_ctl.rc_reorder_ts == 0)
rack->r_ctl.rc_reorder_ts = 1;
}
@@ -15884,7 +15881,7 @@ rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mb
}
if (rack->forced_ack) {
rack_handle_probe_response(rack, tiwin,
- tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
+ tcp_tv_to_usec(&rack->r_ctl.act_rcv_time));
}
#ifdef TCP_ACCOUNTING
win_up_req = 1;
@@ -15931,7 +15928,7 @@ rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mb
rack->r_ctl.act_rcv_time = *tv;
}
rack_process_to_cumack(tp, rack, ae->ack, cts, to,
- tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
+ tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time));
#ifdef TCP_REQUEST_TRK
rack_req_check_for_comp(rack, high_seq);
#endif
@@ -16399,7 +16396,7 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
* must process the ack coming in but need to defer sending
* anything becase a pacing timer is running.
*/
- us_cts = tcp_tv_to_usectick(tv);
+ us_cts = tcp_tv_to_usec(tv);
if (m->m_flags & M_ACKCMP) {
/*
* All compressed ack's are ack's by definition so
@@ -16467,8 +16464,8 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
if (m->m_flags & M_ACKCMP) {
panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp);
}
- cts = tcp_tv_to_usectick(tv);
- ms_cts = tcp_tv_to_mssectick(tv);
+ cts = tcp_tv_to_usec(tv);
+ ms_cts = tcp_tv_to_msec(tv);
nsegs = m->m_pkthdr.lro_nsegs;
counter_u64_add(rack_proc_non_comp_ack, 1);
#ifdef TCP_ACCOUNTING
@@ -16570,7 +16567,7 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack);
}
#endif
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
if (rack->rack_no_prr == 0)
log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
@@ -16596,13 +16593,13 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
mbuf_tstmp2timespec(m, &ts);
ltv.tv_sec = ts.tv_sec;
ltv.tv_usec = ts.tv_nsec / 1000;
- log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
+ log.u_bbr.lt_epoch = tcp_tv_to_usec(&ltv);
} else if (m->m_flags & M_TSTMP_LRO) {
/* Record the LRO the arrival timestamp */
mbuf_tstmp2timespec(m, &ts);
ltv.tv_sec = ts.tv_sec;
ltv.tv_usec = ts.tv_nsec / 1000;
- log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
+ log.u_bbr.flex5 = tcp_tv_to_usec(&ltv);
}
log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
/* Log the rcv time */
@@ -16654,7 +16651,7 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
(SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
#ifdef TCP_ACCOUNTING
sched_unpin();
#endif
@@ -16820,7 +16817,7 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
}
if (thflags & TH_FIN)
tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
- us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
+ us_cts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time);
if ((rack->rc_gp_dyn_mul) &&
(rack->use_fixed_rate == 0) &&
(rack->rc_always_pace)) {
@@ -16918,7 +16915,7 @@ do_output_now:
} else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) {
goto do_output_now;
} else if ((no_output == 1) &&
- (nxt_pkt == 0) &&
+ (nxt_pkt == 0) &&
(tcp_in_hpts(rack->rc_tp) == 0)) {
/*
* We are not in hpts and we had a pacing timer up. Use
@@ -17178,6 +17175,12 @@ rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot,
log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
log.u_bbr.cwnd_gain <<= 1;
log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
+ log.u_bbr.cwnd_gain <<= 1;
+ log.u_bbr.cwnd_gain |= rack->use_fixed_rate;
+ log.u_bbr.cwnd_gain <<= 1;
+ log.u_bbr.cwnd_gain |= rack->rc_always_pace;
+ log.u_bbr.cwnd_gain <<= 1;
+ log.u_bbr.cwnd_gain |= rack->gp_ready;
log.u_bbr.bbr_substate = quality;
log.u_bbr.bbr_state = rack->dgp_on;
log.u_bbr.bbr_state <<= 1;
@@ -17344,7 +17347,7 @@ at_lt_bw:
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
log.u_bbr.flex1 = rack_bw_multipler;
log.u_bbr.flex2 = len;
@@ -17539,8 +17542,8 @@ rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, str
rack->r_ctl.rc_last_us_rtt,
88, __LINE__, NULL, gain);
}
- if ((bw_est == 0) || (rate_wanted == 0) ||
- ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) {
+ if (((bw_est == 0) || (rate_wanted == 0) || (rack->gp_ready == 0)) &&
+ (rack->use_fixed_rate == 0)) {
/*
* No way yet to make a b/w estimate or
* our raise is set incorrectly.
@@ -17979,7 +17982,7 @@ start_set:
tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
rack->r_ctl.rc_gp_cumack_ts = 0;
if ((rack->r_ctl.cleared_app_ack == 1) &&
- (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) {
+ (SEQ_GEQ(tp->gput_seq, rack->r_ctl.cleared_app_ack_seq))) {
/*
* We just cleared an application limited period
* so the next seq out needs to skip the first
@@ -18102,7 +18105,7 @@ rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_
union tcp_log_stackspecific log;
struct timeval tv;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex1 = error;
log.u_bbr.flex2 = flags;
@@ -18367,7 +18370,7 @@ rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack,
err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
#endif
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
log.u_bbr.flex1 = p_rate;
log.u_bbr.flex2 = p_queue;
@@ -18820,7 +18823,7 @@ rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendma
counter_u64_add(rack_collapsed_win_rxt, 1);
counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
}
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
if (rack->rack_no_prr)
log.u_bbr.flex1 = 0;
@@ -19039,7 +19042,7 @@ rack_sndbuf_autoscale(struct tcp_rack *rack)
static int
rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
- uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
+ uint32_t cts, uint32_t ms_cts, struct timeval *tv, long *tot_len, int *send_err, int line)
{
/*
* Enter to do fast output. We are given that the sched_pin is
@@ -19212,7 +19215,7 @@ again:
}
if (rack->r_ctl.fsb.rfo_apply_push &&
(len == rack->r_ctl.fsb.left_to_send)) {
- tcp_set_flags(th, flags | TH_PUSH);
+ flags |= TH_PUSH;
add_flag |= RACK_HAD_PUSH;
}
if ((m->m_next == NULL) || (len <= 0)){
@@ -19369,7 +19372,7 @@ again:
if (tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
if (rack->rack_no_prr)
log.u_bbr.flex1 = 0;
@@ -19391,11 +19394,11 @@ again:
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
log.u_bbr.flex5 = log.u_bbr.inflight;
log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
- log.u_bbr.delivered = 0;
+ log.u_bbr.delivered = rack->r_ctl.fsb.left_to_send;
log.u_bbr.rttProp = 0;
log.u_bbr.delRate = rack->r_must_retran;
log.u_bbr.delRate <<= 1;
- log.u_bbr.pkt_epoch = __LINE__;
+ log.u_bbr.pkt_epoch = line;
/* For fast output no retrans so just inflight and how many mss we send */
log.u_bbr.flex5 = log.u_bbr.inflight;
log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
@@ -19437,7 +19440,7 @@ again:
}
if ((error == 0) && (rack->lt_bw_up == 0)) {
/* Unlikely */
- rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv);
+ rack->r_ctl.lt_timemark = tcp_tv_to_lusec(tv);
rack->r_ctl.lt_seq = tp->snd_una;
rack->lt_bw_up = 1;
} else if ((error == 0) &&
@@ -19468,7 +19471,7 @@ again:
tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls);
rack->forced_ack = 0; /* If we send something zap the FA flag */
- tot_len += len;
+ *tot_len += len;
if ((tp->t_flags & TF_GPUTINPROG) == 0)
rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
tp->snd_max += len;
@@ -19504,6 +19507,7 @@ again:
}
if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
(max_val > len) &&
+ (*tot_len < rack->r_ctl.rc_pace_max_segs) &&
(tso == 0)) {
max_val -= len;
len = segsiz;
@@ -19515,14 +19519,14 @@ again:
}
tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
counter_u64_add(rack_fto_send, 1);
- slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz, __LINE__);
- rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0);
+ slot = rack_get_pacing_delay(rack, tp, *tot_len, NULL, segsiz, __LINE__);
+ rack_start_hpts_timer(rack, tp, cts, slot, *tot_len, 0);
#ifdef TCP_ACCOUNTING
crtsc = get_cyclecount();
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
- tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
+ tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((*tot_len + segsiz - 1) / segsiz);
}
sched_unpin();
#endif
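The accounting line uses the standard round-up idiom to convert a byte count into an MSS count, now reading through the pointer. Standalone, the arithmetic looks like this (function name invented for the sketch):

static long
mss_out_sketch(long tot_len, long segsiz)
{
	/* Round up so a partial final segment still counts as one. */
	return ((tot_len + segsiz - 1) / segsiz);
}
/* mss_out_sketch(2896, 1448) == 2; mss_out_sketch(2900, 1448) == 3 */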
@@ -19779,7 +19783,7 @@ rack_output(struct tcpcb *tp)
#endif
early = 0;
cts = tcp_get_usecs(&tv);
- ms_cts = tcp_tv_to_mssectick(&tv);
+ ms_cts = tcp_tv_to_msec(&tv);
if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
tcp_in_hpts(rack->rc_tp)) {
/*
@@ -19884,20 +19888,36 @@ rack_output(struct tcpcb *tp)
TCPS_HAVEESTABLISHED(tp->t_state)) {
rack_set_state(tp, rack);
}
+ segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
+ minseg = segsiz;
+ if (rack->r_ctl.rc_pace_max_segs == 0)
+ pace_max_seg = rack->rc_user_set_max_segs * segsiz;
+ else
+ pace_max_seg = rack->r_ctl.rc_pace_max_segs;
if ((rack->r_fast_output) &&
(doing_tlp == 0) &&
(tp->rcv_numsacks == 0)) {
int ret;
error = 0;
- ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
- if (ret >= 0)
+ ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, &tot_len_this_send, &error, __LINE__);
+ if (ret > 0)
return(ret);
else if (error) {
inp = rack->rc_inp;
so = inp->inp_socket;
sb = &so->so_snd;
goto nomore;
+ } else {
+ /* Return == 0: if tot_len still allows more, fall through and send it */
+ if (tot_len_this_send >= pace_max_seg)
+ return (ret);
+#ifdef TCP_ACCOUNTING
+ /* We need to re-pin since fast_output un-pined */
+ sched_pin();
+ ts_val = get_cyclecount();
+#endif
+ /* Fall through so we can send anything more that may bring us up to pace_max_seg */
}
}
inp = rack->rc_inp;
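With the new fall-through branch, rack_fast_output()'s return value follows a three-way contract: positive means the send was fully handled and rack_output() can return, failures surface through the error out-parameter, and zero means more may be sent in this pass unless the pacing budget (pace_max_seg, now computed before the fast path) is already consumed. A simplified caller-side sketch of that contract, with hypothetical names:

static int
caller_shape_sketch(int ret, int error, long tot_len, long pace_max_seg)
{
	if (ret > 0)
		return (ret);		/* fully handled by the fast path */
	if (error != 0)
		return (error);		/* bail out, like goto nomore */
	if (tot_len >= pace_max_seg)
		return (0);		/* pacing budget consumed */
	/* otherwise: fall through and build more to send */
	return (0);
}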
@@ -20001,15 +20021,9 @@ rack_output(struct tcpcb *tp)
again:
sendalot = 0;
cts = tcp_get_usecs(&tv);
- ms_cts = tcp_tv_to_mssectick(&tv);
+ ms_cts = tcp_tv_to_msec(&tv);
tso = 0;
mtu = 0;
- segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
- minseg = segsiz;
- if (rack->r_ctl.rc_pace_max_segs == 0)
- pace_max_seg = rack->rc_user_set_max_segs * segsiz;
- else
- pace_max_seg = rack->r_ctl.rc_pace_max_segs;
if (TCPS_HAVEESTABLISHED(tp->t_state) &&
(rack->r_ctl.pcm_max_seg == 0)) {
/*
@@ -20025,7 +20039,7 @@ again:
rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10;
}
}
- if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) {
+ if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) {
uint32_t rw_avail, cwa;
if (tp->snd_wnd > ctf_outstanding(tp))
@@ -20871,6 +20885,7 @@ just_return_nolock:
rack->r_fsb_inited &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
((IN_RECOVERY(tp->t_flags)) == 0) &&
+ (doing_tlp == 0) &&
(rack->r_must_retran == 0) &&
((tp->t_flags & TF_NEEDFIN) == 0) &&
(len > 0) && (orig_len > 0) &&
@@ -21012,7 +21027,7 @@ just_return_nolock:
} else
log = 1;
}
- /* Mark the last packet has app limited */
+ /* Mark the last packet as app limited */
rsm = tqhash_max(rack->r_ctl.tqh);
if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
if (rack->r_ctl.rc_app_limited_cnt == 0)
@@ -21364,7 +21379,8 @@ send:
if (max_len <= 0) {
len = 0;
} else if (len > max_len) {
- sendalot = 1;
+ if (doing_tlp == 0)
+ sendalot = 1;
len = max_len;
mark = 2;
}
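The sendalot guard matters because a tail loss probe is a single probe segment by design: when a TLP's length is clipped to max_len, looping back to emit the remainder would turn one probe into several. The shape of the check as a standalone sketch (not the actual code):

static long
clip_send_len_sketch(long len, long max_len, int doing_tlp, int *sendalot)
{
	if (len > max_len) {
		if (doing_tlp == 0)
			*sendalot = 1;	/* normal sends loop for the rest */
		len = max_len;		/* a TLP sends only the clipped probe */
	}
	return (len);
}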
@@ -21535,11 +21551,7 @@ send:
m->m_next = tcp_m_copym(
mb, moff, &len,
if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
- ((rsm == NULL) ? hw_tls : 0)
-#ifdef NETFLIX_COPY_ARGS
- , &s_mb, &s_moff
-#endif
- );
+ ((rsm == NULL) ? hw_tls : 0));
if (len <= (tp->t_maxseg - optlen)) {
/*
* Must have ran out of mbufs for the copy
@@ -21593,7 +21605,6 @@ send:
flags |= TH_PUSH;
add_flag |= RACK_HAD_PUSH;
}
-
SOCK_SENDBUF_UNLOCK(so);
} else {
SOCK_SENDBUF_UNLOCK(so);
@@ -21886,7 +21897,7 @@ send:
if (tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
+ memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
if (rack->rack_no_prr)
log.u_bbr.flex1 = 0;
@@ -22062,6 +22073,8 @@ out:
* In transmit state, time the transmission and arrange for the
* retransmit. In persist state, just set snd_max.
*/
+ if ((rsm == NULL) && doing_tlp)
+ add_flag |= RACK_TLP;
rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error,
rack_to_usec_ts(&tv),
rsm, add_flag, s_mb, s_moff, hw_tls, segsiz);
@@ -22075,7 +22088,7 @@ out:
}
if (rsm == NULL) {
if (rack->lt_bw_up == 0) {
- rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv);
+ rack->r_ctl.lt_timemark = tcp_tv_to_lusec(&tv);
rack->r_ctl.lt_seq = tp->snd_una;
rack->lt_bw_up = 1;
} else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) {
@@ -22148,15 +22161,14 @@ out:
rack->r_ctl.rc_prr_sndcnt = 0;
}
sub_from_prr = 0;
- if (doing_tlp) {
- /* Make sure the TLP is added */
- add_flag |= RACK_TLP;
- } else if (rsm) {
- /* If its a resend without TLP then it must not have the flag */
- rsm->r_flags &= ~RACK_TLP;
- }
-
-
+ if (rsm != NULL) {
+ if (doing_tlp)
+ /* Make sure the TLP is added */
+ rsm->r_flags |= RACK_TLP;
+ else
+ /* If it's a resend without TLP then it must not have the flag */
+ rsm->r_flags &= ~RACK_TLP;
+ }
if ((error == 0) &&
(len > 0) &&
(tp->snd_una == tp->snd_max))
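The rewritten block moves the TLP marking onto the retransmit-map entry itself: previously a TLP send only adjusted the local add_flag while a non-TLP resend cleared the entry's flag, so an entry could carry a stale state across sends. A minimal sketch of the per-entry discipline (flag value illustrative only):

#define RACK_TLP_SKETCH	0x0004	/* illustrative value, not the real bit */

struct rsm_sketch {
	unsigned int r_flags;
};

static void
mark_send_sketch(struct rsm_sketch *rsm, int doing_tlp)
{
	if (doing_tlp)
		rsm->r_flags |= RACK_TLP_SKETCH;	/* TLP resend */
	else
		rsm->r_flags &= ~RACK_TLP_SKETCH;	/* plain resend */
}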
@@ -22494,6 +22506,7 @@ enobufs:
((flags & (TH_SYN|TH_FIN)) == 0) &&
(rsm == NULL) &&
(ipoptlen == 0) &&
+ (doing_tlp == 0) &&
rack->r_fsb_inited &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
((IN_RECOVERY(tp->t_flags)) == 0) &&
@@ -22520,6 +22533,7 @@ enobufs:
rack_use_rfo &&
((flags & (TH_SYN|TH_FIN)) == 0) &&
(rsm == NULL) &&
+ (doing_tlp == 0) &&
(ipoptlen == 0) &&
(rack->r_must_retran == 0) &&
rack->r_fsb_inited &&
@@ -22536,7 +22550,7 @@ enobufs:
segsiz, pace_max_seg, hw_tls, flags);
if (rack->r_fast_output) {
error = 0;
- ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
+ ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, &tot_len_this_send, &error, __LINE__);
if (ret >= 0)
return (ret);
else if (error)
@@ -22822,7 +22836,7 @@ process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid)
rack->r_ctl.rc_fixed_pacing_rate_ca = 0;
rack->r_ctl.rc_fixed_pacing_rate_ss = 0;
/* Now allocate or find our entry that will have these settings */
- sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0);
+ sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusec(&tv), 0);
if (sft == NULL) {
rack->rc_tp->tcp_hybrid_error++;
/* no space, where would it have gone? */
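The tcp_tv_to_usectick/tcp_tv_to_msectick/tcp_tv_to_lusectick calls throughout this file are renamed to tcp_tv_to_usec/tcp_tv_to_msec/tcp_tv_to_lusec with no change in meaning: they remain plain struct timeval unit conversions. A sketch of the arithmetic such helpers stand for (the real implementations live in the TCP headers and may differ in width and rounding):

#include <sys/time.h>
#include <stdint.h>

static inline uint64_t
tv_to_lusec_sketch(const struct timeval *tv)	/* "long" (64-bit) usecs */
{
	return ((uint64_t)tv->tv_sec * 1000000 + tv->tv_usec);
}

static inline uint32_t
tv_to_msec_sketch(const struct timeval *tv)	/* 32-bit milliseconds */
{
	return ((uint32_t)(tv->tv_sec * 1000 + tv->tv_usec / 1000));
}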
diff --git a/sys/netinet/tcp_stacks/rack_bbr_common.c b/sys/netinet/tcp_stacks/rack_bbr_common.c
index da26b8cb1f9b..4a0a5fc118f6 100644
--- a/sys/netinet/tcp_stacks/rack_bbr_common.c
+++ b/sys/netinet/tcp_stacks/rack_bbr_common.c
@@ -76,8 +76,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
@@ -507,13 +505,11 @@ ctf_flight_size(struct tcpcb *tp, uint32_t rc_sacked)
void
ctf_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
- int32_t rstreason, int32_t tlen)
+ int32_t tlen)
{
- if (tp != NULL) {
- tcp_dropwithreset(m, th, tp, tlen, rstreason);
+ tcp_dropwithreset(m, th, tp, tlen);
+ if (tp != NULL)
INP_WUNLOCK(tptoinpcb(tp));
- } else
- tcp_dropwithreset(m, th, NULL, tlen, rstreason);
}
void
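Since tcp_dropwithreset() now tolerates a NULL tp and the bandwidth-limit classification argument is gone, the wrapper collapses to a single unconditional call with only the unlock left guarded, and every caller sheds its BANDLIM_* argument, e.g.:

	old: ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
	new: ctf_do_dropwithreset(m, tp, th, tlen);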
@@ -672,7 +668,7 @@ ctf_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t
(SEQ_GT(tp->snd_una, th->th_ack) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
*ret_val = 1;
- ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return;
} else
*ret_val = 0;
@@ -866,10 +862,10 @@ ctf_calc_rwin(struct socket *so, struct tcpcb *tp)
void
ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
- int32_t rstreason, int32_t tlen)
+ int32_t tlen)
{
- tcp_dropwithreset(m, th, tp, tlen, rstreason);
+ tcp_dropwithreset(m, th, tp, tlen);
tp = tcp_drop(tp, ETIMEDOUT);
if (tp)
INP_WUNLOCK(tptoinpcb(tp));
diff --git a/sys/netinet/tcp_stacks/rack_bbr_common.h b/sys/netinet/tcp_stacks/rack_bbr_common.h
index 6a8a056d89b0..cd33cb8ce50b 100644
--- a/sys/netinet/tcp_stacks/rack_bbr_common.h
+++ b/sys/netinet/tcp_stacks/rack_bbr_common.h
@@ -101,7 +101,7 @@ ctf_do_dropafterack(struct mbuf *m, struct tcpcb *tp,
void
ctf_do_dropwithreset(struct mbuf *m, struct tcpcb *tp,
- struct tcphdr *th, int32_t rstreason, int32_t tlen);
+ struct tcphdr *th, int32_t tlen);
void
ctf_do_drop(struct mbuf *m, struct tcpcb *tp);
@@ -125,7 +125,7 @@ ctf_calc_rwin(struct socket *so, struct tcpcb *tp);
void
ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
- int32_t rstreason, int32_t tlen);
+ int32_t tlen);
uint32_t
ctf_fixed_maxseg(struct tcpcb *tp);
diff --git a/sys/netinet/tcp_stacks/rack_pcm.c b/sys/netinet/tcp_stacks/rack_pcm.c
index 09e90da88895..1a51097f627c 100644
--- a/sys/netinet/tcp_stacks/rack_pcm.c
+++ b/sys/netinet/tcp_stacks/rack_pcm.c
@@ -78,8 +78,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
@@ -172,9 +170,9 @@ rack_update_pcm_ack(struct tcp_rack *rack, int was_cumack, uint32_t start, uint3
goto skip_ack_accounting;
}
/*
- * Record ACK data.
+ * Record ACK data.
*/
- ack_arrival = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
+ ack_arrival = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time);
if (SEQ_GT(end, rack->r_ctl.pcm_i.eseq)) {
/* Trim the end to the end of our range if it is beyond */
end = rack->r_ctl.pcm_i.eseq;
@@ -241,8 +239,8 @@ skip_ack_accounting:
for (i=0; i<rack->r_ctl.pcm_i.cnt; i++) {
e = &rack->r_ctl.pcm_s[i];
- memset(&log.u_bbr, 0, sizeof(log.u_bbr));
- log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv);
+ memset(&log, 0, sizeof(log));
+ log.u_bbr.timeStamp = tcp_tv_to_usec(&tv);
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
log.u_bbr.flex8 = 1;
log.u_bbr.flex1 = e->sseq;
@@ -286,7 +284,7 @@ skip_ack_accounting:
* Prev time holds the last ack arrival time.
*/
memset(&log.u_bbr, 0, sizeof(log.u_bbr));
- log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv);
+ log.u_bbr.timeStamp = tcp_tv_to_usec(&tv);
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
log.u_bbr.flex8 = 2;
log.u_bbr.flex1 = rack->r_ctl.pcm_i.sseq;
@@ -305,7 +303,7 @@ skip_ack_accounting:
0, &log, false, NULL, NULL, 0, &tv);
}
}
- /*
+ /*
* Here we need a lot to be added including:
* 1) Some form of measurement, where if we think the measurement
* is valid we iterate over the PCM data and come up with a path
diff --git a/sys/netinet/tcp_stacks/sack_filter.c b/sys/netinet/tcp_stacks/sack_filter.c
index fc9ee8454a1e..2b70548f3cc6 100644
--- a/sys/netinet/tcp_stacks/sack_filter.c
+++ b/sys/netinet/tcp_stacks/sack_filter.c
@@ -400,7 +400,7 @@ sack_filter_run(struct sack_filter *sf, struct sackblk *in, int numblks, tcp_seq
break;
}
/* Copy it out to the outbound */
- memcpy(&in[at], &blkboard[i], sizeof(struct sackblk));
+ memcpy(&in[at], &blkboard[i], sizeof(struct sackblk));
at++;
room--;
/* now lets add it to our sack-board */
@@ -588,7 +588,7 @@ sack_filter_blks(struct tcpcb *tp, struct sack_filter *sf, struct sackblk *in, i
sf->sf_ack = th_ack;
for(i=0, sf->sf_cur=0; i<numblks; i++) {
- if ((in[i].end != tp->snd_max) &&
+ if ((in[i].end != tp->snd_max) &&
((in[i].end - in[i].start) < segmax)) {
/*
* We do not accept blocks less than a MSS minus all
@@ -707,7 +707,7 @@ main(int argc, char **argv)
out = stdout;
memset(&tp, 0, sizeof(tp));
tp.t_maxseg = 1460;
-
+
while ((i = getopt(argc, argv, "dIi:o:?hS:")) != -1) {
switch (i) {
case 'S':
@@ -883,7 +883,7 @@ main(int argc, char **argv)
} else {
printf("can't open sack_setup.bin -- sorry no load\n");
}
-
+
} else if (strncmp(buffer, "help", 4) == 0) {
help:
fprintf(out, "You can input:\n");
diff --git a/sys/netinet/tcp_stacks/sack_filter.h b/sys/netinet/tcp_stacks/sack_filter.h
index b12fcf84567c..a1c0684a4359 100644
--- a/sys/netinet/tcp_stacks/sack_filter.h
+++ b/sys/netinet/tcp_stacks/sack_filter.h
@@ -42,7 +42,7 @@
* previously processed sack information.
*
* The second thing that the sack filter does is help protect against malicious
- * attackers that are trying to attack any linked lists (or other data structures)
+ * attackers that are trying to attack any linked lists (or other data structures)
* that are used in sack processing. Consider an attacker sending in sacks for
* every other byte of data outstanding. This could in theory drastically split
* up any scoreboard you are maintaining and make you search through a very large
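To put numbers on the attack this comment describes: with 1 MB outstanding, a well-behaved receiver generates on the order of one scoreboard entry per MSS, while alternate-byte sacks could demand one entry per two bytes. A quick illustrative computation:

#include <stdio.h>

int
main(void)
{
	unsigned int window = 1048576;	/* 1 MB outstanding */
	unsigned int mss = 1460;

	printf("normal entries:   ~%u\n", window / mss);	/* ~718   */
	printf("attacker entries: %u\n", window / 2);		/* 524288 */
	return (0);
}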
diff --git a/sys/netinet/tcp_stacks/tailq_hash.c b/sys/netinet/tcp_stacks/tailq_hash.c
index 5ba3e7cd36c0..ff01640524b6 100644
--- a/sys/netinet/tcp_stacks/tailq_hash.c
+++ b/sys/netinet/tcp_stacks/tailq_hash.c
@@ -51,8 +51,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
diff --git a/sys/netinet/tcp_stacks/tcp_bbr.h b/sys/netinet/tcp_stacks/tcp_bbr.h
index f88efe3c9ef9..10ddd12bda75 100644
--- a/sys/netinet/tcp_stacks/tcp_bbr.h
+++ b/sys/netinet/tcp_stacks/tcp_bbr.h
@@ -347,8 +347,6 @@ struct bbr_log_sysctl_out {
/*
* Locking for the rack control block.
* a) Locked by INP_WLOCK
- * b) Locked by the hpts-mutex
- *
*/
#define BBR_STATE_STARTUP 0x01
#define BBR_STATE_DRAIN 0x02
diff --git a/sys/netinet/tcp_stacks/tcp_rack.h b/sys/netinet/tcp_stacks/tcp_rack.h
index 4374594a1d82..144b4fabf7eb 100644
--- a/sys/netinet/tcp_stacks/tcp_rack.h
+++ b/sys/netinet/tcp_stacks/tcp_rack.h
@@ -327,8 +327,6 @@ extern counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
/*
* Locking for the rack control block.
* a) Locked by INP_WLOCK
- * b) Locked by the hpts-mutex
- *
*/
#define RACK_GP_HIST 4 /* How much goodput history do we maintain? */
#define RETRAN_CNT_SIZE 16
@@ -614,7 +612,6 @@ struct rack_control {
struct tcp_rack {
/* First cache line 0x00 */
- TAILQ_ENTRY(tcp_rack) r_hpts; /* hptsi queue next Lock(b) */
int32_t(*r_substate) (struct mbuf *, struct tcphdr *,
struct socket *, struct tcpcb *, struct tcpopt *,
int32_t, int32_t, uint32_t, int, int, uint8_t); /* Lock(a) */