path: root/sys/netinet/cc/cc_newreno.c
author     Randall Stewart <rrs@FreeBSD.org>  2021-10-22 11:10:28 +0000
committer  Randall Stewart <rrs@FreeBSD.org>  2021-10-22 11:10:28 +0000
commit     4e4c84f8d101216ebf303f04ce9d4327c3328059 (patch)
tree       7e17615939128b392ff51eb8f30dde2877d55ffd /sys/netinet/cc/cc_newreno.c
parent     5a3eb6207a353c3a18da8abcf00a2d75276dd29e (diff)
tcp: Add hystart-plus to cc_newreno and rack.
TCP HyStart++ draft version -03 (https://datatracker.ietf.org/doc/html/draft-ietf-tcpm-hystartplusplus)
is a new version of hystart that allows one to carefully exit slow start if the RTT spikes
too much. Instead of dropping straight into congestion avoidance, this version falls into a
slower slow start (CSS) for five round trips to see if slow start was exited too early; if
the RTT falls back during that time, regular slow start resumes, otherwise the connection
moves into congestion avoidance. This commit adds that feature to our newreno CC and the
needed bits in rack to be able to enable it.

Reviewed by:	tuexen
Sponsored by:	Netflix Inc.
Differential Revision:	https://reviews.freebsd.org/D32373
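
For orientation before reading the diff, the core check the draft describes (and which
newreno_ack_received() below implements) can be sketched as follows. This is a minimal,
self-contained illustration with hypothetical names, using the default thresholds added by
this commit; it is not the committed kernel code:

    /*
     * Hedged sketch of the HyStart++ CSS-entry test from draft -03:
     * leave regular slow start for Conservative Slow Start (CSS) once
     * this round's minimum RTT exceeds last round's minimum by an
     * eighth of last round's value, clamped between the two thresholds.
     */
    #include <stdbool.h>
    #include <stdint.h>

    #define HYSTART_MINRTT_THRESH   4000    /* usec, lower clamp (default) */
    #define HYSTART_MAXRTT_THRESH   16000   /* usec, upper clamp (default) */

    static bool
    hystart_should_enter_css(uint32_t lastround_minrtt, uint32_t curround_minrtt)
    {
            uint32_t rtt_thresh = lastround_minrtt >> 3;    /* lastround/8 */

            if (rtt_thresh < HYSTART_MINRTT_THRESH)
                    rtt_thresh = HYSTART_MINRTT_THRESH;
            if (rtt_thresh > HYSTART_MAXRTT_THRESH)
                    rtt_thresh = HYSTART_MAXRTT_THRESH;
            return (curround_minrtt >= lastround_minrtt + rtt_thresh);
    }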
Diffstat (limited to 'sys/netinet/cc/cc_newreno.c')
-rw-r--r--  sys/netinet/cc/cc_newreno.c | 335
1 file changed, 305 insertions, 30 deletions
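
The newreno_ctl_output() changes further down let an application opt a connection into
HyStart++ through the TCP_CCALGOOPT socket option: a value of 1 only allows HyStart++, 2
additionally lets the CSS exit shrink the cwnd, and 3 also picks the more conservative
ssthresh. A hedged userland sketch, assuming struct cc_newreno_opts and
CC_NEWRENO_ENABLE_HYSTART from netinet/cc/cc_newreno.h are visible to the program and with
error handling trimmed:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>            /* TCP_CONGESTION, TCP_CCALGOOPT */
    #include <netinet/cc/cc_newreno.h>  /* struct cc_newreno_opts, CC_NEWRENO_* */
    #include <string.h>

    static int
    enable_hystartplusplus(int fd)
    {
            struct cc_newreno_opts opt;

            /* Make sure the connection is using the newreno module. */
            if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "newreno",
                strlen("newreno")) == -1)
                    return (-1);

            /* 1 = allow, 2 = also shrink cwnd on CSS exit, 3 = conservative ssthresh too. */
            opt.name = CC_NEWRENO_ENABLE_HYSTART;
            opt.val = 3;
            return (setsockopt(fd, IPPROTO_TCP, TCP_CCALGOOPT, &opt, sizeof(opt)));
    }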
diff --git a/sys/netinet/cc/cc_newreno.c b/sys/netinet/cc/cc_newreno.c
index 8f939ad70695..23d2b273f6aa 100644
--- a/sys/netinet/cc/cc_newreno.c
+++ b/sys/netinet/cc/cc_newreno.c
@@ -63,15 +63,21 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/vnet.h>
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
+#include <netinet/tcp_log_buf.h>
+#include <netinet/tcp_hpts.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_module.h>
#include <netinet/cc/cc_newreno.h>
@@ -85,6 +91,9 @@ static void newreno_after_idle(struct cc_var *ccv);
static void newreno_cong_signal(struct cc_var *ccv, uint32_t type);
static void newreno_post_recovery(struct cc_var *ccv);
static int newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf);
+static void newreno_newround(struct cc_var *ccv, uint32_t round_cnt);
+static void newreno_rttsample(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt, uint32_t fas);
+static int newreno_cb_init(struct cc_var *ccv);
VNET_DEFINE(uint32_t, newreno_beta) = 50;
VNET_DEFINE(uint32_t, newreno_beta_ecn) = 80;
@@ -99,23 +108,95 @@ struct cc_algo newreno_cc_algo = {
.cong_signal = newreno_cong_signal,
.post_recovery = newreno_post_recovery,
.ctl_output = newreno_ctl_output,
+ .newround = newreno_newround,
+ .rttsample = newreno_rttsample,
+ .cb_init = newreno_cb_init,
};
-static inline struct newreno *
-newreno_malloc(struct cc_var *ccv)
-{
- struct newreno *nreno;
+static uint32_t hystart_lowcwnd = 16;
+static uint32_t hystart_minrtt_thresh = 4000;
+static uint32_t hystart_maxrtt_thresh = 16000;
+static uint32_t hystart_n_rttsamples = 8;
+static uint32_t hystart_css_growth_div = 4;
+static uint32_t hystart_css_rounds = 5;
+static uint32_t hystart_bblogs = 0;
- nreno = malloc(sizeof(struct newreno), M_NEWRENO, M_NOWAIT);
- if (nreno != NULL) {
- /* NB: nreno is not zeroed, so initialise all fields. */
- nreno->beta = V_newreno_beta;
- nreno->beta_ecn = V_newreno_beta_ecn;
- nreno->newreno_flags = 0;
- ccv->cc_data = nreno;
+static void
+newreno_log_hystart_event(struct cc_var *ccv, struct newreno *nreno, uint8_t mod, uint32_t flex1)
+{
+ /*
+ * Types of logs (mod value)
+ * 1 - rtt_thresh in flex1, checking to see if the RTT is too great.
+ * 2 - RTT is too great, rtt_thresh in flex1.
+ * 3 - CSS is active, incr in flex1.
+ * 4 - A new round is beginning, flex1 is the round count.
+ * 5 - A new RTT measurement, flex1 is the new measurement.
+ * 6 - We enter CA, ssthresh is also in flex1.
+ * 7 - Socket option to change hystart executed, opt.val in flex1.
+ * 8 - Back out of CSS into SS, flex1 is the css_baseline_minrtt.
+ */
+ struct tcpcb *tp;
+
+ if (hystart_bblogs == 0)
+ return;
+ tp = ccv->ccvc.tcp;
+ if (tp->t_logstate != TCP_LOG_STATE_OFF) {
+ union tcp_log_stackspecific log;
+ struct timeval tv;
+
+ memset(&log, 0, sizeof(log));
+ log.u_bbr.flex1 = flex1;
+ log.u_bbr.flex2 = nreno->css_current_round_minrtt;
+ log.u_bbr.flex3 = nreno->css_lastround_minrtt;
+ log.u_bbr.flex4 = nreno->css_rttsample_count;
+ log.u_bbr.flex5 = nreno->css_entered_at_round;
+ log.u_bbr.flex6 = nreno->css_baseline_minrtt;
+ /* We only need bottom 16 bits of flags */
+ log.u_bbr.flex7 = nreno->newreno_flags & 0x0000ffff;
+ log.u_bbr.flex8 = mod;
+ log.u_bbr.epoch = nreno->css_current_round;
+ log.u_bbr.timeStamp = tcp_get_usecs(&tv);
+ log.u_bbr.lt_epoch = nreno->css_fas_at_css_entry;
+ log.u_bbr.pkts_out = nreno->css_last_fas;
+ log.u_bbr.delivered = nreno->css_lowrtt_fas;
+ TCP_LOG_EVENTP(tp, NULL,
+ &tp->t_inpcb->inp_socket->so_rcv,
+ &tp->t_inpcb->inp_socket->so_snd,
+ TCP_HYSTART, 0,
+ 0, &log, false, &tv);
}
+}
+
+static int
+newreno_cb_init(struct cc_var *ccv)
+{
+ struct newreno *nreno;
- return (nreno);
+ ccv->cc_data = NULL;
+ ccv->cc_data = malloc(sizeof(struct newreno), M_NEWRENO, M_NOWAIT);
+ if (ccv->cc_data == NULL)
+ return (ENOMEM);
+ nreno = (struct newreno *)ccv->cc_data;
+ /* NB: nreno is not zeroed, so initialise all fields. */
+ nreno->beta = V_newreno_beta;
+ nreno->beta_ecn = V_newreno_beta_ecn;
+ /*
+ * We set the enabled flag so that if
+ * the socket option gets strobed and
+ * we have not hit a loss, hystart
+ * will engage on this connection.
+ */
+ nreno->newreno_flags = CC_NEWRENO_HYSTART_ENABLED;
+ /* At init set both to infinity */
+ nreno->css_lastround_minrtt = 0xffffffff;
+ nreno->css_current_round_minrtt = 0xffffffff;
+ nreno->css_current_round = 0;
+ nreno->css_baseline_minrtt = 0xffffffff;
+ nreno->css_rttsample_count = 0;
+ nreno->css_entered_at_round = 0;
+ nreno->css_fas_at_css_entry = 0;
+ nreno->css_lowrtt_fas = 0;
+ nreno->css_last_fas = 0;
+ return (0);
}
static void
@@ -127,6 +208,9 @@ newreno_cb_destroy(struct cc_var *ccv)
static void
newreno_ack_received(struct cc_var *ccv, uint16_t type)
{
+ struct newreno *nreno;
+
+ nreno = (struct newreno *)ccv->cc_data;
if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
(ccv->flags & CCF_CWND_LIMITED)) {
u_int cw = CCV(ccv, snd_cwnd);
@@ -160,6 +244,16 @@ newreno_ack_received(struct cc_var *ccv, uint16_t type)
* avoid capping cwnd.
*/
if (cw > CCV(ccv, snd_ssthresh)) {
+ if (nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) {
+ /*
+ * We have slipped into CA with
+ * CSS active. Deactivate all.
+ */
+ /* Turn off the CSS flag */
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+ /* Disable use of CSS in the future except long idle */
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
+ }
if (V_tcp_do_rfc3465) {
if (ccv->flags & CCF_ABC_SENTAWND)
ccv->flags &= ~CCF_ABC_SENTAWND;
@@ -184,12 +278,48 @@ newreno_ack_received(struct cc_var *ccv, uint16_t type)
abc_val = ccv->labc;
else
abc_val = V_tcp_abc_l_var;
+ if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_ALLOWED) &&
+ (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) &&
+ ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) == 0)) {
+ /*
+ * Hystart is allowed and still enabled and we are not yet
+ * in CSS. Let's check to see if we can make a decision on
+ * whether we need to go into CSS.
+ */
+ if ((nreno->css_rttsample_count >= hystart_n_rttsamples) &&
+ (CCV(ccv, snd_cwnd) >
+ (hystart_lowcwnd * tcp_fixed_maxseg(ccv->ccvc.tcp)))) {
+ uint32_t rtt_thresh;
+
+ /* Clamp (minrtt_thresh, lastround/8, maxrtt_thresh) */
+ rtt_thresh = (nreno->css_lastround_minrtt >> 3);
+ if (rtt_thresh < hystart_minrtt_thresh)
+ rtt_thresh = hystart_minrtt_thresh;
+ if (rtt_thresh > hystart_maxrtt_thresh)
+ rtt_thresh = hystart_maxrtt_thresh;
+ newreno_log_hystart_event(ccv, nreno, 1, rtt_thresh);
+ if (nreno->css_current_round_minrtt >= (nreno->css_lastround_minrtt + rtt_thresh)) {
+ /* Enter CSS */
+ nreno->newreno_flags |= CC_NEWRENO_HYSTART_IN_CSS;
+ nreno->css_fas_at_css_entry = nreno->css_lowrtt_fas;
+ nreno->css_baseline_minrtt = nreno->css_current_round_minrtt;
+ nreno->css_entered_at_round = nreno->css_current_round;
+ newreno_log_hystart_event(ccv, nreno, 2, rtt_thresh);
+ }
+ }
+ }
if (CCV(ccv, snd_nxt) == CCV(ccv, snd_max))
incr = min(ccv->bytes_this_ack,
ccv->nsegs * abc_val *
CCV(ccv, t_maxseg));
else
incr = min(ccv->bytes_this_ack, CCV(ccv, t_maxseg));
+
+ /* Only if Hystart is enabled will the flag get set */
+ if (nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) {
+ incr /= hystart_css_growth_div;
+ newreno_log_hystart_event(ccv, nreno, 3, incr);
+ }
}
/* ABC is on by default, so incr equals 0 frequently. */
if (incr > 0)
@@ -201,8 +331,10 @@ newreno_ack_received(struct cc_var *ccv, uint16_t type)
static void
newreno_after_idle(struct cc_var *ccv)
{
+ struct newreno *nreno;
uint32_t rw;
+ nreno = (struct newreno *)ccv->cc_data;
/*
* If we've been idle for more than one retransmit timeout the old
* congestion window is no longer current and we have to reduce it to
@@ -226,6 +358,16 @@ newreno_after_idle(struct cc_var *ccv)
CCV(ccv, snd_cwnd)-(CCV(ccv, snd_cwnd)>>2));
CCV(ccv, snd_cwnd) = min(rw, CCV(ccv, snd_cwnd));
+ if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) == 0) {
+ if (CCV(ccv, snd_cwnd) <= (hystart_lowcwnd * tcp_fixed_maxseg(ccv->ccvc.tcp))) {
+ /*
+ * Re-enable hystart if our cwnd has fallen below
+ * the hystart lowcwnd point.
+ */
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+ nreno->newreno_flags |= CC_NEWRENO_HYSTART_ENABLED;
+ }
+ }
}
/*
@@ -240,15 +382,9 @@ newreno_cong_signal(struct cc_var *ccv, uint32_t type)
cwin = CCV(ccv, snd_cwnd);
mss = tcp_fixed_maxseg(ccv->ccvc.tcp);
- /*
- * Other TCP congestion controls use newreno_cong_signal(), but
- * with their own private cc_data. Make sure the cc_data is used
- * correctly.
- */
- nreno = (CC_ALGO(ccv->ccvc.tcp) == &newreno_cc_algo) ? ccv->cc_data : NULL;
- beta = (nreno == NULL) ? V_newreno_beta : nreno->beta;
- beta_ecn = (nreno == NULL) ? V_newreno_beta_ecn : nreno->beta_ecn;
-
+ nreno = (struct newreno *) ccv->cc_data;
+ beta = nreno->beta;
+ beta_ecn = nreno->beta_ecn;
/*
* Note that we only change the backoff for ECN if the
* global sysctl V_cc_do_abe is set <or> the stack itself
@@ -257,7 +393,7 @@ newreno_cong_signal(struct cc_var *ccv, uint32_t type)
*/
if ((type == CC_ECN) &&
(V_cc_do_abe ||
- ((nreno != NULL) && (nreno->newreno_flags & CC_NEWRENO_BETA_ECN))))
+ ((nreno != NULL) && (nreno->newreno_flags & CC_NEWRENO_BETA_ECN_ENABLED))))
factor = beta_ecn;
else
factor = beta;
@@ -271,6 +407,11 @@ newreno_cong_signal(struct cc_var *ccv, uint32_t type)
switch (type) {
case CC_NDUPACK:
+ if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) {
+ /* Make sure the flags are all off, we had a loss */
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+ }
if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
if (IN_CONGRECOVERY(CCV(ccv, t_flags) &&
V_cc_do_abe && V_cc_abe_frlossreduce)) {
@@ -284,6 +425,11 @@ newreno_cong_signal(struct cc_var *ccv, uint32_t type)
}
break;
case CC_ECN:
+ if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) {
+ /* Make sure the flags are all off, we had a loss */
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+ }
if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
CCV(ccv, snd_ssthresh) = cwin;
CCV(ccv, snd_cwnd) = cwin;
@@ -346,17 +492,10 @@ newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf)
if (CC_ALGO(ccv->ccvc.tcp) != &newreno_cc_algo)
return (ENOPROTOOPT);
- nreno = ccv->cc_data;
+ nreno = (struct newreno *)ccv->cc_data;
opt = buf;
-
switch (sopt->sopt_dir) {
case SOPT_SET:
- /* We cannot set without cc_data memory. */
- if (nreno == NULL) {
- nreno = newreno_malloc(ccv);
- if (nreno == NULL)
- return (ENOMEM);
- }
switch (opt->name) {
case CC_NEWRENO_BETA:
nreno->beta = opt->val;
@@ -365,6 +504,19 @@ newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf)
if ((!V_cc_do_abe) && ((nreno->newreno_flags & CC_NEWRENO_BETA_ECN) == 0))
return (EACCES);
nreno->beta_ecn = opt->val;
+ nreno->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
+ break;
+ case CC_NEWRENO_ENABLE_HYSTART:
+ /* Allow hystart on this connection */
+ if (opt->val != 0) {
+ nreno->newreno_flags |= CC_NEWRENO_HYSTART_ALLOWED;
+ if (opt->val > 1)
+ nreno->newreno_flags |= CC_NEWRENO_HYSTART_CAN_SH_CWND;
+ if (opt->val > 2)
+ nreno->newreno_flags |= CC_NEWRENO_HYSTART_CONS_SSTH;
+ } else
+ nreno->newreno_flags &= ~(CC_NEWRENO_HYSTART_ALLOWED|CC_NEWRENO_HYSTART_CAN_SH_CWND|CC_NEWRENO_HYSTART_CONS_SSTH);
+ newreno_log_hystart_event(ccv, nreno, 7, opt->val);
break;
default:
return (ENOPROTOOPT);
@@ -380,6 +532,17 @@ newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf)
opt->val = (nreno == NULL) ?
V_newreno_beta_ecn : nreno->beta_ecn;
break;
+ case CC_NEWRENO_ENABLE_HYSTART:
+ if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ALLOWED) {
+ if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CONS_SSTH)
+ opt->val = 3;
+ else if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CAN_SH_CWND)
+ opt->val = 2;
+ else
+ opt->val = 1;
+ } else
+ opt->val = 0;
+ break;
default:
return (ENOPROTOOPT);
}
@@ -411,6 +574,78 @@ newreno_beta_handler(SYSCTL_HANDLER_ARGS)
return (error);
}
+static void
+newreno_newround(struct cc_var *ccv, uint32_t round_cnt)
+{
+ struct newreno *nreno;
+
+ nreno = (struct newreno *)ccv->cc_data;
+ /* We have entered a new round */
+ nreno->css_lastround_minrtt = nreno->css_current_round_minrtt;
+ nreno->css_current_round_minrtt = 0xffffffff;
+ nreno->css_rttsample_count = 0;
+ nreno->css_current_round = round_cnt;
+ if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) &&
+ ((round_cnt - nreno->css_entered_at_round) >= hystart_css_rounds)) {
+ /* Enter CA */
+ if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CAN_SH_CWND) {
+ /*
+ * We may engage more than snd_ssthresh, so engage
+ * the brakes!! Though we will stay in SS to
+ * creep back up again, so let's leave CSS active
+ * and give us hystart_css_rounds more rounds.
+ */
+ if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CONS_SSTH) {
+ CCV(ccv, snd_ssthresh) = ((nreno->css_lowrtt_fas + nreno->css_fas_at_css_entry) / 2);
+ } else {
+ CCV(ccv, snd_ssthresh) = nreno->css_lowrtt_fas;
+ }
+ CCV(ccv, snd_cwnd) = nreno->css_fas_at_css_entry;
+ nreno->css_entered_at_round = round_cnt;
+ } else {
+ CCV(ccv, snd_ssthresh) = CCV(ccv, snd_cwnd);
+ /* Turn off the CSS flag */
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+ /* Disable use of CSS in the future except long idle */
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
+ }
+ newreno_log_hystart_event(ccv, nreno, 6, CCV(ccv, snd_ssthresh));
+ }
+ newreno_log_hystart_event(ccv, nreno, 4, round_cnt);
+}
+
+static void
+newreno_rttsample(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt, uint32_t fas)
+{
+ struct newreno *nreno;
+
+ nreno = (struct newreno *)ccv->cc_data;
+ if (rxtcnt > 1) {
+ /*
+ * Only look at RTTs that are non-ambiguous.
+ */
+ return;
+ }
+ nreno->css_rttsample_count++;
+ nreno->css_last_fas = fas;
+ if (nreno->css_current_round_minrtt > usec_rtt) {
+ nreno->css_current_round_minrtt = usec_rtt;
+ nreno->css_lowrtt_fas = nreno->css_last_fas;
+ }
+ if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) &&
+ (nreno->css_rttsample_count >= hystart_n_rttsamples) &&
+ (nreno->css_baseline_minrtt > nreno->css_current_round_minrtt)) {
+ /*
+ * We were in CSS and the RTT is now less, we
+ * entered CSS erroneously.
+ */
+ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+ newreno_log_hystart_event(ccv, nreno, 8, nreno->css_baseline_minrtt);
+ nreno->css_baseline_minrtt = 0xffffffff;
+ }
+ newreno_log_hystart_event(ccv, nreno, 5, usec_rtt);
+}
+
SYSCTL_DECL(_net_inet_tcp_cc_newreno);
SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, newreno,
CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
@@ -426,5 +661,45 @@ SYSCTL_PROC(_net_inet_tcp_cc_newreno, OID_AUTO, beta_ecn,
&VNET_NAME(newreno_beta_ecn), 3, &newreno_beta_handler, "IU",
"New Reno beta ecn, specified as number between 1 and 100");
+SYSCTL_NODE(_net_inet_tcp_cc_newreno, OID_AUTO, hystartplusplus,
+ CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
+ "New Reno related HyStart++ settings");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, lowcwnd,
+ CTLFLAG_RW,
+ &hystart_lowcwnd, 16,
+ "The number of MSS in the CWND before HyStart++ is active");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, minrtt_thresh,
+ CTLFLAG_RW,
+ &hystart_minrtt_thresh, 4000,
+ "HyStarts++ minimum RTT thresh used in clamp (in microseconds)");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, maxrtt_thresh,
+ CTLFLAG_RW,
+ &hystart_maxrtt_thresh, 16000,
+ "HyStarts++ maximum RTT thresh used in clamp (in microseconds)");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, n_rttsamples,
+ CTLFLAG_RW,
+ &hystart_n_rttsamples, 8,
+ "The number of RTT samples that must be seen to consider HyStart++");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, css_growth_div,
+ CTLFLAG_RW,
+ &hystart_css_growth_div, 4,
+ "The divisor to the growth when in Hystart++ CSS");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, css_rounds,
+ CTLFLAG_RW,
+ &hystart_css_rounds, 5,
+ "The number of rounds HyStart++ lasts in CSS before falling to CA");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, bblogs,
+ CTLFLAG_RW,
+ &hystart_bblogs, 0,
+ "Do we enable HyStart++ Black Box logs to be generated if BB logging is on");
+
+
DECLARE_CC_MODULE(newreno, &newreno_cc_algo);
MODULE_VERSION(newreno, 1);
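
The new tunables appear under net.inet.tcp.cc.newreno.hystartplusplus. As a usage sketch,
they can be read and set from userland with sysctlbyname(3) (error handling abbreviated;
setting bblogs assumes sufficient privilege):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t rounds, bblogs = 1;
            size_t len = sizeof(rounds);

            /* How many round trips does CSS last before we fall into CA? */
            if (sysctlbyname("net.inet.tcp.cc.newreno.hystartplusplus.css_rounds",
                &rounds, &len, NULL, 0) == 0)
                    printf("css_rounds: %u\n", rounds);

            /* Turn on the HyStart++ black box logging added by this commit. */
            if (sysctlbyname("net.inet.tcp.cc.newreno.hystartplusplus.bblogs",
                NULL, NULL, &bblogs, sizeof(bblogs)) != 0)
                    perror("bblogs");
            return (0);
    }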