Diffstat (limited to 'sys/netinet')
 -rw-r--r--  sys/netinet/cc/cc_cubic.c    | 396
 -rw-r--r--  sys/netinet/cc/cc_cubic.h    | 229
 -rw-r--r--  sys/netinet/cc/cc_htcp.c     | 521
 -rw-r--r--  sys/netinet/cc/cc_newreno.c  |  79
 -rw-r--r--  sys/netinet/if_ether.c       |  10
 -rw-r--r--  sys/netinet/in.c             |   2
 -rw-r--r--  sys/netinet/ip6.h            |   2
 -rw-r--r--  sys/netinet/tcp_input.c      |   2
 -rw-r--r--  sys/netinet/tcp_output.c     |  35
 -rw-r--r--  sys/netinet/tcp_timer.c      |   2
 10 files changed, 1199 insertions(+), 79 deletions(-)
diff --git a/sys/netinet/cc/cc_cubic.c b/sys/netinet/cc/cc_cubic.c
new file mode 100644
index 0000000..d546ee1
--- /dev/null
+++ b/sys/netinet/cc/cc_cubic.c
@@ -0,0 +1,396 @@
+/*-
+ * Copyright (c) 2008-2010 Lawrence Stewart <lstewart@freebsd.org>
+ * Copyright (c) 2010 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Lawrence Stewart while studying at the Centre
+ * for Advanced Internet Architectures, Swinburne University, made possible in
+ * part by a grant from the Cisco University Research Program Fund at Community
+ * Foundation Silicon Valley.
+ *
+ * Portions of this software were developed at the Centre for Advanced
+ * Internet Architectures, Swinburne University of Technology, Melbourne,
+ * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * An implementation of the CUBIC congestion control algorithm for FreeBSD,
+ * based on the Internet Draft "draft-rhee-tcpm-cubic-02" by Rhee, Xu and Ha.
+ * Originally released as part of the NewTCP research project at Swinburne
+ * University's Centre for Advanced Internet Architectures, Melbourne,
+ * Australia, which was made possible in part by a grant from the Cisco
+ * University Research Program Fund at Community Foundation Silicon Valley. More
+ * details are available at:
+ * http://caia.swin.edu.au/urp/newtcp/
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <net/vnet.h>
+
+#include <netinet/cc.h>
+#include <netinet/tcp_seq.h>
+#include <netinet/tcp_timer.h>
+#include <netinet/tcp_var.h>
+
+#include <netinet/cc/cc_cubic.h>
+#include <netinet/cc/cc_module.h>
+
+static void cubic_ack_received(struct cc_var *ccv, uint16_t type);
+static void cubic_cb_destroy(struct cc_var *ccv);
+static int cubic_cb_init(struct cc_var *ccv);
+static void cubic_cong_signal(struct cc_var *ccv, uint32_t type);
+static void cubic_conn_init(struct cc_var *ccv);
+static int cubic_mod_init(void);
+static void cubic_post_recovery(struct cc_var *ccv);
+static void cubic_record_rtt(struct cc_var *ccv);
+static void cubic_ssthresh_update(struct cc_var *ccv);
+
+struct cubic {
+ /* Cubic K in fixed point form with CUBIC_SHIFT worth of precision. */
+ int64_t K;
+ /* Sum of RTT samples across an epoch in ticks. */
+ int64_t sum_rtt_ticks;
+ /* cwnd at the most recent congestion event. */
+ unsigned long max_cwnd;
+ /* cwnd at the previous congestion event. */
+ unsigned long prev_max_cwnd;
+ /* Number of congestion events. */
+ uint32_t num_cong_events;
+ /* Minimum observed rtt in ticks. */
+ int min_rtt_ticks;
+ /* Mean observed rtt between congestion epochs. */
+ int mean_rtt_ticks;
+ /* ACKs since last congestion event. */
+ int epoch_ack_count;
+ /* Time of last congestion event in ticks. */
+ int t_last_cong;
+};
+
+MALLOC_DECLARE(M_CUBIC);
+MALLOC_DEFINE(M_CUBIC, "cubic data",
+ "Per connection data required for the CUBIC congestion control algorithm");
+
+struct cc_algo cubic_cc_algo = {
+ .name = "cubic",
+ .ack_received = cubic_ack_received,
+ .cb_destroy = cubic_cb_destroy,
+ .cb_init = cubic_cb_init,
+ .cong_signal = cubic_cong_signal,
+ .conn_init = cubic_conn_init,
+ .mod_init = cubic_mod_init,
+ .post_recovery = cubic_post_recovery,
+};
+
+static void
+cubic_ack_received(struct cc_var *ccv, uint16_t type)
+{
+ struct cubic *cubic_data;
+ unsigned long w_tf, w_cubic_next;
+ int ticks_since_cong;
+
+ cubic_data = ccv->cc_data;
+ cubic_record_rtt(ccv);
+
+ /*
+ * Regular ACK and we're not in cong/fast recovery and we're cwnd
+ * limited and we're either not doing ABC or are slow starting or are
+ * doing ABC and we've sent a cwnd's worth of bytes.
+ */
+ if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
+ (ccv->flags & CCF_CWND_LIMITED) && (!V_tcp_do_rfc3465 ||
+ CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
+ (V_tcp_do_rfc3465 && ccv->flags & CCF_ABC_SENTAWND))) {
+ /* Use the logic in NewReno ack_received() for slow start. */
+ if (CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
+ cubic_data->min_rtt_ticks == TCPTV_SRTTBASE)
+ newreno_cc_algo.ack_received(ccv, type);
+ else {
+ ticks_since_cong = ticks - cubic_data->t_last_cong;
+
+ /*
+ * The mean RTT is used to best reflect the equations in
+ * the I-D. Using min_rtt in the tf_cwnd calculation
+ * causes w_tf to grow much faster than it should if the
+ * RTT is dominated by network buffering rather than
+ * propagation delay.
+ */
+ w_tf = tf_cwnd(ticks_since_cong,
+ cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
+ CCV(ccv, t_maxseg));
+
+ w_cubic_next = cubic_cwnd(ticks_since_cong +
+ cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
+ CCV(ccv, t_maxseg), cubic_data->K);
+
+ ccv->flags &= ~CCF_ABC_SENTAWND;
+
+ if (w_cubic_next < w_tf)
+ /*
+ * TCP-friendly region, follow tf
+ * cwnd growth.
+ */
+ CCV(ccv, snd_cwnd) = w_tf;
+
+ else if (CCV(ccv, snd_cwnd) < w_cubic_next) {
+ /*
+ * Concave or convex region, follow CUBIC
+ * cwnd growth.
+ */
+ if (V_tcp_do_rfc3465)
+ CCV(ccv, snd_cwnd) = w_cubic_next;
+ else
+ CCV(ccv, snd_cwnd) += ((w_cubic_next -
+ CCV(ccv, snd_cwnd)) *
+ CCV(ccv, t_maxseg)) /
+ CCV(ccv, snd_cwnd);
+ }
+
+ /*
+ * If we're not in slow start and we're probing for a
+ * new cwnd limit at the start of a connection
+ * (happens when hostcache has a relevant entry),
+ * keep updating our current estimate of the
+ * max_cwnd.
+ */
+ if (cubic_data->num_cong_events == 0 &&
+ cubic_data->max_cwnd < CCV(ccv, snd_cwnd))
+ cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
+ }
+ }
+}
+
+static void
+cubic_cb_destroy(struct cc_var *ccv)
+{
+
+ if (ccv->cc_data != NULL)
+ free(ccv->cc_data, M_CUBIC);
+}
+
+static int
+cubic_cb_init(struct cc_var *ccv)
+{
+ struct cubic *cubic_data;
+
+ cubic_data = malloc(sizeof(struct cubic), M_CUBIC, M_NOWAIT|M_ZERO);
+
+ if (cubic_data == NULL)
+ return (ENOMEM);
+
+ /* Init some key variables with sensible defaults. */
+ cubic_data->t_last_cong = ticks;
+ cubic_data->min_rtt_ticks = TCPTV_SRTTBASE;
+ cubic_data->mean_rtt_ticks = TCPTV_SRTTBASE;
+
+ ccv->cc_data = cubic_data;
+
+ return (0);
+}
+
+/*
+ * Perform any necessary tasks before we enter congestion recovery.
+ */
+static void
+cubic_cong_signal(struct cc_var *ccv, uint32_t type)
+{
+ struct cubic *cubic_data;
+
+ cubic_data = ccv->cc_data;
+
+ switch (type) {
+ case CC_NDUPACK:
+ if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
+ if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
+ cubic_ssthresh_update(ccv);
+ cubic_data->num_cong_events++;
+ cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
+ cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
+ }
+ ENTER_RECOVERY(CCV(ccv, t_flags));
+ }
+ break;
+
+ case CC_ECN:
+ if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
+ cubic_ssthresh_update(ccv);
+ cubic_data->num_cong_events++;
+ cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
+ cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
+ cubic_data->t_last_cong = ticks;
+ CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
+ ENTER_CONGRECOVERY(CCV(ccv, t_flags));
+ }
+ break;
+
+ case CC_RTO:
+ /*
+ * Grab the current time and record it so we know when the
+ * most recent congestion event was. Only record it when the
+ * timeout has fired more than once, as there is a reasonable
+ * chance the first one is a false alarm and may not indicate
+ * congestion.
+ */
+ if (CCV(ccv, t_rxtshift) >= 2)
+ cubic_data->num_cong_events++;
+ cubic_data->t_last_cong = ticks;
+ break;
+ }
+}
+
+static void
+cubic_conn_init(struct cc_var *ccv)
+{
+ struct cubic *cubic_data;
+
+ cubic_data = ccv->cc_data;
+
+ /*
+ * Ensure we have a sane initial value for max_cwnd recorded. Without
+ * this, bad things happen when entries from the TCP hostcache
+ * get used.
+ */
+ cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
+}
+
+static int
+cubic_mod_init(void)
+{
+
+ cubic_cc_algo.after_idle = newreno_cc_algo.after_idle;
+
+ return (0);
+}
+
+/*
+ * Perform any necessary tasks before we exit congestion recovery.
+ */
+static void
+cubic_post_recovery(struct cc_var *ccv)
+{
+ struct cubic *cubic_data;
+
+ cubic_data = ccv->cc_data;
+
+ /* Fast convergence heuristic. */
+ if (cubic_data->max_cwnd < cubic_data->prev_max_cwnd)
+ cubic_data->max_cwnd = (cubic_data->max_cwnd * CUBIC_FC_FACTOR)
+ >> CUBIC_SHIFT;
+
+ if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
+ /*
+ * If inflight data is less than ssthresh, set cwnd
+ * conservatively to avoid a burst of data, as suggested in
+ * the NewReno RFC. Otherwise, use the CUBIC method.
+ *
+ * XXXLAS: Find a way to do this without needing curack
+ */
+ if (SEQ_GT(ccv->curack + CCV(ccv, snd_ssthresh),
+ CCV(ccv, snd_max)))
+ CCV(ccv, snd_cwnd) = CCV(ccv, snd_max) - ccv->curack +
+ CCV(ccv, t_maxseg);
+ else
+ /* Update cwnd based on beta and adjusted max_cwnd. */
+ CCV(ccv, snd_cwnd) = max(1, ((CUBIC_BETA *
+ cubic_data->max_cwnd) >> CUBIC_SHIFT));
+ }
+ cubic_data->t_last_cong = ticks;
+
+ /* Calculate the average RTT between congestion epochs. */
+ if (cubic_data->epoch_ack_count > 0 && cubic_data->sum_rtt_ticks > 0)
+ cubic_data->mean_rtt_ticks = (int)(cubic_data->sum_rtt_ticks /
+ cubic_data->epoch_ack_count);
+ else
+ /* For safety. */
+ cubic_data->mean_rtt_ticks = cubic_data->min_rtt_ticks;
+
+ cubic_data->epoch_ack_count = 0;
+ cubic_data->sum_rtt_ticks = 0;
+ cubic_data->K = cubic_k(cubic_data->max_cwnd / CCV(ccv, t_maxseg));
+}
+
+/*
+ * Record the min RTT and sum samples for the epoch average RTT calculation.
+ */
+static void
+cubic_record_rtt(struct cc_var *ccv)
+{
+ struct cubic *cubic_data;
+ int t_srtt_ticks;
+
+ /* Ignore srtt until a min number of samples have been taken. */
+ if (CCV(ccv, t_rttupdated) >= CUBIC_MIN_RTT_SAMPLES) {
+ cubic_data = ccv->cc_data;
+ t_srtt_ticks = CCV(ccv, t_srtt) / TCP_RTT_SCALE;
+
+ /*
+ * Record the current SRTT as our minrtt if it's the smallest
+ * we've seen or minrtt is currently equal to its initialised
+ * value.
+ *
+ * XXXLAS: Should there be some hysteresis for minrtt?
+ */
+ if ((t_srtt_ticks < cubic_data->min_rtt_ticks ||
+ cubic_data->min_rtt_ticks == TCPTV_SRTTBASE))
+ cubic_data->min_rtt_ticks = max(1, t_srtt_ticks);
+
+ /* Sum samples for epoch average RTT calculation. */
+ cubic_data->sum_rtt_ticks += t_srtt_ticks;
+ cubic_data->epoch_ack_count++;
+ }
+}
+
+/*
+ * Update the ssthresh in the event of congestion.
+ */
+static void
+cubic_ssthresh_update(struct cc_var *ccv)
+{
+ struct cubic *cubic_data;
+
+ cubic_data = ccv->cc_data;
+
+ /*
+ * On the first congestion event, set ssthresh to cwnd * 0.5, on
+ * subsequent congestion events, set it to cwnd * beta.
+ */
+ if (cubic_data->num_cong_events == 0)
+ CCV(ccv, snd_ssthresh) = CCV(ccv, snd_cwnd) >> 1;
+ else
+ CCV(ccv, snd_ssthresh) = (CCV(ccv, snd_cwnd) * CUBIC_BETA)
+ >> CUBIC_SHIFT;
+}
+
+
+DECLARE_CC_MODULE(cubic, &cubic_cc_algo);
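
Both cubic_ssthresh_update() and cubic_post_recovery() above lean on the
fixed-point identity (x * CUBIC_BETA) >> CUBIC_SHIFT ~= 0.8 * x. A minimal
userland sketch (not part of the patch; constants copied from cc_cubic.h)
confirming the approximation:

/*
 * With CUBIC_SHIFT = 8 and CUBIC_BETA = 204, (cwnd * 204) >> 8 equals
 * cwnd * 0.796875, i.e. just under the target multiplier of 0.8 used
 * for ssthresh and the post-recovery cwnd above.
 */
#include <stdint.h>
#include <stdio.h>

#define CUBIC_SHIFT	8
#define CUBIC_BETA	204	/* ~0.8 << CUBIC_SHIFT */

int
main(void)
{
	uint64_t cwnd;

	for (cwnd = 14600; cwnd <= 14600000; cwnd *= 10)
		printf("cwnd=%8ju fixed=%8ju 0.8*cwnd=%10.1f\n",
		    (uintmax_t)cwnd,
		    (uintmax_t)((cwnd * CUBIC_BETA) >> CUBIC_SHIFT),
		    cwnd * 0.8);
	return (0);
}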
diff --git a/sys/netinet/cc/cc_cubic.h b/sys/netinet/cc/cc_cubic.h
new file mode 100644
index 0000000..cf3470b
--- /dev/null
+++ b/sys/netinet/cc/cc_cubic.h
@@ -0,0 +1,229 @@
+/*-
+ * Copyright (c) 2008-2010 Lawrence Stewart <lstewart@freebsd.org>
+ * Copyright (c) 2010 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Lawrence Stewart while studying at the Centre
+ * for Advanced Internet Architectures, Swinburne University, made possible in
+ * part by a grant from the Cisco University Research Program Fund at Community
+ * Foundation Silicon Valley.
+ *
+ * Portions of this software were developed at the Centre for Advanced
+ * Internet Architectures, Swinburne University of Technology, Melbourne,
+ * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_CC_CUBIC_H_
+#define _NETINET_CC_CUBIC_H_
+
+/* Number of bits of precision for fixed point math calcs. */
+#define CUBIC_SHIFT 8
+
+#define CUBIC_SHIFT_4 32
+
+/* 0.5 << CUBIC_SHIFT. */
+#define RENO_BETA 128
+
+/* ~0.8 << CUBIC_SHIFT. */
+#define CUBIC_BETA 204
+
+/* ~0.2 << CUBIC_SHIFT. */
+#define ONE_SUB_CUBIC_BETA 51
+
+/* 3 * ONE_SUB_CUBIC_BETA. */
+#define THREE_X_PT2 153
+
+/* (2 << CUBIC_SHIFT) - ONE_SUB_CUBIC_BETA. */
+#define TWO_SUB_PT2 461
+
+/* ~0.4 << CUBIC_SHIFT. */
+#define CUBIC_C_FACTOR 102
+
+/* CUBIC fast convergence factor: ~0.9 << CUBIC_SHIFT. */
+#define CUBIC_FC_FACTOR 230
+
+/* Don't trust s_rtt until this many rtt samples have been taken. */
+#define CUBIC_MIN_RTT_SAMPLES 8
+
+/* Userland only bits. */
+#ifndef _KERNEL
+
+extern int hz;
+
+/*
+ * Implementation based on the formulae found in the CUBIC Internet Draft
+ * "draft-rhee-tcpm-cubic-02".
+ *
+ * Note BETA used in cc_cubic is equal to (1-beta) in the I-D
+ */
+
+static __inline float
+theoretical_cubic_k(double wmax_pkts)
+{
+ double C;
+
+ C = 0.4;
+
+ return (pow((wmax_pkts * 0.2) / C, (1.0 / 3.0)) * pow(2, CUBIC_SHIFT));
+}
+
+static __inline unsigned long
+theoretical_cubic_cwnd(int ticks_since_cong, unsigned long wmax, uint32_t smss)
+{
+ double C, wmax_pkts;
+
+ C = 0.4;
+ wmax_pkts = wmax / (double)smss;
+
+ return (smss * (wmax_pkts +
+ (C * pow(ticks_since_cong / (double)hz -
+ theoretical_cubic_k(wmax_pkts) / pow(2, CUBIC_SHIFT), 3.0))));
+}
+
+static __inline unsigned long
+theoretical_reno_cwnd(int ticks_since_cong, int rtt_ticks, unsigned long wmax,
+ uint32_t smss)
+{
+
+ return ((wmax * 0.5) + ((ticks_since_cong / (float)rtt_ticks) * smss));
+}
+
+static __inline unsigned long
+theoretical_tf_cwnd(int ticks_since_cong, int rtt_ticks, unsigned long wmax,
+ uint32_t smss)
+{
+
+ return ((wmax * 0.8) + ((3 * 0.2) / (2 - 0.2) *
+ (ticks_since_cong / (float)rtt_ticks) * smss));
+}
+
+#endif /* !_KERNEL */
+
+/*
+ * Compute the CUBIC K value used in the cwnd calculation, using an
+ * implementation of eqn 2 in the I-D. The method used
+ * here is adapted from Apple Computer Technical Report #KT-32.
+ */
+static __inline int64_t
+cubic_k(unsigned long wmax_pkts)
+{
+ int64_t s, K;
+ uint16_t p;
+
+ K = s = 0;
+ p = 0;
+
+ /* (wmax * beta)/C with CUBIC_SHIFT worth of precision. */
+ s = ((wmax_pkts * ONE_SUB_CUBIC_BETA) << CUBIC_SHIFT) / CUBIC_C_FACTOR;
+
+ /* Rebase s to be between 1 and 1/8 with a shift of CUBIC_SHIFT. */
+ while (s >= 256) {
+ s >>= 3;
+ p++;
+ }
+
+ /*
+ * Some magic constants taken from the Apple TR with appropriate
+ * shifts: 275 == 1.072302 << CUBIC_SHIFT, 98 == 0.3812513 <<
+ * CUBIC_SHIFT, 120 == 0.46946116 << CUBIC_SHIFT.
+ */
+ K = (((s * 275) >> CUBIC_SHIFT) + 98) -
+ (((s * s * 120) >> CUBIC_SHIFT) >> CUBIC_SHIFT);
+
+ /* Multiply by 2^p to undo the rebasing of s from above. */
+ return (K <<= p);
+}
+
+/*
+ * Compute the new cwnd value using an implementation of eqn 1 from the I-D.
+ * Thanks to Kip Macy for help debugging this function.
+ *
+ * XXXLAS: Characterise bounds for overflow.
+ */
+static __inline unsigned long
+cubic_cwnd(int ticks_since_cong, unsigned long wmax, uint32_t smss, int64_t K)
+{
+ int64_t cwnd;
+
+ /* K is in fixed point form with CUBIC_SHIFT worth of precision. */
+
+ /* t - K, with CUBIC_SHIFT worth of precision. */
+ cwnd = ((int64_t)(ticks_since_cong << CUBIC_SHIFT) - (K * hz)) / hz;
+
+ /* (t - K)^3, with CUBIC_SHIFT^3 worth of precision. */
+ cwnd *= (cwnd * cwnd);
+
+ /*
+ * C(t - K)^3 + wmax
+ * The down shift by CUBIC_SHIFT_4 is because cwnd has 4 lots of
+ * CUBIC_SHIFT included in the value. 3 from the cubing of cwnd above,
+ * and an extra from multiplying through by CUBIC_C_FACTOR.
+ */
+ cwnd = ((cwnd * CUBIC_C_FACTOR * smss) >> CUBIC_SHIFT_4) + wmax;
+
+ return ((unsigned long)cwnd);
+}
+
+/*
+ * Compute an approximation of the NewReno cwnd some number of ticks after a
+ * congestion event. RTT should be the average RTT estimate for the path
+ * measured over the previous congestion epoch and wmax is the value of cwnd at
+ * the last congestion event. The "TCP friendly" concept in the CUBIC I-D is
+ * rather tricky to understand and it turns out this function is not required.
+ * It is left here for reference.
+ */
+static __inline unsigned long
+reno_cwnd(int ticks_since_cong, int rtt_ticks, unsigned long wmax,
+ uint32_t smss)
+{
+
+ /*
+ * For NewReno, beta = 0.5, therefore: W_tcp(t) = wmax*0.5 + t/RTT
+ * W_tcp(t) deals with cwnd/wmax in pkts, so because our cwnd is in
+ * bytes, we have to multiply by smss.
+ */
+ return (((wmax * RENO_BETA) + (((ticks_since_cong * smss)
+ << CUBIC_SHIFT) / rtt_ticks)) >> CUBIC_SHIFT);
+}
+
+/*
+ * Compute an approximation of the "TCP friendly" cwnd some number of ticks
+ * after a congestion event that is designed to yield the same average cwnd as
+ * NewReno while using CUBIC's beta of 0.8. RTT should be the average RTT
+ * estimate for the path measured over the previous congestion epoch and wmax is
+ * the value of cwnd at the last congestion event.
+ */
+static __inline unsigned long
+tf_cwnd(int ticks_since_cong, int rtt_ticks, unsigned long wmax,
+ uint32_t smss)
+{
+
+ /* Equation 4 of I-D. */
+ return (((wmax * CUBIC_BETA) + (((THREE_X_PT2 * ticks_since_cong *
+ smss) << CUBIC_SHIFT) / TWO_SUB_PT2 / rtt_ticks)) >> CUBIC_SHIFT);
+}
+
+#endif /* _NETINET_CC_CUBIC_H_ */
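
The userland theoretical_* helpers above exist so the kernel fixed-point code
can be cross-checked outside the kernel. A self-contained sketch of such a
check for cubic_k() (the function body and constants are copied verbatim from
this header; the cbrt() closed form follows from eqn 2 of the I-D with
beta = 0.8 and C = 0.4; compile with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define CUBIC_SHIFT		8
#define ONE_SUB_CUBIC_BETA	51	/* ~0.2 << CUBIC_SHIFT */
#define CUBIC_C_FACTOR		102	/* ~0.4 << CUBIC_SHIFT */

/* cubic_k() copied unchanged from cc_cubic.h. */
static int64_t
cubic_k(unsigned long wmax_pkts)
{
	int64_t s, K;
	uint16_t p;

	K = s = 0;
	p = 0;
	s = ((wmax_pkts * ONE_SUB_CUBIC_BETA) << CUBIC_SHIFT) / CUBIC_C_FACTOR;
	while (s >= 256) {
		s >>= 3;
		p++;
	}
	K = (((s * 275) >> CUBIC_SHIFT) + 98) -
	    (((s * s * 120) >> CUBIC_SHIFT) >> CUBIC_SHIFT);
	return (K <<= p);
}

int
main(void)
{
	unsigned long w;

	/* Fixed-point K (downshifted by CUBIC_SHIFT) vs. the closed form. */
	for (w = 10; w <= 100000; w *= 10)
		printf("wmax=%6lu pkts: fixed K=%8.3f exact K=%8.3f\n", w,
		    cubic_k(w) / 256.0, cbrt(w * 0.2 / 0.4));
	return (0);
}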
diff --git a/sys/netinet/cc/cc_htcp.c b/sys/netinet/cc/cc_htcp.c
new file mode 100644
index 0000000..61175ac
--- /dev/null
+++ b/sys/netinet/cc/cc_htcp.c
@@ -0,0 +1,521 @@
+/*-
+ * Copyright (c) 2007-2008
+ * Swinburne University of Technology, Melbourne, Australia
+ * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
+ * Copyright (c) 2010 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed at the Centre for Advanced Internet
+ * Architectures, Swinburne University, by Lawrence Stewart and James Healy,
+ * made possible in part by a grant from the Cisco University Research Program
+ * Fund at Community Foundation Silicon Valley.
+ *
+ * Portions of this software were developed at the Centre for Advanced
+ * Internet Architectures, Swinburne University of Technology, Melbourne,
+ * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * An implementation of the H-TCP congestion control algorithm for FreeBSD,
+ * based on the Internet Draft "draft-leith-tcp-htcp-06.txt" by Leith and
+ * Shorten. Originally released as part of the NewTCP research project at
+ * Swinburne University's Centre for Advanced Internet Architectures, Melbourne,
+ * Australia, which was made possible in part by a grant from the Cisco
+ * University Research Program Fund at Community Foundation Silicon Valley. More
+ * details are available at:
+ * http://caia.swin.edu.au/urp/newtcp/
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <net/vnet.h>
+
+#include <netinet/cc.h>
+#include <netinet/tcp_seq.h>
+#include <netinet/tcp_timer.h>
+#include <netinet/tcp_var.h>
+
+#include <netinet/cc/cc_module.h>
+
+/* Fixed point math shifts. */
+#define HTCP_SHIFT 8
+#define HTCP_ALPHA_INC_SHIFT 4
+
+#define HTCP_INIT_ALPHA 1
+#define HTCP_DELTA_L hz /* 1 sec in ticks. */
+#define HTCP_MINBETA 128 /* 0.5 << HTCP_SHIFT. */
+#define HTCP_MAXBETA 204 /* ~0.8 << HTCP_SHIFT. */
+#define HTCP_MINROWE 26 /* ~0.1 << HTCP_SHIFT. */
+#define HTCP_MAXROWE 512 /* 2 << HTCP_SHIFT. */
+
+/* RTT_ref (ms) used in the calculation of alpha if RTT scaling is enabled. */
+#define HTCP_RTT_REF 100
+
+/* Don't trust SRTT until this many samples have been taken. */
+#define HTCP_MIN_RTT_SAMPLES 8
+
+/*
+ * HTCP_CALC_ALPHA performs a fixed point math calculation to determine the
+ * value of alpha, based on the function defined in the HTCP spec.
+ *
+ * i.e. 1 + 10(delta - delta_l) + ((delta - delta_l) / 2) ^ 2
+ *
+ * "diff" is passed in to the macro as "delta - delta_l" and is expected to be
+ * in units of ticks.
+ *
+ * The joyousness of fixed point maths means our function implementation looks a
+ * little funky...
+ *
+ * In order to maintain some precision in the calculations, a fixed point shift
+ * HTCP_ALPHA_INC_SHIFT is used to ensure the integer divisions don't
+ * truncate the results too badly.
+ *
+ * The "16" value is the "1" term in the alpha function shifted up by
+ * HTCP_ALPHA_INC_SHIFT
+ *
+ * The "160" value is the "10" multiplier in the alpha function multiplied by
+ * 2^HTCP_ALPHA_INC_SHIFT
+ *
+ * Specifying these as constants reduces the computations required. After
+ * up-shifting all the terms in the function and performing the required
+ * calculations, we down-shift the final result by HTCP_ALPHA_INC_SHIFT to
+ * ensure it is back in the correct range.
+ *
+ * The "hz" terms are required as kernels can be configured to run with
+ * different tick timers, which we have to adjust for in the alpha calculation
+ * (which originally was defined in terms of seconds).
+ *
+ * We also have to be careful to constrain the value of diff such that it won't
+ * overflow whilst performing the calculation. The middle term i.e. (160 * diff)
+ * / hz is the limiting factor in the calculation. We must constrain diff to be
+ * less than the max size of an int divided by the constant 160 figure
+ * i.e. diff < INT_MAX / 160
+ *
+ * NB: Changing HTCP_ALPHA_INC_SHIFT will require you to MANUALLY update the
+ * constants used in this function!
+ */
+#define HTCP_CALC_ALPHA(diff) \
+((\
+ (16) + \
+ ((160 * (diff)) / hz) + \
+ (((diff) / hz) * (((diff) << HTCP_ALPHA_INC_SHIFT) / (4 * hz))) \
+) >> HTCP_ALPHA_INC_SHIFT)
+
+static void htcp_ack_received(struct cc_var *ccv, uint16_t type);
+static void htcp_cb_destroy(struct cc_var *ccv);
+static int htcp_cb_init(struct cc_var *ccv);
+static void htcp_cong_signal(struct cc_var *ccv, uint32_t type);
+static int htcp_mod_init(void);
+static void htcp_post_recovery(struct cc_var *ccv);
+static void htcp_recalc_alpha(struct cc_var *ccv);
+static void htcp_recalc_beta(struct cc_var *ccv);
+static void htcp_record_rtt(struct cc_var *ccv);
+static void htcp_ssthresh_update(struct cc_var *ccv);
+
+struct htcp {
+ /* cwnd before entering cong recovery. */
+ unsigned long prev_cwnd;
+ /* cwnd additive increase parameter. */
+ int alpha;
+ /* cwnd multiplicative decrease parameter. */
+ int beta;
+ /* Largest rtt seen for the flow. */
+ int maxrtt;
+ /* Shortest rtt seen for the flow. */
+ int minrtt;
+ /* Time of last congestion event in ticks. */
+ int t_last_cong;
+};
+
+static int htcp_rtt_ref;
+/*
+ * The maximum number of ticks the value of diff can reach in
+ * htcp_recalc_alpha() before alpha will stop increasing due to overflow.
+ * See comment above HTCP_CALC_ALPHA for more info.
+ */
+static int htcp_max_diff = INT_MAX / ((1 << HTCP_ALPHA_INC_SHIFT) * 10);
+
+/* Per-netstack vars. */
+static VNET_DEFINE(uint8_t, htcp_adaptive_backoff) = 0;
+static VNET_DEFINE(uint8_t, htcp_rtt_scaling) = 0;
+#define V_htcp_adaptive_backoff VNET(htcp_adaptive_backoff)
+#define V_htcp_rtt_scaling VNET(htcp_rtt_scaling)
+
+MALLOC_DECLARE(M_HTCP);
+MALLOC_DEFINE(M_HTCP, "htcp data",
+ "Per connection data required for the HTCP congestion control algorithm");
+
+struct cc_algo htcp_cc_algo = {
+ .name = "htcp",
+ .ack_received = htcp_ack_received,
+ .cb_destroy = htcp_cb_destroy,
+ .cb_init = htcp_cb_init,
+ .cong_signal = htcp_cong_signal,
+ .mod_init = htcp_mod_init,
+ .post_recovery = htcp_post_recovery,
+};
+
+static void
+htcp_ack_received(struct cc_var *ccv, uint16_t type)
+{
+ struct htcp *htcp_data;
+
+ htcp_data = ccv->cc_data;
+ htcp_record_rtt(ccv);
+
+ /*
+ * Regular ACK and we're not in cong/fast recovery and we're cwnd
+ * limited and we're either not doing ABC or are slow starting or are
+ * doing ABC and we've sent a cwnd's worth of bytes.
+ */
+ if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
+ (ccv->flags & CCF_CWND_LIMITED) && (!V_tcp_do_rfc3465 ||
+ CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
+ (V_tcp_do_rfc3465 && ccv->flags & CCF_ABC_SENTAWND))) {
+ htcp_recalc_beta(ccv);
+ htcp_recalc_alpha(ccv);
+ /*
+ * Use the logic in NewReno ack_received() for slow start and
+ * for the first HTCP_DELTA_L ticks after either the flow starts
+ * or a congestion event (when alpha equals 1).
+ */
+ if (htcp_data->alpha == 1 ||
+ CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh))
+ newreno_cc_algo.ack_received(ccv, type);
+ else {
+ if (V_tcp_do_rfc3465) {
+ /* Increment cwnd by alpha segments. */
+ CCV(ccv, snd_cwnd) += htcp_data->alpha *
+ CCV(ccv, t_maxseg);
+ ccv->flags &= ~CCF_ABC_SENTAWND;
+ } else
+ /*
+ * Increment cwnd by alpha/cwnd segments to
+ * approximate an increase of alpha segments
+ * per RTT.
+ */
+ CCV(ccv, snd_cwnd) += (((htcp_data->alpha <<
+ HTCP_SHIFT) / (CCV(ccv, snd_cwnd) /
+ CCV(ccv, t_maxseg))) * CCV(ccv, t_maxseg))
+ >> HTCP_SHIFT;
+ }
+ }
+}
+
+static void
+htcp_cb_destroy(struct cc_var *ccv)
+{
+
+ if (ccv->cc_data != NULL)
+ free(ccv->cc_data, M_HTCP);
+}
+
+static int
+htcp_cb_init(struct cc_var *ccv)
+{
+ struct htcp *htcp_data;
+
+ htcp_data = malloc(sizeof(struct htcp), M_HTCP, M_NOWAIT);
+
+ if (htcp_data == NULL)
+ return (ENOMEM);
+
+ /* Init some key variables with sensible defaults. */
+ htcp_data->alpha = HTCP_INIT_ALPHA;
+ htcp_data->beta = HTCP_MINBETA;
+ htcp_data->maxrtt = TCPTV_SRTTBASE;
+ htcp_data->minrtt = TCPTV_SRTTBASE;
+ htcp_data->prev_cwnd = 0;
+ htcp_data->t_last_cong = ticks;
+
+ ccv->cc_data = htcp_data;
+
+ return (0);
+}
+
+/*
+ * Perform any necessary tasks before we enter congestion recovery.
+ */
+static void
+htcp_cong_signal(struct cc_var *ccv, uint32_t type)
+{
+ struct htcp *htcp_data;
+
+ htcp_data = ccv->cc_data;
+
+ switch (type) {
+ case CC_NDUPACK:
+ if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
+ if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
+ /*
+ * Apply hysteresis to maxrtt to ensure
+ * reductions in the RTT are reflected in our
+ * measurements.
+ */
+ htcp_data->maxrtt = (htcp_data->minrtt +
+ (htcp_data->maxrtt - htcp_data->minrtt) *
+ 95) / 100;
+ htcp_ssthresh_update(ccv);
+ htcp_data->t_last_cong = ticks;
+ htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
+ }
+ ENTER_RECOVERY(CCV(ccv, t_flags));
+ }
+ break;
+
+ case CC_ECN:
+ if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
+ /*
+ * Apply hysteresis to maxrtt to ensure reductions in
+ * the RTT are reflected in our measurements.
+ */
+ htcp_data->maxrtt = (htcp_data->minrtt + (htcp_data->maxrtt -
+ htcp_data->minrtt) * 95) / 100;
+ htcp_ssthresh_update(ccv);
+ CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
+ htcp_data->t_last_cong = ticks;
+ htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
+ ENTER_CONGRECOVERY(CCV(ccv, t_flags));
+ }
+ break;
+
+ case CC_RTO:
+ /*
+ * Grab the current time and record it so we know when the
+ * most recent congestion event was. Only record it when the
+ * timeout has fired more than once, as there is a reasonable
+ * chance the first one is a false alarm and may not indicate
+ * congestion.
+ */
+ if (CCV(ccv, t_rxtshift) >= 2)
+ htcp_data->t_last_cong = ticks;
+ break;
+ }
+}
+
+static int
+htcp_mod_init(void)
+{
+
+ htcp_cc_algo.after_idle = newreno_cc_algo.after_idle;
+
+ /*
+ * HTCP_RTT_REF is defined in ms, and t_srtt in the tcpcb is stored in
+ * units of TCP_RTT_SCALE*hz. Scale HTCP_RTT_REF to be in the same units
+ * as t_srtt.
+ */
+ htcp_rtt_ref = (HTCP_RTT_REF * TCP_RTT_SCALE * hz) / 1000;
+
+ return (0);
+}
+
+/*
+ * Perform any necessary tasks before we exit congestion recovery.
+ */
+static void
+htcp_post_recovery(struct cc_var *ccv)
+{
+ struct htcp *htcp_data;
+
+ htcp_data = ccv->cc_data;
+
+ if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
+ /*
+ * If inflight data is less than ssthresh, set cwnd
+ * conservatively to avoid a burst of data, as suggested in the
+ * NewReno RFC. Otherwise, use the HTCP method.
+ *
+ * XXXLAS: Find a way to do this without needing curack
+ */
+ if (SEQ_GT(ccv->curack + CCV(ccv, snd_ssthresh),
+ CCV(ccv, snd_max)))
+ CCV(ccv, snd_cwnd) = CCV(ccv, snd_max) - ccv->curack +
+ CCV(ccv, t_maxseg);
+ else
+ CCV(ccv, snd_cwnd) = max(1, ((htcp_data->beta *
+ htcp_data->prev_cwnd / CCV(ccv, t_maxseg))
+ >> HTCP_SHIFT)) * CCV(ccv, t_maxseg);
+ }
+}
+
+static void
+htcp_recalc_alpha(struct cc_var *ccv)
+{
+ struct htcp *htcp_data;
+ int alpha, diff, now;
+
+ htcp_data = ccv->cc_data;
+ now = ticks;
+
+ /*
+ * If ticks has wrapped around (will happen approximately once every 49
+ * days on a machine with the default kern.hz=1000) and a flow straddles
+ * the wrap point, our alpha calcs will be completely wrong. We cut our
+ * losses and restart alpha from scratch by setting t_last_cong = now -
+ * HTCP_DELTA_L.
+ *
+ * This does not deflate our cwnd at all. It simply slows the rate cwnd
+ * is growing by until alpha regains the value it held prior to taking
+ * this drastic measure.
+ */
+ if (now < htcp_data->t_last_cong)
+ htcp_data->t_last_cong = now - HTCP_DELTA_L;
+
+ diff = now - htcp_data->t_last_cong - HTCP_DELTA_L;
+
+ /* Cap alpha if the value of diff would overflow HTCP_CALC_ALPHA(). */
+ if (diff < htcp_max_diff) {
+ /*
+ * If it has been more than HTCP_DELTA_L ticks since congestion,
+ * increase alpha according to the function defined in the spec.
+ */
+ if (diff > 0) {
+ alpha = HTCP_CALC_ALPHA(diff);
+
+ /*
+ * Adaptive backoff fairness adjustment:
+ * 2 * (1 - beta) * alpha_raw
+ */
+ if (V_htcp_adaptive_backoff)
+ alpha = max(1, (2 * ((1 << HTCP_SHIFT) -
+ htcp_data->beta) * alpha) >> HTCP_SHIFT);
+
+ /*
+ * RTT scaling: (RTT / RTT_ref) * alpha
+ * alpha will be the raw value from HTCP_CALC_ALPHA() if
+ * adaptive backoff is off, or the adjusted value if
+ * adaptive backoff is on.
+ */
+ if (V_htcp_rtt_scaling)
+ alpha = max(1, (min(max(HTCP_MINROWE,
+ (CCV(ccv, t_srtt) << HTCP_SHIFT) /
+ htcp_rtt_ref), HTCP_MAXROWE) * alpha)
+ >> HTCP_SHIFT);
+
+ } else
+ alpha = 1;
+
+ htcp_data->alpha = alpha;
+ }
+}
+
+static void
+htcp_recalc_beta(struct cc_var *ccv)
+{
+ struct htcp *htcp_data;
+
+ htcp_data = ccv->cc_data;
+
+ /*
+ * TCPTV_SRTTBASE is the initialised value of each connection's SRTT, so
+ * we only calc beta if the connection's SRTT has been changed from its
+ * initial value. beta is bounded to ensure it is always between
+ * HTCP_MINBETA and HTCP_MAXBETA.
+ */
+ if (V_htcp_adaptive_backoff && htcp_data->minrtt != TCPTV_SRTTBASE &&
+ htcp_data->maxrtt != TCPTV_SRTTBASE)
+ htcp_data->beta = min(max(HTCP_MINBETA,
+ (htcp_data->minrtt << HTCP_SHIFT) / htcp_data->maxrtt),
+ HTCP_MAXBETA);
+ else
+ htcp_data->beta = HTCP_MINBETA;
+}
+
+/*
+ * Record the minimum and maximum RTT seen for the connection. These are used in
+ * the calculation of beta if adaptive backoff is enabled.
+ */
+static void
+htcp_record_rtt(struct cc_var *ccv)
+{
+ struct htcp *htcp_data;
+
+ htcp_data = ccv->cc_data;
+
+ /* XXXLAS: Should there be some hysteresis for minrtt? */
+
+ /*
+ * Record the current SRTT as our minrtt if it's the smallest we've seen
+ * or minrtt is currently equal to its initialised value. Ignore SRTT
+ * until a min number of samples have been taken.
+ */
+ if ((CCV(ccv, t_srtt) < htcp_data->minrtt ||
+ htcp_data->minrtt == TCPTV_SRTTBASE) &&
+ (CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES))
+ htcp_data->minrtt = CCV(ccv, t_srtt);
+
+ /*
+ * Record the current SRTT as our maxrtt if it's the largest we've
+ * seen. Ignore SRTT until a min number of samples have been taken.
+ */
+ if (CCV(ccv, t_srtt) > htcp_data->maxrtt
+ && CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES)
+ htcp_data->maxrtt = CCV(ccv, t_srtt);
+}
+
+/*
+ * Update the ssthresh in the event of congestion.
+ */
+static void
+htcp_ssthresh_update(struct cc_var *ccv)
+{
+ struct htcp *htcp_data;
+
+ htcp_data = ccv->cc_data;
+
+ /*
+ * On the first congestion event, set ssthresh to cwnd * 0.5, on
+ * subsequent congestion events, set it to cwnd * beta.
+ */
+ if (CCV(ccv, snd_ssthresh) == TCP_MAXWIN << TCP_MAX_WINSHIFT)
+ CCV(ccv, snd_ssthresh) = (CCV(ccv, snd_cwnd) * HTCP_MINBETA)
+ >> HTCP_SHIFT;
+ else {
+ htcp_recalc_beta(ccv);
+ CCV(ccv, snd_ssthresh) = (CCV(ccv, snd_cwnd) * htcp_data->beta)
+ >> HTCP_SHIFT;
+ }
+}
+
+
+SYSCTL_DECL(_net_inet_tcp_cc_htcp);
+SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, htcp, CTLFLAG_RW,
+ NULL, "H-TCP related settings");
+SYSCTL_VNET_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, adaptive_backoff, CTLFLAG_RW,
+ &VNET_NAME(htcp_adaptive_backoff), 0, "enable H-TCP adaptive backoff");
+SYSCTL_VNET_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, rtt_scaling, CTLFLAG_RW,
+ &VNET_NAME(htcp_rtt_scaling), 0, "enable H-TCP RTT scaling");
+
+DECLARE_CC_MODULE(htcp, &htcp_cc_algo);
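
A standalone sketch (not part of the patch; hz pinned to 1000 for the check)
comparing HTCP_CALC_ALPHA() against the spec's floating-point form
1 + 10(delta - delta_l) + ((delta - delta_l)/2)^2, where diff/hz converts
ticks past delta_l into seconds:

#include <stdio.h>

static const int hz = 1000;	/* assumed kern.hz for this check */

#define HTCP_ALPHA_INC_SHIFT 4
/* Macro copied unchanged from cc_htcp.c. */
#define HTCP_CALC_ALPHA(diff) \
((\
	(16) + \
	((160 * (diff)) / hz) + \
	(((diff) / hz) * (((diff) << HTCP_ALPHA_INC_SHIFT) / (4 * hz))) \
) >> HTCP_ALPHA_INC_SHIFT)

int
main(void)
{
	int diff;

	for (diff = hz; diff <= 16 * hz; diff *= 2) {
		double t = diff / (double)hz;

		printf("diff=%6d ticks: fixed=%4d float=%8.2f\n", diff,
		    HTCP_CALC_ALPHA(diff),
		    1.0 + 10.0 * t + (t / 2.0) * (t / 2.0));
	}
	return (0);
}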
diff --git a/sys/netinet/cc/cc_newreno.c b/sys/netinet/cc/cc_newreno.c
index e383510..c095540 100644
--- a/sys/netinet/cc/cc_newreno.c
+++ b/sys/netinet/cc/cc_newreno.c
@@ -52,41 +52,35 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
+#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
+#include <sys/systm.h>
-#include <net/if.h>
-#include <net/if_var.h>
+#include <net/vnet.h>
#include <netinet/cc.h>
-#include <netinet/in.h>
-#include <netinet/in_pcb.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/cc/cc_module.h>
-void newreno_ack_received(struct cc_var *ccv, uint16_t type);
-void newreno_cong_signal(struct cc_var *ccv, uint32_t type);
-void newreno_post_recovery(struct cc_var *ccv);
-void newreno_after_idle(struct cc_var *ccv);
+static void newreno_ack_received(struct cc_var *ccv, uint16_t type);
+static void newreno_after_idle(struct cc_var *ccv);
+static void newreno_cong_signal(struct cc_var *ccv, uint32_t type);
+static void newreno_post_recovery(struct cc_var *ccv);
struct cc_algo newreno_cc_algo = {
.name = "newreno",
.ack_received = newreno_ack_received,
+ .after_idle = newreno_after_idle,
.cong_signal = newreno_cong_signal,
.post_recovery = newreno_post_recovery,
- .after_idle = newreno_after_idle
};
-/*
- * Increase cwnd on receipt of a successful ACK:
- * if cwnd <= ssthresh, increases by 1 MSS per ACK
- * if cwnd > ssthresh, increase by ~1 MSS per RTT
- */
-void
+static void
newreno_ack_received(struct cc_var *ccv, uint16_t type)
{
if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
@@ -153,10 +147,37 @@ newreno_ack_received(struct cc_var *ccv, uint16_t type)
}
}
+static void
+newreno_after_idle(struct cc_var *ccv)
+{
+ int rw;
+
+ /*
+ * If we've been idle for more than one retransmit timeout the old
+ * congestion window is no longer current and we have to reduce it to
+ * the restart window before we can transmit again.
+ *
+ * The restart window is the initial window or the last CWND, whichever
+ * is smaller.
+ *
+ * This is done to prevent us from flooding the path with a full CWND at
+ * wirespeed, overloading router and switch buffers along the way.
+ *
+ * See RFC5681 Section 4.1. "Restarting Idle Connections".
+ */
+ if (V_tcp_do_rfc3390)
+ rw = min(4 * CCV(ccv, t_maxseg),
+ max(2 * CCV(ccv, t_maxseg), 4380));
+ else
+ rw = CCV(ccv, t_maxseg) * 2;
+
+ CCV(ccv, snd_cwnd) = min(rw, CCV(ccv, snd_cwnd));
+}
+
/*
- * manage congestion signals
+ * Perform any necessary tasks before we enter congestion recovery.
*/
-void
+static void
newreno_cong_signal(struct cc_var *ccv, uint32_t type)
{
u_int win;
@@ -183,11 +204,9 @@ newreno_cong_signal(struct cc_var *ccv, uint32_t type)
}
/*
- * decrease the cwnd in response to packet loss or a transmit timeout.
- * th can be null, in which case cwnd will be set according to reno instead
- * of new reno.
+ * Perform any necessary tasks before we exit congestion recovery.
*/
-void
+static void
newreno_post_recovery(struct cc_var *ccv)
{
if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
@@ -209,23 +228,5 @@ newreno_post_recovery(struct cc_var *ccv)
}
}
-/*
- * if a connection has been idle for a while and more data is ready to be sent,
- * reset cwnd
- */
-void
-newreno_after_idle(struct cc_var *ccv)
-{
- /*
- * We have been idle for "a while" and no acks are expected to clock out
- * any data we send -- slow start to get ack "clock" running again.
- */
- if (V_tcp_do_rfc3390)
- CCV(ccv, snd_cwnd) = min(4 * CCV(ccv, t_maxseg),
- max(2 * CCV(ccv, t_maxseg), 4380));
- else
- CCV(ccv, snd_cwnd) = CCV(ccv, t_maxseg) * 2;
-}
-
DECLARE_CC_MODULE(newreno, &newreno_cc_algo);
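
A quick standalone sketch (not part of the patch) of the restart-window
arithmetic newreno_after_idle() now applies, for a few common MSS values:

#include <stdio.h>

static int imin(int a, int b) { return (a < b ? a : b); }
static int imax(int a, int b) { return (a > b ? a : b); }

int
main(void)
{
	int mss[] = { 536, 1460, 4380, 9000 };
	size_t i;

	/* rw per RFC 3390 vs. the plain 2*MSS fallback. */
	for (i = 0; i < sizeof(mss) / sizeof(mss[0]); i++)
		printf("mss=%5d rfc3390 rw=%6d plain rw=%6d\n", mss[i],
		    imin(4 * mss[i], imax(2 * mss[i], 4380)), 2 * mss[i]);
	return (0);
}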
diff --git a/sys/netinet/if_ether.c b/sys/netinet/if_ether.c
index ee5d8e0..df05c04 100644
--- a/sys/netinet/if_ether.c
+++ b/sys/netinet/if_ether.c
@@ -323,7 +323,7 @@ retry:
}
if ((la->la_flags & LLE_VALID) &&
- ((la->la_flags & LLE_STATIC) || la->la_expire > time_second)) {
+ ((la->la_flags & LLE_STATIC) || la->la_expire > time_uptime)) {
bcopy(&la->ll_addr, desten, ifp->if_addrlen);
/*
* If entry has an expiry time and it is approaching,
@@ -331,7 +331,7 @@ retry:
* arpt_down interval.
*/
if (!(la->la_flags & LLE_STATIC) &&
- time_second + la->la_preempt > la->la_expire) {
+ time_uptime + la->la_preempt > la->la_expire) {
arprequest(ifp, NULL,
&SIN(dst)->sin_addr, IF_LLADDR(ifp));
@@ -351,7 +351,7 @@ retry:
goto done;
}
- renew = (la->la_asked == 0 || la->la_expire != time_second);
+ renew = (la->la_asked == 0 || la->la_expire != time_uptime);
if ((renew || m != NULL) && (flags & LLE_EXCLUSIVE) == 0) {
flags |= LLE_EXCLUSIVE;
LLE_RUNLOCK(la);
@@ -403,7 +403,7 @@ retry:
int canceled;
LLE_ADDREF(la);
- la->la_expire = time_second;
+ la->la_expire = time_uptime;
canceled = callout_reset(&la->la_timer, hz * V_arpt_down,
arptimer, la);
if (canceled)
@@ -713,7 +713,7 @@ match:
int canceled;
LLE_ADDREF(la);
- la->la_expire = time_second + V_arpt_keep;
+ la->la_expire = time_uptime + V_arpt_keep;
canceled = callout_reset(&la->la_timer,
hz * V_arpt_keep, arptimer, la);
if (canceled)
diff --git a/sys/netinet/in.c b/sys/netinet/in.c
index 2ec54e2..d8c71f1 100644
--- a/sys/netinet/in.c
+++ b/sys/netinet/in.c
@@ -1333,7 +1333,7 @@ in_lltable_new(const struct sockaddr *l3addr, u_int flags)
* For IPv4 this will trigger "arpresolve" to generate
* an ARP request.
*/
- lle->base.la_expire = time_second; /* mark expired */
+ lle->base.la_expire = time_uptime; /* mark expired */
lle->l3_addr4 = *(const struct sockaddr_in *)l3addr;
lle->base.lle_refcnt = 1;
LLE_LOCK_INIT(&lle->base);
diff --git a/sys/netinet/ip6.h b/sys/netinet/ip6.h
index 3fb08a7..c833a3d 100644
--- a/sys/netinet/ip6.h
+++ b/sys/netinet/ip6.h
@@ -263,7 +263,7 @@ struct ip6_frag {
/*
* IP6_EXTHDR_CHECK ensures that region between the IP6 header and the
* target header (including IPv6 itself, extension headers and
- * TCP/UDP/ICMP6 headers) are continuous. KAME requires drivers
+ * TCP/UDP/ICMP6 headers) are contiguous. KAME requires drivers
* to store incoming data into one internal mbuf or one or more external
* mbufs(never into two or more internal mbufs). Thus, the third case is
* supposed to never be matched but is prepared just in case.
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index 8fb9a52..4dbc038 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -378,6 +378,8 @@ cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
tp->t_dupacks = 0;
tp->t_bytes_acked = 0;
EXIT_RECOVERY(tp->t_flags);
+ tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
+ tp->t_maxseg) * tp->t_maxseg;
tp->snd_cwnd = tp->t_maxseg;
break;
case CC_RTO_ERR:
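
The new ssthresh assignment in the CC_RTO case halves the effective window,
floors it to a whole number of segments, and never lets it drop below two
segments, in the spirit of RFC 5681 equation (4) with min(snd_wnd, snd_cwnd)
standing in for FlightSize. A standalone sketch (not part of the patch) of
the arithmetic:

#include <stdio.h>

static unsigned long
rto_ssthresh(unsigned long snd_wnd, unsigned long snd_cwnd,
    unsigned int maxseg)
{
	unsigned long win = snd_wnd < snd_cwnd ? snd_wnd : snd_cwnd;
	unsigned long segs = win / 2 / maxseg;

	return ((segs > 2 ? segs : 2) * maxseg);
}

int
main(void)
{
	printf("%lu\n", rto_ssthresh(65535, 20 * 1460, 1460)); /* 14600 */
	printf("%lu\n", rto_ssthresh(65535, 2 * 1460, 1460));  /* 2920 */
	return (0);
}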
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index bf42dac..b7ec6f7 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -148,7 +148,7 @@ tcp_output(struct tcpcb *tp)
{
struct socket *so = tp->t_inpcb->inp_socket;
long len, recwin, sendwin;
- int off, flags, error, rw;
+ int off, flags, error;
struct mbuf *m;
struct ip *ip = NULL;
struct ipovly *ipov = NULL;
@@ -182,37 +182,8 @@ tcp_output(struct tcpcb *tp)
* to send, then transmit; otherwise, investigate further.
*/
idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
- if (idle && ticks - tp->t_rcvtime >= tp->t_rxtcur) {
- /*
- * If we've been idle for more than one retransmit
- * timeout the old congestion window is no longer
- * current and we have to reduce it to the restart
- * window before we can transmit again.
- *
- * The restart window is the initial window or the last
- * CWND, whichever is smaller.
- *
- * This is done to prevent us from flooding the path with
- * a full CWND at wirespeed, overloading router and switch
- * buffers along the way.
- *
- * See RFC5681 Section 4.1. "Restarting Idle Connections".
- */
- if (V_tcp_do_rfc3390)
- rw = min(4 * tp->t_maxseg,
- max(2 * tp->t_maxseg, 4380));
-#ifdef INET6
- else if ((isipv6 ? in6_localaddr(&tp->t_inpcb->in6p_faddr) :
- in_localaddr(tp->t_inpcb->inp_faddr)))
-#else
- else if (in_localaddr(tp->t_inpcb->inp_faddr))
-#endif
- rw = V_ss_fltsz_local * tp->t_maxseg;
- else
- rw = V_ss_fltsz * tp->t_maxseg;
-
- tp->snd_cwnd = min(rw, tp->snd_cwnd);
- }
+ if (idle && ticks - tp->t_rcvtime >= tp->t_rxtcur)
+ cc_after_idle(tp);
tp->t_flags &= ~TF_LASTIDLE;
if (idle) {
if (tp->t_flags & TF_MORETOCOME) {
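
tcp_output() now defers the idle-restart policy to the modular framework via
cc_after_idle(). A userland mock of the dispatch pattern it relies on (the
struct and hook names here mirror the cc(9) framework but are assumptions,
since cc.h is not shown in this diff):

#include <stddef.h>
#include <stdio.h>

struct cc_var { unsigned long snd_cwnd; };

struct cc_algo {
	const char *name;
	void (*after_idle)(struct cc_var *ccv);
};

static void
mock_after_idle(struct cc_var *ccv)
{
	/* A real hook would clamp cwnd to the restart window here. */
	printf("after_idle: cwnd=%lu\n", ccv->snd_cwnd);
}

static struct cc_algo mock_algo = {
	.name = "mock",
	.after_idle = mock_after_idle,
};

int
main(void)
{
	struct cc_var ccv = { .snd_cwnd = 14600 };

	/* Dispatch only if the active algorithm implements the hook. */
	if (mock_algo.after_idle != NULL)
		mock_algo.after_idle(&ccv);
	return (0);
}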
diff --git a/sys/netinet/tcp_timer.c b/sys/netinet/tcp_timer.c
index 2748e64..40ee4d2 100644
--- a/sys/netinet/tcp_timer.c
+++ b/sys/netinet/tcp_timer.c
@@ -567,7 +567,7 @@ tcp_timer_rexmt(void * xtp)
*/
tp->t_rtttime = 0;
- cc_cong_signal(tp, 0, CC_RTO);
+ cc_cong_signal(tp, NULL, CC_RTO);
(void) tcp_output(tp);