author		dillon <dillon@FreeBSD.org>	2002-08-24 17:22:44 +0000
committer	dillon <dillon@FreeBSD.org>	2002-08-24 17:22:44 +0000
commit		25a0b2072198cf9a11b601db4a7167409d8b9c64 (patch)
tree		74b4b5806b5e5688ea27474237b6de2845e262d2 /sys/netinet/tcp_subr.c
parent		6be03c7fd1154402cb7844b27d2974232846d4a7 (diff)
Correct bug in t_bw_rtttime rollover, #undef USERTT
Diffstat (limited to 'sys/netinet/tcp_subr.c')
-rw-r--r--	sys/netinet/tcp_subr.c	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 45e5026..fed7471 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -1628,6 +1628,9 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
          * that is not using all available bandwidth, but for now our
          * slop will ramp us up if this case occurs and the bandwidth later
          * increases.
+         *
+         * Note: if ticks rollover 'bw' may wind up negative.  We must
+         * effectively reset t_bw_rtttime for this case.
          */
         save_ticks = ticks;
         if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
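The comment added above documents the failure mode: around a 'ticks' rollover
the sampled tick delta can go negative, which drives the raw bandwidth sample
'bw' negative. Below is a minimal user-space sketch of that arithmetic and of
the guard the next hunk adds; the names mirror tcp_xmit_bandwidth_limit(), but
the concrete values (including the stale timestamp standing in for a wrapped
counter) are hypothetical.

/*
 * Hypothetical user-space sketch, not the kernel code.  A stale
 * t_bw_rtttime relative to the current tick count yields a negative
 * delta, and thus a negative raw bandwidth sample.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t tcp_seq;

int main(void)
{
	int hz = 100;			/* ticks per second (assumed) */
	int t_bw_rtttime = 1000;	/* tick count at last sample */
	int save_ticks = 990;		/* stale/wrapped 'ticks' value */
	tcp_seq t_bw_rtseq = 1000;	/* sequence number at last sample */
	tcp_seq ack_seq = 15600;	/* 14600 bytes acked since then */

	/* Same arithmetic as tcp_subr.c: the negative tick delta makes
	 * the raw sample negative (14600 * 100 / -10 = -146000). */
	int64_t bw = (int64_t)(ack_seq - t_bw_rtseq) * (int64_t)hz /
	    (save_ticks - t_bw_rtttime);

	if ((int)bw < 0)		/* the guard added by this commit */
		printf("bogus sample (bw = %lld), restart measurement\n",
		    (long long)bw);
	return 0;
}

Note that the earlier '(u_int)(save_ticks - tp->t_bw_rtttime) < 1' test only
rejects a zero delta; viewed as unsigned, a negative delta is huge and slips
through, so the sample has to be caught after the division.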
@@ -1637,7 +1640,7 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
             (save_ticks - tp->t_bw_rtttime);
         tp->t_bw_rtttime = save_ticks;
         tp->t_bw_rtseq = ack_seq;
-        if (tp->t_bw_rtttime == 0)
+        if (tp->t_bw_rtttime == 0 || (int)bw < 0)
             return;
         bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;
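The last context line above folds each accepted raw sample into
tp->snd_bandwidth with a 15/16 exponentially weighted moving average:
new = (old * 15 + sample) / 16. A stand-alone sketch of that filter, using
hypothetical starting values:

/*
 * Sketch of the 15/16 EWMA from tcp_subr.c; the initial estimate and
 * the sample stream are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t snd_bandwidth = 100000;	/* smoothed estimate, bytes/sec */
	int64_t samples[] = { 120000, 121000, 119000, 122000 };
	int i;

	for (i = 0; i < 4; i++) {
		/* same shift as the committed line: 15 parts old
		 * estimate, 1 part new sample */
		snd_bandwidth = (snd_bandwidth * 15 + samples[i]) >> 4;
		printf("sample %lld -> estimate %lld\n",
		    (long long)samples[i], (long long)snd_bandwidth);
	}
	return 0;
}

The heavy 15/16 weighting means one bogus sample would pull the estimate only
slightly, but discarding negative samples outright (the hunk above) is still
cheaper than letting them pollute the average at all.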
@@ -1666,6 +1669,7 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
          */
 #define USERTT ((tp->t_srtt + tp->t_rttbest) / 2)
         bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + 2 * tp->t_maxseg;
+#undef USERTT
         if (tcp_inflight_debug > 0) {
                 static int ltime;
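The '#undef' added here scopes the USERTT macro to the one expression that
needs it: the bandwidth-delay product that sizes bwnd. A sketch of that
computation, assuming TCP_RTT_SHIFT is 5 (so t_srtt and t_rttbest are kept in
units of ticks << 5, as in FreeBSD) and using hypothetical link numbers:

/*
 * Sketch of the bwnd computation from the hunk above; bandwidth and
 * RTT values are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define TCP_RTT_SHIFT	5

int main(void)
{
	int hz = 100;
	int64_t bw = 1000000;			/* 1 MB/s smoothed estimate */
	int t_srtt = 10 << TCP_RTT_SHIFT;	/* smoothed RTT, 100 ms */
	int t_rttbest = 8 << TCP_RTT_SHIFT;	/* best observed RTT, 80 ms */
	int t_maxseg = 1460;

	/* USERTT averages the smoothed and best RTTs, still scaled */
	int usertt = (t_srtt + t_rttbest) / 2;

	/* window = bandwidth * RTT-in-seconds + two segments of slop */
	int64_t bwnd = bw * usertt / ((int64_t)hz << TCP_RTT_SHIFT) +
	    2 * t_maxseg;

	printf("bwnd = %lld bytes\n", (long long)bwnd);
	return 0;
}

With these numbers the average RTT is 90 ms, so the window comes to 90000
bytes plus the '2 * tp->t_maxseg' cushion, i.e. 92920 bytes.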