From f7ed3f73322c863f0f06500aab654100411cb9f7 Mon Sep 17 00:00:00 2001 From: dillon Date: Wed, 17 Jul 2002 23:32:03 +0000 Subject: I don't know how the minimum retransmit timeout managed to get set to one second but it badly breaks throughput on networks with minor packet loss. Complaints by: at least two people tracked down to this. MFC after: 3 days --- sys/netinet/tcp_timer.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'sys/netinet/tcp_timer.h') diff --git a/sys/netinet/tcp_timer.h b/sys/netinet/tcp_timer.h index ff86d2a..94c8a5e 100644 --- a/sys/netinet/tcp_timer.h +++ b/sys/netinet/tcp_timer.h @@ -90,7 +90,13 @@ #define TCPTV_KEEPINTVL ( 75*hz) /* default probe interval */ #define TCPTV_KEEPCNT 8 /* max probes before drop */ -#define TCPTV_MIN ( 1*hz) /* minimum allowable value */ +/* + * Minimum retransmit timer is 3 ticks, for algorithmic stability. + * The maximum is 64 seconds. The prior minimum of 1*hz (1 second) badly + * breaks throughput on any networks faster than a modem that has minor + * (e.g. 1%) packet loss. + */ +#define TCPTV_MIN ( 3 ) /* minimum allowable value */ #define TCPTV_REXMTMAX ( 64*hz) /* max allowable REXMT value */ #define TCPTV_TWTRUNC 8 /* RTO factor to truncate TW */ -- cgit v1.1