authorIlpo Järvinen <ilpo.jarvinen@helsinki.fi>2009-02-28 04:44:33 +0000
committerDavid S. Miller <davem@davemloft.net>2009-03-02 03:00:12 -0800
commit7363a5b233734dba339f2874ff6ed6c489d3d865 (patch)
treee982156d409f29c82bcc7ec5e4e6160a11bc3685 /net/ipv4
parentd0af4160d19ff2849386140881e729f9ba86f2aa (diff)
tcp: separate timeout marking loop to its own function
A comment about its current state is added. So far I have seen very few cases where this is actually useful, and usually only marginally (though admittedly I don't usually see top-of-window losses, where it seems possible there could be some gain); more often the cases suffer from an L-marking spike, which is certainly not desirable (I'll put improving it on my todo list, but at a low priority).

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
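For readers skimming the diff below, here is a minimal, self-contained userspace sketch of the behaviour the new comment describes: the marking walk stops at the first segment whose latest transmission is still fresh, so a recently retransmitted segment near the head blocks loss-marking of older segments behind it. All names here (struct seg, seg_timedout, timeout_segs, RTO_MS) are hypothetical stand-ins for illustration only; the real code walks sk_buffs on the write queue via tcp_skb_timedout() and tcp_skb_mark_lost().

#include <stdbool.h>
#include <stdio.h>

#define RTO_MS 200  /* pretend retransmission timeout, illustration only */

struct seg {
        unsigned int seq;      /* starting sequence number */
        unsigned int sent_ms;  /* timestamp of the most recent (re)transmission */
        bool lost;             /* set once the walk marks the segment lost */
};

/* A segment has "timed out" when its latest transmission is older than RTO. */
static bool seg_timedout(const struct seg *s, unsigned int now_ms)
{
        return now_ms - s->sent_ms > RTO_MS;
}

/*
 * Walk the in-flight segments from the front and mark them lost until the
 * first one that has not timed out.  This models the weakness noted above:
 * a recently retransmitted segment carries a fresh timestamp and stops the
 * walk, even though later segments may be much older.
 */
static void timeout_segs(struct seg *q, int n, unsigned int now_ms)
{
        for (int i = 0; i < n; i++) {
                if (!seg_timedout(&q[i], now_ms))
                        break;  /* fresh timestamp blocks further marking */
                q[i].lost = true;
        }
}

int main(void)
{
        unsigned int now_ms = 1000;
        struct seg q[] = {
                { .seq = 1000, .sent_ms = 900 },  /* recently retransmitted head */
                { .seq = 2000, .sent_ms = 500 },  /* old, but never reached */
                { .seq = 3000, .sent_ms = 500 },
        };

        timeout_segs(q, 3, now_ms);
        /* Nothing gets marked: the fresh head stops the walk immediately. */
        for (int i = 0; i < 3; i++)
                printf("seq %u lost=%d\n", q[i].seq, q[i].lost);
        return 0;
}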
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/tcp_input.c | 63
1 file changed, 39 insertions, 24 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 125b451..03f5ede 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2461,6 +2461,44 @@ static int tcp_time_to_recover(struct sock *sk)
return 0;
}
+/* New heuristic: this is possible only after we switched to restarting the
+ * timer each time something is ACKed. Hence, we can detect timed-out packets
+ * during fast retransmit without falling back to slow start.
+ *
+ * The usefulness of this as-is is very questionable, since we should know
+ * which of the segments is the next to time out, which is relatively
+ * expensive to find in the general case unless we add some data structure
+ * just for that. The current approach certainly won't find the right one
+ * too often, and when it finally does find _something_ it usually marks a
+ * large part of the window right away (because a retransmission with a
+ * larger timestamp blocks the loop from advancing). -ij
+ */
+static void tcp_timeout_skbs(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+
+ if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
+ return;
+
+ skb = tp->scoreboard_skb_hint;
+ if (tp->scoreboard_skb_hint == NULL)
+ skb = tcp_write_queue_head(sk);
+
+ tcp_for_write_queue_from(skb, sk) {
+ if (skb == tcp_send_head(sk))
+ break;
+ if (!tcp_skb_timedout(sk, skb))
+ break;
+
+ tcp_skb_mark_lost(tp, skb);
+ }
+
+ tp->scoreboard_skb_hint = skb;
+
+ tcp_verify_left_out(tp);
+}
+
/* Mark head of queue up as lost. With RFC3517 SACK, the packet count is
 * against sacked "cnt", otherwise it's against facked "cnt".
 */
@@ -2533,30 +2571,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
tcp_mark_head_lost(sk, sacked_upto);
}
- /* New heuristics: it is possible only after we switched
- * to restart timer each time when something is ACKed.
- * Hence, we can detect timed out packets during fast
- * retransmit without falling to slow start.
- */
- if (tcp_is_fack(tp) && tcp_head_timedout(sk)) {
- struct sk_buff *skb;
-
- skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
- : tcp_write_queue_head(sk);
-
- tcp_for_write_queue_from(skb, sk) {
- if (skb == tcp_send_head(sk))
- break;
- if (!tcp_skb_timedout(sk, skb))
- break;
-
- tcp_skb_mark_lost(tp, skb);
- }
-
- tp->scoreboard_skb_hint = skb;
-
- tcp_verify_left_out(tp);
- }
+ tcp_timeout_skbs(sk);
}
/* CWND moderation, preventing bursts due to too big ACKs