author     Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>   2008-03-03 12:10:16 -0800
committer  David S. Miller <davem@davemloft.net>       2008-03-03 12:10:16 -0800
commit     d152a7d88ab4134a895f91a9e00f70d118696039 (patch)
tree       c83d7e040955c8574fb8d88372b09f629627f74f
parent     401023710d73aaef1191ab4d6a79d39c51add828 (diff)
[TCP]: Must count fack_count also when skipping
Otherwise fackets_out grows too slowly compared with the real write queue.

This shouldn't be what causes those BUG_TRAP(packets <= tp->packets_out)
to trigger, but who knows how such an inconsistent fackets_out affects
TCP here and there, now that everything assumes an accurate fackets_out.
So let's see if this silences them all.
Reported by Guillaume Chazarain <guichaz@gmail.com>.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   net/ipv4/tcp_input.c | 14 +++++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 19c449f..7facdb0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1367,7 +1367,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
  * a normal way
  */
 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
-					u32 skip_to_seq)
+					u32 skip_to_seq, int *fack_count)
 {
 	tcp_for_write_queue_from(skb, sk) {
 		if (skb == tcp_send_head(sk))
@@ -1375,6 +1375,8 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
 
 		if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
 			break;
+
+		*fack_count += tcp_skb_pcount(skb);
 	}
 	return skb;
 }
@@ -1390,7 +1392,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
 		return skb;
 
 	if (before(next_dup->start_seq, skip_to_seq)) {
-		skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
+		skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
 		tcp_sacktag_walk(skb, sk, NULL,
 				 next_dup->start_seq, next_dup->end_seq,
 				 1, fack_count, reord, flag);
@@ -1537,7 +1539,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 
 			/* Head todo? */
 			if (before(start_seq, cache->start_seq)) {
-				skb = tcp_sacktag_skip(skb, sk, start_seq);
+				skb = tcp_sacktag_skip(skb, sk, start_seq,
+						       &fack_count);
 				skb = tcp_sacktag_walk(skb, sk, next_dup,
 						       start_seq,
 						       cache->start_seq,
@@ -1565,7 +1568,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 				goto walk;
 			}
 
-			skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
+			skb = tcp_sacktag_skip(skb, sk, cache->end_seq,
+					       &fack_count);
 			/* Check overlap against next cached too (past this one already) */
 			cache++;
 			continue;
@@ -1577,7 +1581,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 				break;
 			fack_count = tp->fackets_out;
 		}
-		skb = tcp_sacktag_skip(skb, sk, start_seq);
+		skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count);
 
 walk:
 		skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
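The point of the change is easy to miss in the diff: tcp_sacktag_skip() walks past
skbs on the write queue, and before this patch those skipped skbs never contributed
their packet counts to fack_count, so fackets_out lagged behind the real queue.
The sketch below is a minimal stand-alone model of that effect, not kernel code:
the struct seg layout, the queue array and the skip_to() helper are hypothetical
simplifications, with pcount standing in for tcp_skb_pcount().

/*
 * Simplified, stand-alone model (assumed names, not the kernel API) of why
 * the skip path must also accumulate fack_count.
 */
#include <stdio.h>

struct seg {
	unsigned int end_seq;	/* end sequence number of this queued segment */
	int pcount;		/* packets in this segment (tcp_skb_pcount() analogue) */
};

/* Advance past segments ending below skip_to_seq; optionally count what we pass. */
static int skip_to(const struct seg *q, int n, int i,
		   unsigned int skip_to_seq, int *fack_count, int do_count)
{
	for (; i < n && q[i].end_seq < skip_to_seq; i++) {
		if (do_count)
			*fack_count += q[i].pcount;	/* the fix: skipped segments count too */
	}
	return i;
}

int main(void)
{
	/* three queued segments, 2 + 3 + 1 = 6 packets outstanding */
	struct seg q[] = { { 1000, 2 }, { 2000, 3 }, { 3000, 1 } };
	int old_count = 0, new_count = 0;

	/* a SACK block starting at 2500 causes the first two segments to be skipped */
	skip_to(q, 3, 0, 2500, &old_count, 0);	/* pre-patch behaviour: nothing counted */
	skip_to(q, 3, 0, 2500, &new_count, 1);	/* patched behaviour */

	printf("fack_count without counting skips: %d\n", old_count);	/* 0 */
	printf("fack_count counting skips:         %d\n", new_count);	/* 5 */
	return 0;
}

In the model the pre-patch walk reports 0 where 5 packets have actually been
passed, which mirrors how fackets_out could end up smaller than the write queue
implies; the patch threads fack_count through tcp_sacktag_skip() so every skb
walked over is counted exactly once.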