author    Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>    2007-08-09 14:33:18 +0300
committer David S. Miller <davem@sunset.davemloft.net>  2007-10-10 16:47:55 -0700
commit b5860bbac7be1381626f3dc8a0cb970a60fcefb4 (patch)
tree   53034429084f92fdb55c062498e1f4255be9bd4a /net/ipv4/tcp_input.c
parent 35e8694198ba94b62df8aa35fa6e52a1cfb86df2 (diff)
[TCP]: Tighten tcp_sock's belt, drop left_out
It is easily calculable when needed, and its users are not that many after all.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
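For context, left_out was nothing more than the sum of the SACK'd and lost counters, so any remaining reader can recompute it on the spot. Below is a minimal sketch of that relationship in the style of the kernel's inline helpers; the helper names and the in-flight relation shown are illustrative for this page, not part of the patch itself, and assume struct tcp_sock from <linux/tcp.h>.

/* Illustrative sketch, not from this patch.
 * "Left out" segments are those that left the network but are not yet
 * cumulatively ACKed: segments reported by SACK plus segments estimated lost.
 */
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* Example use: in-flight accounting needs no cached left_out, assuming the
 * usual relation packets_in_flight = packets_out - (sacked_out + lost_out)
 *                                    + retrans_out.
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

Accordingly, in the hunks below DBGUNDO() prints the explicit sum tp->sacked_out + tp->lost_out, while tcp_try_to_open() reduces its test to tp->sacked_out.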
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ea690af..957e0fb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1346,8 +1346,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
}
}
- tp->left_out = tp->sacked_out + tp->lost_out;
-
if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
(!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
@@ -1408,7 +1406,6 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)
static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
{
tp->sacked_out = 0;
- tp->left_out = tp->lost_out;
}
int tcp_use_frto(struct sock *sk)
@@ -1573,7 +1570,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
void tcp_clear_retrans(struct tcp_sock *tp)
{
- tp->left_out = 0;
tp->retrans_out = 0;
tp->fackets_out = 0;
@@ -1973,7 +1969,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
msg,
NIPQUAD(inet->daddr), ntohs(inet->dport),
- tp->snd_cwnd, tp->left_out,
+ tp->snd_cwnd, tp->sacked_out + tp->lost_out,
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
}
@@ -2102,7 +2098,6 @@ static int tcp_try_undo_loss(struct sock *sk)
DBGUNDO(sk, "partial loss");
tp->lost_out = 0;
- tp->left_out = tp->sacked_out;
tcp_undo_cwr(sk, 1);
NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
inet_csk(sk)->icsk_retransmits = 0;
@@ -2126,8 +2121,6 @@ static void tcp_try_to_open(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
- tcp_sync_left_out(tp);
-
if (tp->retrans_out == 0)
tp->retrans_stamp = 0;
@@ -2137,7 +2130,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
int state = TCP_CA_Open;
- if (tp->left_out || tp->retrans_out || tp->undo_marker)
+ if (tp->sacked_out || tp->retrans_out || tp->undo_marker)
state = TCP_CA_Disorder;
if (inet_csk(sk)->icsk_ca_state != state) {