Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c |  2
-rw-r--r--  net/ipv4/tcp_output.c            | 22
-rw-r--r--  net/ipv4/tcp_vegas.c             | 82
3 files changed, 23 insertions(+), 83 deletions(-)
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index bea54a6..8d489e7 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -61,7 +61,7 @@ static struct
static struct xt_table nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
- .lock = __RW_LOCK_UNLOCKED(__nat_table.lock),
+ .lock = __RW_LOCK_UNLOCKED(nat_table.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
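
The one-line fix above corrects a naming slip: the table is nat_table, but the __RW_LOCK_UNLOCKED() initializer referenced a nonexistent __nat_table. The macro only stringifies its argument (it becomes the lock's name when lock debugging is enabled), so the misspelling compiled, but it registered the lock under a misleading name. A minimal standalone sketch of the pattern; demo_rwlock and LOCK_INIT are stand-ins, not kernel APIs:

#include <stdio.h>

/* Stand-in for initializers like __RW_LOCK_UNLOCKED(): the argument is
 * stringified and becomes the lock's debug name. */
#define LOCK_INIT(lockname)	{ .name = #lockname }

struct demo_rwlock {
	const char *name;
};

static struct {
	struct demo_rwlock lock;
} nat_table = {
	.lock = LOCK_INIT(nat_table.lock),	/* names the real symbol */
};

int main(void)
{
	printf("%s\n", nat_table.lock.name);	/* prints "nat_table.lock" */
	return 0;
}
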
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 85b07eb..fe3b4bd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -722,8 +722,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
- if (skb->len <= mss_now || !sk_can_gso(sk) ||
- tcp_urg_mode(tcp_sk(sk))) {
+ if (skb->len <= mss_now || !sk_can_gso(sk)) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
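
For reference, the tcp_urg_mode() check being removed from this path is, in kernels of this era, essentially a test for outstanding urgent data:

static inline int tcp_urg_mode(const struct tcp_sock *tp)
{
	/* True while urgent data is outstanding, i.e. the urgent
	 * pointer snd_up differs from the last acknowledged byte. */
	return tp->snd_una != tp->snd_up;
}

Rather than disabling TSO on the skb whenever this is true, the patch keeps GSO usable and instead caps the send size at one MSS in tcp_write_xmit() and tcp_push_one() below, so frames stay small only while urgent data is actually pending.
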
@@ -1029,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
/* Compute the current effective MSS, taking SACKs and IP options,
* and even PMTU discovery events into account.
- *
- * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up
- * cannot be large. However, taking into account rare use of URG, this
- * is not a big flaw.
*/
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
@@ -1047,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
mss_now = tp->mss_cache;
- if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp))
+ if (large_allowed && sk_can_gso(sk))
doing_tso = 1;
if (dst) {
@@ -1164,9 +1159,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
{
int tso_segs = tcp_skb_pcount(skb);
- if (!tso_segs ||
- (tso_segs > 1 && (tcp_skb_mss(skb) != mss_now ||
- tcp_urg_mode(tcp_sk(sk))))) {
+ if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
tcp_set_skb_tso_segs(sk, skb, mss_now);
tso_segs = tcp_skb_pcount(skb);
}
@@ -1519,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk)
* send_head. This happens as incoming acks open up the remote
* window for us.
*
+ * LARGESEND note: !tcp_urg_mode is overkill, only frames between
+ * snd_up-64k-mss .. snd_up cannot be large. However, taking into
+ * account rare use of URG, this is not a big flaw.
+ *
* Returns 1, if no segments are in flight and we have queued segments, but
* cannot send anything now because of SWS or another problem.
*/
@@ -1570,7 +1567,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
}
limit = mss_now;
- if (tso_segs > 1)
+ if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
cwnd_quota);
@@ -1619,6 +1616,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
*/
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
+ struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
unsigned int tso_segs, cwnd_quota;
@@ -1633,7 +1631,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
BUG_ON(!tso_segs);
limit = mss_now;
- if (tso_segs > 1)
+ if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
cwnd_quota);
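
The net effect in the two senders above: the TSO split point is only consulted outside urgent mode; with urgent data pending, the limit falls back to a single MSS. A standalone model of that decision, with illustrative numbers (send_limit() is not a kernel function):

#include <stdbool.h>
#include <stdio.h>

/* Models the limit chosen in tcp_write_xmit()/tcp_push_one() after this
 * patch; split_point stands in for tcp_mss_split_point()'s result. */
static unsigned int send_limit(unsigned int mss_now, unsigned int tso_segs,
			       bool urg_mode, unsigned int split_point)
{
	unsigned int limit = mss_now;

	if (tso_segs > 1 && !urg_mode)
		limit = split_point;
	return limit;
}

int main(void)
{
	/* Hypothetical 1460-byte MSS with an 8-segment burst allowed. */
	printf("normal: %u\n", send_limit(1460, 8, false, 8 * 1460)); /* 11680 */
	printf("urgent: %u\n", send_limit(1460, 8, true, 8 * 1460));  /*  1460 */
	return 0;
}
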
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 14504da..a453aac 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -40,18 +40,14 @@
#include "tcp_vegas.h"
-/* Default values of the Vegas variables, in fixed-point representation
- * with V_PARAM_SHIFT bits to the right of the binary point.
- */
-#define V_PARAM_SHIFT 1
-static int alpha = 2<<V_PARAM_SHIFT;
-static int beta = 4<<V_PARAM_SHIFT;
-static int gamma = 1<<V_PARAM_SHIFT;
+static int alpha = 2;
+static int beta = 4;
+static int gamma = 1;
module_param(alpha, int, 0644);
-MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(alpha, "lower bound of packets in network");
module_param(beta, int, 0644);
-MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(beta, "upper bound of packets in network");
module_param(gamma, int, 0644);
MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
@@ -172,49 +168,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
return;
}
- /* The key players are v_beg_snd_una and v_beg_snd_nxt.
- *
- * These are so named because they represent the approximate values
- * of snd_una and snd_nxt at the beginning of the current RTT. More
- * precisely, they represent the amount of data sent during the RTT.
- * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
- * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
- * bytes of data have been ACKed during the course of the RTT, giving
- * an "actual" rate of:
- *
- * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
- *
- * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
- * because delayed ACKs can cover more than one segment, so they
- * don't line up nicely with the boundaries of RTTs.
- *
- * Another unfortunate fact of life is that delayed ACKs delay the
- * advance of the left edge of our send window, so that the number
- * of bytes we send in an RTT is often less than our cwnd will allow.
- * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
- */
-
if (after(ack, vegas->beg_snd_nxt)) {
/* Do the Vegas once-per-RTT cwnd adjustment. */
- u32 old_wnd, old_snd_cwnd;
-
-
- /* Here old_wnd is essentially the window of data that was
- * sent during the previous RTT, and has all
- * been acknowledged in the course of the RTT that ended
- * with the ACK we just received. Likewise, old_snd_cwnd
- * is the cwnd during the previous RTT.
- */
- old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
- tp->mss_cache;
- old_snd_cwnd = vegas->beg_snd_cwnd;
/* Save the extent of the current window so we can use this
* at the end of the next RTT.
*/
- vegas->beg_snd_una = vegas->beg_snd_nxt;
vegas->beg_snd_nxt = tp->snd_nxt;
- vegas->beg_snd_cwnd = tp->snd_cwnd;
/* We do the Vegas calculations only if we got enough RTT
* samples that we can be reasonably sure that we got
@@ -252,22 +212,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
*
* This is:
* (actual rate in segments) * baseRTT
- * We keep it as a fixed point number with
- * V_PARAM_SHIFT bits to the right of the binary point.
*/
- target_cwnd = ((u64)old_wnd * vegas->baseRTT);
- target_cwnd <<= V_PARAM_SHIFT;
- do_div(target_cwnd, rtt);
+ target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
/* Calculate the difference between the window we had,
* and the window we would like to have. This quantity
* is the "Diff" from the Arizona Vegas papers.
- *
- * Again, this is a fixed point number with
- * V_PARAM_SHIFT bits to the right of the binary
- * point.
*/
- diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
+ diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
if (diff > gamma && tp->snd_ssthresh > 2 ) {
/* Going too fast. Time to slow down
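
With the fixed-point V_PARAM_SHIFT scaling gone, both quantities above are in whole segments, which is also why alpha and beta lose their "(scale by 2)" encoding. As a hypothetical worked example: with baseRTT = 100 ms, a measured rtt = 125 ms and snd_cwnd = 20 segments, target_cwnd = 20 * 100 / 125 = 16 segments, and diff = 20 * (125 - 100) / 100 = 5 extra segments queued in the network.
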
@@ -282,16 +234,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
* truncation robs us of full link
* utilization.
*/
- tp->snd_cwnd = min(tp->snd_cwnd,
- ((u32)target_cwnd >>
- V_PARAM_SHIFT)+1);
+ tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
/* Slow start. */
tcp_slow_start(tp);
} else {
/* Congestion avoidance. */
- u32 next_snd_cwnd;
/* Figure out where we would like cwnd
* to be.
@@ -300,32 +249,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
/* The old window was too fast, so
* we slow down.
*/
- next_snd_cwnd = old_snd_cwnd - 1;
+ tp->snd_cwnd--;
} else if (diff < alpha) {
/* We don't have enough extra packets
* in the network, so speed up.
*/
- next_snd_cwnd = old_snd_cwnd + 1;
+ tp->snd_cwnd++;
} else {
/* Sending just as fast as we
* should be.
*/
- next_snd_cwnd = old_snd_cwnd;
}
-
- /* Adjust cwnd upward or downward, toward the
- * desired value.
- */
- if (next_snd_cwnd > tp->snd_cwnd)
- tp->snd_cwnd++;
- else if (next_snd_cwnd < tp->snd_cwnd)
- tp->snd_cwnd--;
}
if (tp->snd_cwnd < 2)
tp->snd_cwnd = 2;
else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
tp->snd_cwnd = tp->snd_cwnd_clamp;
+
+ tp->snd_ssthresh = tcp_current_ssthresh(sk);
}
/* Wipe the slate clean for the next RTT. */
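
Putting the Vegas hunks together: the once-per-RTT decision now operates directly on tp->snd_cwnd, and the patch records tcp_current_ssthresh() at the end of each adjustment. Below is a standalone, deliberately simplified model of the branch structure; vegas_rtt_update() and the sample numbers are illustrative, and the ssthresh/snd_cwnd_clamp bookkeeping as well as the real tcp_slow_start() are omitted:

#include <stdio.h>

/* Module parameter defaults from the patch, now in whole packets. */
static const unsigned int v_alpha = 2, v_beta = 4, v_gamma = 1;

/* rtt >= base_rtt is assumed, since base_rtt tracks the minimum RTT. */
static unsigned int vegas_rtt_update(unsigned int cwnd, unsigned int ssthresh,
				     unsigned int base_rtt, unsigned int rtt)
{
	unsigned int target_cwnd = cwnd * base_rtt / rtt;
	unsigned int diff = cwnd * (rtt - base_rtt) / base_rtt;

	if (diff > v_gamma && ssthresh > 2) {
		/* Going too fast: drop back toward the target window. */
		if (cwnd > target_cwnd + 1)
			cwnd = target_cwnd + 1;
	} else if (cwnd <= ssthresh) {
		cwnd++;			/* slow start, simplified */
	} else if (diff > v_beta) {
		cwnd--;			/* too many extra packets queued */
	} else if (diff < v_alpha) {
		cwnd++;			/* too few extra packets queued */
	}
	/* else: sending just as fast as we should be; hold cwnd. */

	if (cwnd < 2)
		cwnd = 2;
	return cwnd;
}

int main(void)
{
	/* diff = 5 > gamma while ssthresh > 2: clamp to target_cwnd + 1. */
	printf("slowdown: %u\n", vegas_rtt_update(20, 10, 100, 125)); /* 17 */
	/* diff = 3 sits between alpha and beta: hold steady. */
	printf("steady:   %u\n", vegas_rtt_update(20, 2, 100, 115));  /* 20 */
	return 0;
}
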