author | Herbert Xu <herbert@gondor.apana.org.au> | 2005-09-01 17:48:23 -0700
committer | David S. Miller <davem@davemloft.net> | 2005-09-01 17:48:23 -0700
commit | d80d99d643090c3cf2b1f9fb3fadd1256f7e384f (patch)
tree | 5e8bd46fa6c73cace5efb77c43e863cd36edb0c9
parent | 2dac4b96b9362954a0638317b90e3e7bcb112e83 (diff)
download | op-kernel-dev-d80d99d643090c3cf2b1f9fb3fadd1256f7e384f.zip, op-kernel-dev-d80d99d643090c3cf2b1f9fb3fadd1256f7e384f.tar.gz
[NET]: Add sk_stream_wmem_schedule
This patch introduces sk_stream_wmem_schedule as a shorthand for
the sk_forward_alloc check on the egress path.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
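
For readers unfamiliar with the pattern being factored out: the helper wraps the common "is enough memory already forward-allocated, and if not, can more be scheduled?" test that the call sites in the diff below previously open-coded. The following is a minimal user-space sketch of that logic, not kernel code: struct sock, the stub sk_stream_mem_schedule() and its 4 KiB rounding, and the main() demo are stand-ins invented for illustration; only the body of sk_stream_wmem_schedule() is taken verbatim from the patch.

#include <stdio.h>

/* Stand-in for the real struct sock; only the field the helper touches. */
struct sock {
	int sk_forward_alloc;	/* bytes already charged to this socket */
};

/* Stand-in for sk_stream_mem_schedule(): pretend the protocol grants
 * memory in 4 KiB quanta and never refuses.  The kind argument is 0 on
 * the write path and 1 on the read path, mirroring the values used in
 * the patch. */
static int sk_stream_mem_schedule(struct sock *sk, int size, int kind)
{
	(void)kind;
	sk->sk_forward_alloc += (size + 4095) & ~4095;
	return 1;
}

/* The new helper from the patch: take the fast path when enough memory
 * is already forward-allocated, otherwise ask the protocol for more. */
static int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
	       sk_stream_mem_schedule(sk, size, 0);
}

int main(void)
{
	struct sock sk = { .sk_forward_alloc = 0 };

	/* Before the patch, callers open-coded both halves of this test. */
	if (sk_stream_wmem_schedule(&sk, 1500))
		printf("can queue 1500 bytes, forward_alloc=%d\n",
		       sk.sk_forward_alloc);
	return 0;
}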
-rw-r--r-- | include/net/sock.h | 12
-rw-r--r-- | net/ipv4/tcp.c | 3
2 files changed, 9 insertions, 6 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 312cb25..e51e626 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -709,6 +709,12 @@ static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 	       sk_stream_mem_schedule(sk, skb->truesize, 1);
 }
 
+static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+{
+	return size <= sk->sk_forward_alloc ||
+	       sk_stream_mem_schedule(sk, size, 0);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
@@ -1203,8 +1209,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 	skb = alloc_skb_fclone(size + hdr_len, gfp);
 	if (skb) {
 		skb->truesize += mem;
-		if (sk->sk_forward_alloc >= (int)skb->truesize ||
-		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
 			skb_reserve(skb, hdr_len);
 			return skb;
 		}
@@ -1227,8 +1232,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
 	struct page *page = NULL;
 
-	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+	if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
 		page = alloc_pages(sk->sk_allocation, 0);
 	else {
 		sk->sk_prot->enter_memory_pressure();
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 02fdda6..854f6d0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -552,8 +552,7 @@ new_segment:
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
-		if (sk->sk_forward_alloc < copy &&
-		    !sk_stream_mem_schedule(sk, copy, 0))
+		if (!sk_stream_wmem_schedule(sk, copy))
 			goto wait_for_memory;
 
 		if (can_coalesce) {