path: root/net/unix/af_unix.c
author		Kirill Tkhai <ktkhai@parallels.com>	2014-05-15 19:56:28 +0400
committer	David S. Miller <davem@davemloft.net>	2014-05-16 16:04:03 -0400
commit		31ff6aa5c86f7564f0dd97c5b3e1404cad238d00 (patch)
tree		2be445d20551a71766a19aa3e0eff76385e73bd5 /net/unix/af_unix.c
parent		a188a54d11629bef2169052297e61f3767ca8ce5 (diff)
net: unix: Align send data_len up to PAGE_SIZE
Using the whole of the allocated pages reduces the requested skb->data size. This is just a slightly more thrifty allocation; netperf shows no difference from the current performance.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
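The idea of the change: when a datagram is larger than SKB_MAX_ALLOC, the tail of it is carried in page fragments, and rounding that paged part (data_len) up to a whole number of pages lets the skb use the pages it is allocated anyway, shrinking the requested linear head by the same amount. The BUILD_BUG_ON guards the assumption that SKB_MAX_ALLOC is at least one page, so the rounding can never push the linear part (len - data_len) below zero. A minimal userspace sketch of the arithmetic follows; PAGE_SIZE_X, SKB_MAX_ALLOC_X, MAX_SKB_FRAGS_X, page_align() and min_sz() are illustrative stand-ins for the kernel's PAGE_SIZE, SKB_MAX_ALLOC, MAX_SKB_FRAGS, PAGE_ALIGN() and min_t(), not their real definitions or values.

/*
 * Userspace sketch only, not kernel code: the constants below are
 * illustrative stand-ins chosen to show how rounding data_len up to a
 * page boundary moves bytes from the linear skb head into whole pages.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE_X      4096UL                 /* assumed page size */
#define SKB_MAX_ALLOC_X  (4 * PAGE_SIZE_X)      /* stand-in for SKB_MAX_ALLOC */
#define MAX_SKB_FRAGS_X  17UL                   /* stand-in for MAX_SKB_FRAGS */

/* Round x up to the next multiple of PAGE_SIZE_X, like PAGE_ALIGN(). */
static size_t page_align(size_t x)
{
        return (x + PAGE_SIZE_X - 1) & ~(PAGE_SIZE_X - 1);
}

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
        size_t len = 70000;     /* example datagram length */
        size_t data_len = 0;

        if (len > SKB_MAX_ALLOC_X) {
                data_len = min_sz(len - SKB_MAX_ALLOC_X,
                                  MAX_SKB_FRAGS_X * PAGE_SIZE_X);
                /* The patch: round up so the paged area uses whole pages. */
                data_len = page_align(data_len);
        }

        printf("linear head: %zu bytes, paged area: %zu bytes (%zu pages)\n",
               len - data_len, data_len, data_len / PAGE_SIZE_X);
        return 0;
}

With these example numbers, data_len goes from 53616 to 57344 bytes (14 whole pages) and the linear head shrinks from 16384 to 12656 bytes, while the total stays len.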
Diffstat (limited to 'net/unix/af_unix.c')
-rw-r--r--	net/unix/af_unix.c	8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index bb7e8ba..7b9114e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1492,10 +1492,14 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	if (len > sk->sk_sndbuf - 32)
 		goto out;
 
-	if (len > SKB_MAX_ALLOC)
+	if (len > SKB_MAX_ALLOC) {
 		data_len = min_t(size_t,
 				 len - SKB_MAX_ALLOC,
 				 MAX_SKB_FRAGS * PAGE_SIZE);
+		data_len = PAGE_ALIGN(data_len);
+
+		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
+	}
 
 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
 				   msg->msg_flags & MSG_DONTWAIT, &err,
@@ -1670,6 +1674,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
 		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
 
+		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
+
 		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
 					   msg->msg_flags & MSG_DONTWAIT, &err,
 					   get_order(UNIX_SKB_FRAGS_SZ));
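In the stream path above, data_len is rounded up the same way but then clamped back to size: SKB_MAX_HEAD(0) (roughly a page minus skb overhead) is smaller than PAGE_SIZE, so PAGE_ALIGN(data_len) can overshoot a small write, and the clamp keeps the linear part (size - data_len) non-negative. A userspace sketch of that clamp follows; SKB_MAX_HEAD_X is a hypothetical stand-in for SKB_MAX_HEAD(0), picked below one page purely to make the clamp visible.

/*
 * Userspace sketch of the stream-side clamp (illustrative constants only).
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE_X     4096UL
#define SKB_MAX_HEAD_X  3800UL   /* hypothetical head budget, < PAGE_SIZE_X */

static size_t page_align(size_t x)
{
        return (x + PAGE_SIZE_X - 1) & ~(PAGE_SIZE_X - 1);
}

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
        size_t size = 4000;   /* one chunk of a stream write */
        size_t data_len = size > SKB_MAX_HEAD_X ? size - SKB_MAX_HEAD_X : 0;

        size_t aligned = page_align(data_len);   /* 200 rounds up to 4096 */
        data_len = min_sz(size, aligned);        /* clamped back to 4000  */

        printf("head: %zu bytes, paged: %zu bytes\n", size - data_len, data_len);
        return 0;
}

Without the clamp, the rounded-up data_len (4096) would exceed the 4000-byte chunk and the head length passed to sock_alloc_send_pskb() would underflow.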