author     Olaf Kirch <okir@suse.de>                     2005-06-22 17:16:24 +0000
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2005-06-22 16:07:19 -0400
commit     e053d1ab62c8ef0eff3dd4c95448cad3c6d2fbf4
tree       70ca8b8721761fdd4c12c330f8389958ad160c61 /net/sunrpc
parent     007e251f2b2760f738c92adc8c80cbae0bed3ce5
[PATCH] RPC: Lazy RPC receive buffer allocation
Signed-off-by: Olaf Kirch <okir@suse.de>
Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xdr.c  | 16
-rw-r--r--  net/sunrpc/xprt.c | 26
2 files changed, 35 insertions(+), 7 deletions(-)
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index f86d1ba..65b268d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -176,7 +176,7 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
 	xdr->buflen += len;
 }
 
-void
+int
 xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
 		skb_reader_t *desc,
 		skb_read_actor_t copy_actor)
@@ -190,7 +190,7 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
 		len -= base;
 		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
 		if (ret != len || !desc->count)
-			return;
+			return 0;
 		base = 0;
 	} else
 		base -= len;
@@ -210,6 +210,14 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
 	do {
 		char *kaddr;
 
+		/* ACL likes to be lazy in allocating pages - ACLs
+		 * are small by default but can get huge. */
+		if (unlikely(*ppage == NULL)) {
+			*ppage = alloc_page(GFP_ATOMIC);
+			if (unlikely(*ppage == NULL))
+				return -ENOMEM;
+		}
+
 		len = PAGE_CACHE_SIZE;
 		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
 		if (base) {
@@ -226,13 +234,15 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
 		flush_dcache_page(*ppage);
 		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
 		if (ret != len || !desc->count)
-			return;
+			return 0;
 		ppage++;
 	} while ((pglen -= len) != 0);
 
 copy_tail:
 	len = xdr->tail[0].iov_len;
 	if (base < len)
 		copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
+
+	return 0;
 }
 
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index c74a6bb..a180ed4 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -725,7 +725,8 @@ csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 		goto no_checksum;
 
 	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-	xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits);
+	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
+		return -1;
 	if (desc.offset != skb->len) {
 		unsigned int csum2;
 		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
@@ -737,7 +738,8 @@ csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 		return -1;
 	return 0;
 no_checksum:
-	xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits);
+	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
+		return -1;
 	if (desc.count)
 		return -1;
 	return 0;
@@ -907,6 +909,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 	struct rpc_rqst *req;
 	struct xdr_buf *rcvbuf;
 	size_t len;
+	int r;
 
 	/* Find and lock the request corresponding to this xid */
 	spin_lock(&xprt->sock_lock);
@@ -927,16 +930,30 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 		len = xprt->tcp_reclen - xprt->tcp_offset;
 		memcpy(&my_desc, desc, sizeof(my_desc));
 		my_desc.count = len;
-		xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
 					  &my_desc, tcp_copy_data);
 		desc->count -= len;
 		desc->offset += len;
 	} else
-		xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
 					  desc, tcp_copy_data);
 	xprt->tcp_copied += len;
 	xprt->tcp_offset += len;
 
+	if (r < 0) {
+		/* Error when copying to the receive buffer,
+		 * usually because we weren't able to allocate
+		 * additional buffer pages. All we can do now
+		 * is turn off XPRT_COPY_DATA, so the request
+		 * will not receive any additional updates,
+		 * and time out.
+		 * Any remaining data from this record will
+		 * be discarded.
+		 */
+		xprt->tcp_flags &= ~XPRT_COPY_DATA;
+		goto out;
+	}
+
 	if (xprt->tcp_copied == req->rq_private_buf.buflen)
 		xprt->tcp_flags &= ~XPRT_COPY_DATA;
 	else if (xprt->tcp_offset == xprt->tcp_reclen) {
@@ -949,6 +966,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 			req->rq_task->tk_pid);
 		xprt_complete_rqst(xprt, req, xprt->tcp_copied);
 	}
+out:
 	spin_unlock(&xprt->sock_lock);
 	tcp_check_recm(xprt);
 }
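
Note: the core of this patch is a common pattern: leave receive pages unallocated until data actually arrives for them, and change the copy routine from void to int so an allocation failure (-ENOMEM) propagates to callers, which then abort the request instead of silently dropping data. The following is a minimal userspace sketch of that pattern, not the kernel code itself; the names recv_buf, copy_into_buf, and the fixed PAGE_SIZE are invented for illustration (the patch uses alloc_page(GFP_ATOMIC) inside xdr_partial_copy_from_skb).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct recv_buf {
	char *pages[16];	/* lazily allocated; NULL until data lands here */
	size_t npages;
};

/* Copy len bytes into the buffer, allocating pages on demand.
 * Returns 0 on success or -ENOMEM, mirroring the int return the
 * patch gives xdr_partial_copy_from_skb(). */
static int copy_into_buf(struct recv_buf *buf, const char *data, size_t len)
{
	size_t i;

	for (i = 0; len != 0 && i < buf->npages; i++) {
		size_t chunk = len < PAGE_SIZE ? len : PAGE_SIZE;

		/* Lazy allocation: only pay for a page once data
		 * actually arrives for it. */
		if (buf->pages[i] == NULL) {
			buf->pages[i] = malloc(PAGE_SIZE);
			if (buf->pages[i] == NULL)
				return -ENOMEM;	/* caller must abort the request */
		}
		memcpy(buf->pages[i], data, chunk);
		data += chunk;
		len -= chunk;
	}
	return 0;
}

int main(void)
{
	struct recv_buf buf = { .npages = 16 };
	static char payload[2 * PAGE_SIZE];
	size_t i;

	memset(payload, 'x', sizeof(payload));
	if (copy_into_buf(&buf, payload, sizeof(payload)) < 0) {
		/* Analogous to tcp_read_request(): stop copying and
		 * let the request fail rather than lose data silently. */
		fprintf(stderr, "copy failed: out of memory\n");
		return 1;
	}
	printf("copied %zu bytes into lazily allocated pages\n", sizeof(payload));

	for (i = 0; i < buf.npages; i++)
		free(buf.pages[i]);
	return 0;
}

The error-handling half of the pattern matters as much as the allocation: because the TCP receive path cannot back up the stream, tcp_read_request() clears XPRT_COPY_DATA on failure so the request stops receiving updates and times out, discarding the rest of the record.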