author		Herbert Xu <herbert@gondor.apana.org.au>	2015-12-04 22:39:56 +0800
committer	David S. Miller <davem@davemloft.net>	2015-12-04 16:53:05 -0500
commit		d3716f18a7d841565c930efde30737a3557eee69 (patch)
tree		5f0599a1e442a490a1d87e610110f9245825bc2d
parent		6a61d4dbf4f54b5683e0f1e58d873cecca7cb977 (diff)
rhashtable: Use __vmalloc with GFP_ATOMIC for table allocation
When an rhashtable user pounds rhashtable hard with back-to-back
insertions we may end up growing the table in GFP_ATOMIC context.
Unfortunately when the table reaches a certain size this often fails
because we don't have enough physically contiguous pages to hold the
new table.

Eric Dumazet suggested (and in fact wrote this patch) using __vmalloc
instead which can be used in GFP_ATOMIC context.

Reported-by: Phil Sutter <phil@nwl.cc>
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	lib/rhashtable.c	5

1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 2ff7ed9..1c624db 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -120,8 +120,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
 	    gfp != GFP_KERNEL)
 		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
-	if (tbl == NULL && gfp == GFP_KERNEL)
-		tbl = vzalloc(size);
+	if (tbl == NULL)
+		tbl = __vmalloc(size, gfp | __GFP_HIGHMEM | __GFP_ZERO,
+				PAGE_KERNEL);
 	if (tbl == NULL)
 		return NULL;
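
For illustration, below is a minimal, self-contained sketch of the allocation fallback this patch leaves in bucket_table_alloc(), written against the kernel APIs as they stood at the time of the commit (the three-argument __vmalloc() that still takes a pgprot; later kernels changed that signature). The helper name alloc_table() is hypothetical, and the bucket and lock initialisation that follows in the real function is omitted.

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical stand-in for the allocation step of bucket_table_alloc(). */
static void *alloc_table(size_t size, gfp_t gfp)
{
	void *tbl = NULL;

	/*
	 * Small tables, or callers that may not sleep, first try the slab
	 * allocator: __GFP_NORETRY stops the page allocator from working
	 * too hard for high-order pages, and __GFP_NOWARN silences the
	 * failures we expect for large tables.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);

	/*
	 * Fall back to vmalloc space, which does not need physically
	 * contiguous pages. Unlike vzalloc(), which implies GFP_KERNEL,
	 * __vmalloc() takes the caller's gfp flags, so this path also
	 * covers the GFP_ATOMIC resize case described in the changelog.
	 */
	if (tbl == NULL)
		tbl = __vmalloc(size, gfp | __GFP_HIGHMEM | __GFP_ZERO,
				PAGE_KERNEL);

	return tbl;
}

The design point is that vzalloc() may only be called from a context that can sleep, whereas __vmalloc() lets the caller pass its own gfp mask, so the table can still grow when back-to-back insertions arrive in atomic context and a large physically contiguous allocation would otherwise fail.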