author     Herbert Xu <herbert@gondor.apana.org.au>  2015-03-24 00:50:27 +1100
committer  David S. Miller <davem@davemloft.net>     2015-03-23 22:07:52 -0400
commit     b9ecfdaa1090b5988422eaf5348ea1954d2d7219 (patch)
tree       c2721bb2c9be54c153869b0ed3b68bbee0bfc70a /lib/rhashtable.c
parent     b824478b2145be78ac19e1cf44e2b9036c7a9608 (diff)
rhashtable: Allow GFP_ATOMIC bucket table allocation
This patch adds the ability to allocate the bucket table with GFP_ATOMIC
instead of GFP_KERNEL. This is needed when we perform an immediate
rehash during insertion.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
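
For reference, the allocation idiom the patch applies is the usual kernel
split between atomic and sleeping contexts: try kzalloc() with the caller's
gfp flags (suppressing the allocation-failure warning and retry paths), and
fall back to the sleeping vzalloc() only when GFP_KERNEL permits it. A
minimal standalone sketch of that idiom (table_alloc() is a hypothetical
name, not part of this patch):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical helper showing the GFP_KERNEL/GFP_ATOMIC split used by
 * bucket_table_alloc() in the diff below: kzalloc() honours atomic gfp
 * flags, while vzalloc() may sleep and is therefore only legal for
 * GFP_KERNEL.
 */
static void *table_alloc(size_t size, gfp_t gfp)
{
	void *p = NULL;

	/* Try the slab allocator first.  For large GFP_KERNEL requests we
	 * skip it to avoid costly high-order allocations; for GFP_ATOMIC
	 * it is the only option, whatever the size.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		p = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);

	/* The vmalloc fallback can sleep, so gate it on GFP_KERNEL. */
	if (!p && gfp == GFP_KERNEL)
		p = vzalloc(size);

	return p;
}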
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5e04403..220a11a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -58,7 +58,8 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #endif
 
 
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
+static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
+			      gfp_t gfp)
 {
 	unsigned int i, size;
 #if defined(CONFIG_PROVE_LOCKING)
@@ -75,12 +76,13 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
 
 	if (sizeof(spinlock_t) != 0) {
 #ifdef CONFIG_NUMA
-		if (size * sizeof(spinlock_t) > PAGE_SIZE)
+		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
+		    gfp == GFP_KERNEL)
 			tbl->locks = vmalloc(size * sizeof(spinlock_t));
 		else
 #endif
 		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-					   GFP_KERNEL);
+					   gfp);
 		if (!tbl->locks)
 			return -ENOMEM;
 		for (i = 0; i < size; i++)
@@ -105,23 +107,25 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 }
 
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
-					       size_t nbuckets)
+					       size_t nbuckets,
+					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
 	size_t size;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
-		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
-	if (tbl == NULL)
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
+	    gfp != GFP_KERNEL)
+		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
+	if (tbl == NULL && gfp == GFP_KERNEL)
 		tbl = vzalloc(size);
 	if (tbl == NULL)
 		return NULL;
 
 	tbl->size = nbuckets;
 
-	if (alloc_bucket_locks(ht, tbl) < 0) {
+	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}
@@ -288,7 +292,7 @@ static int rhashtable_expand(struct rhashtable *ht)
 
 	old_tbl = rhashtable_last_table(ht, old_tbl);
 
-	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
+	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
@@ -332,7 +336,7 @@ static int rhashtable_shrink(struct rhashtable *ht)
 	if (rht_dereference(old_tbl->future_tbl, ht))
 		return -EEXIST;
 
-	new_tbl = bucket_table_alloc(ht, size);
+	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
@@ -689,7 +693,7 @@ int rhashtable_init(struct rhashtable *ht,
 		}
 	}
 
-	tbl = bucket_table_alloc(ht, size);
+	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
 	if (tbl == NULL)
 		return -ENOMEM;
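
With the gfp parameter threaded through, a caller that cannot sleep (an
immediate rehash on the insert path, which this change prepares for) can now
allocate the new table atomically. A minimal sketch, assuming it sits inside
lib/rhashtable.c next to the functions above; insert_rehash() is a
hypothetical name, and the step that chains the new table and kicks the
deferred rehash worker is elided:

/* Hypothetical insert-path caller: grow the table without sleeping.
 * Passing GFP_ATOMIC makes bucket_table_alloc() skip the vmalloc()
 * fallback and hand GFP_ATOMIC through to kzalloc()/kmalloc_array().
 */
static int insert_rehash(struct rhashtable *ht, struct bucket_table *old_tbl)
{
	struct bucket_table *new_tbl;

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_ATOMIC);
	if (new_tbl == NULL)
		return -ENOMEM;

	/* ... attach new_tbl as old_tbl->future_tbl and schedule the
	 * deferred rehash, as the expand path does ...
	 */
	return 0;
}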