path: root/lib
author    Matthew Wilcox <mawilcox@microsoft.com>  2016-12-16 11:55:56 -0500
committer Matthew Wilcox <mawilcox@microsoft.com>  2017-02-13 21:44:01 -0500
commit    7ad3d4d85c7af9632055a6ac0aa15b6b6a321c6b
tree      1586a6c01ced64d24c67859d792140853b148d20 /lib
parent    0a835c4f090af2c76fc2932c539c3b32fd21fbbb
ida: Move ida_bitmap to a percpu variable
When we preload the IDA, we allocate an IDA bitmap. Instead of storing that preallocated bitmap in the IDA, we store it in a percpu variable. Generally there are more IDAs in the system than CPUs, so this cuts down on the number of preallocated bitmaps that sit unused. Moreover, about half of the IDA users never called ida_destroy(), so they were leaking IDA bitmaps.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
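For context, the calling convention this patch preserves pairs ida_pre_get() with ida_get_new_above() under the caller's own lock, looping on -EAGAIN. A minimal sketch of that pattern (my_ida and my_lock are placeholder names, not part of this patch):

int alloc_id(struct ida *my_ida, spinlock_t *my_lock)
{
	int ret, id;

	do {
		/* May sleep; tops up this CPU's preallocated bitmap. */
		if (!ida_pre_get(my_ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(my_lock);
		ret = ida_get_new_above(my_ida, 0, &id);
		spin_unlock(my_lock);
	} while (ret == -EAGAIN);	/* stash consumed or task migrated */

	return ret ? ret : id;
}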
Diffstat (limited to 'lib')
-rw-r--r--  lib/idr.c         | 39
-rw-r--r--  lib/radix-tree.c  | 45
2 files changed, 44 insertions(+), 40 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index b87056e..2abd776 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -4,6 +4,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
static DEFINE_SPINLOCK(simple_ida_lock);
/**
@@ -193,38 +194,6 @@ EXPORT_SYMBOL(idr_replace);
* limitation, it should be quite straightforward to raise the maximum.
*/
-/**
- * ida_pre_get - reserve resources for ida allocation
- * @ida: ida handle
- * @gfp: memory allocation flags
- *
- * This function should be called before calling ida_get_new_above(). If it
- * is unable to allocate memory, it will return %0. On success, it returns %1.
- */
-int ida_pre_get(struct ida *ida, gfp_t gfp)
-{
- struct ida_bitmap *bitmap;
-
- /*
- * This looks weird, but the IDA API has no preload_end() equivalent.
- * Instead, ida_get_new() can return -EAGAIN, prompting the caller
- * to return to the ida_pre_get() step.
- */
- idr_preload(gfp);
- idr_preload_end();
-
- if (!ida->free_bitmap) {
- bitmap = kmalloc(sizeof(struct ida_bitmap), gfp);
- if (!bitmap)
- return 0;
- bitmap = xchg(&ida->free_bitmap, bitmap);
- kfree(bitmap);
- }
-
- return 1;
-}
-EXPORT_SYMBOL(ida_pre_get);
-
#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)
/**
@@ -292,10 +261,9 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
new += bit;
if (new < 0)
return -ENOSPC;
- bitmap = ida->free_bitmap;
+ bitmap = this_cpu_xchg(ida_bitmap, NULL);
if (!bitmap)
return -EAGAIN;
- ida->free_bitmap = NULL;
memset(bitmap, 0, sizeof(*bitmap));
__set_bit(bit, bitmap->bitmap);
radix_tree_iter_replace(root, &iter, slot, bitmap);
@@ -361,9 +329,6 @@ void ida_destroy(struct ida *ida)
kfree(bitmap);
radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
}
-
- kfree(ida->free_bitmap);
- ida->free_bitmap = NULL;
}
EXPORT_SYMBOL(ida_destroy);
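A note on the consumer side above: taking the bitmap with this_cpu_xchg(ida_bitmap, NULL) claims it and empties the slot in one step, so two allocations racing on the same CPU cannot hand out a single preallocation twice. When the stash turns out to be empty, because the task migrated after ida_pre_get() or an earlier allocation already consumed the bitmap, ida_get_new_above() returns -EAGAIN and the caller goes back around the retry loop sketched earlier.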
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index eaea14b..7b9f851 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -70,6 +70,14 @@ static struct kmem_cache *radix_tree_node_cachep;
#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
/*
+ * The IDA is even shorter since it uses a bitmap at the last level.
+ */
+#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
+#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
+
+/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
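A worked example of the new limits defined above (assuming the common configuration, which this hunk does not fix in stone): struct ida_bitmap carries 128 bytes of bits, so IDA_BITMAP_BITS is 1024 and ilog2(IDA_BITMAP_BITS) is 10. An int offers 31 usable index bits, leaving IDA_INDEX_BITS = 31 - 10 = 21. With RADIX_TREE_MAP_SHIFT = 6, IDA_MAX_PATH = DIV_ROUND_UP(21, 6) = 4 and IDA_PRELOAD_SIZE = 2 * 4 - 1 = 7 nodes, a shorter path than the IDR's because each IDA leaf now covers 1024 IDs rather than one.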
@@ -346,9 +354,8 @@ static void dump_ida_node(void *entry, unsigned long index)
static void ida_dump(struct ida *ida)
{
struct radix_tree_root *root = &ida->ida_rt;
- pr_debug("ida: %p %p free %d bitmap %p\n", ida, root->rnode,
- root->gfp_mask >> ROOT_TAG_SHIFT,
- ida->free_bitmap);
+ pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
+ root->gfp_mask >> ROOT_TAG_SHIFT);
dump_ida_node(root->rnode, 0);
}
#endif
@@ -2080,6 +2087,36 @@ void idr_preload(gfp_t gfp_mask)
}
EXPORT_SYMBOL(idr_preload);
+/**
+ * ida_pre_get - reserve resources for ida allocation
+ * @ida: ida handle
+ * @gfp: memory allocation flags
+ *
+ * This function should be called before calling ida_get_new_above(). If it
+ * is unable to allocate memory, it will return %0. On success, it returns %1.
+ */
+int ida_pre_get(struct ida *ida, gfp_t gfp)
+{
+ __radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
+ /*
+ * The IDA API has no preload_end() equivalent. Instead,
+ * ida_get_new() can return -EAGAIN, prompting the caller
+ * to return to the ida_pre_get() step.
+ */
+ preempt_enable();
+
+ if (!this_cpu_read(ida_bitmap)) {
+ struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
+ if (!bitmap)
+ return 0;
+ bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
+ kfree(bitmap);
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(ida_pre_get);
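A note on the refill step above (my reading; the patch does not spell this out): this_cpu_cmpxchg() returns the slot's previous contents, so when the stash was still empty the fresh bitmap is installed and the following kfree() is handed NULL, a no-op. Re-enabling preemption immediately is safe because ida_get_new_above() runs under the caller's lock and the -EAGAIN path already copes with landing on a CPU whose stash is empty. If the exchange fails because the slot was repopulated in the meantime, the pointer returned, and freed, is the bitmap still sitting in the slot rather than the surplus allocation, which looks suspect; later kernels free the freshly allocated bitmap instead when the cmpxchg fails.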
+
void **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp, int end)
{
@@ -2219,6 +2256,8 @@ static int radix_tree_cpu_dead(unsigned int cpu)
kmem_cache_free(radix_tree_node_cachep, node);
rtp->nr--;
}
+ kfree(per_cpu(ida_bitmap, cpu));
+ per_cpu(ida_bitmap, cpu) = NULL;
return 0;
}