Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--  lib/debugobjects.c  66  +++++++++++++++++++++++++++++++++++++---
1 file changed, 63 insertions(+), 3 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 90e46fa..fdcda3d 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -30,7 +30,7 @@ struct debug_bucket {
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
-static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE];
+static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
static DEFINE_SPINLOCK(pool_lock);
@@ -884,6 +884,63 @@ void __init debug_objects_early_init(void)
}
/*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int debug_objects_replace_static_objects(void)
+{
+ struct debug_bucket *db = obj_hash;
+ struct hlist_node *node, *tmp;
+ struct debug_obj *obj, *new;
+ HLIST_HEAD(objects);
+ int i, cnt = 0;
+
+ for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+ obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+ if (!obj)
+ goto free;
+ hlist_add_head(&obj->node, &objects);
+ }
+
+ /*
+ * When debug_objects_mem_init() is called we know that only
+ * one CPU is up, so disabling interrupts is enough
+ * protection. This avoids the lockdep hell of lock ordering.
+ */
+ local_irq_disable();
+
+ /* Remove the statically allocated objects from the pool */
+ hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+ hlist_del(&obj->node);
+ /* Move the allocated objects to the pool */
+ hlist_move_list(&objects, &obj_pool);
+
+ /* Replace the active object references */
+ for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+ hlist_move_list(&db->list, &objects);
+
+ hlist_for_each_entry(obj, node, &objects, node) {
+ new = hlist_entry(obj_pool.first, typeof(*obj), node);
+ hlist_del(&new->node);
+ /* copy object data */
+ *new = *obj;
+ hlist_add_head(&new->node, &db->list);
+ cnt++;
+ }
+ }
+
+ printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+ obj_pool_used);
+ local_irq_enable();
+ return 0;
+free:
+ hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
+ return -ENOMEM;
+}
+
+/*
* Called after the kmem_caches are functional to setup a dedicated
* cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
* prevents that the debug code is called on kmem_cache_free() for the
@@ -898,8 +955,11 @@ void __init debug_objects_mem_init(void)
sizeof (struct debug_obj), 0,
SLAB_DEBUG_OBJECTS, NULL);
- if (!obj_cache)
+ if (!obj_cache || debug_objects_replace_static_objects()) {
debug_objects_enabled = 0;
- else
+ if (obj_cache)
+ kmem_cache_destroy(obj_cache);
+ printk(KERN_WARNING "ODEBUG: out of memory.\n");
+ } else
debug_objects_selftest();
}
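
For illustration, the replacement scheme in debug_objects_replace_static_objects() above can be reduced to a small standalone C program: pre-allocate every dynamic replacement first (so an allocation failure can be rolled back without touching live data), splice the new objects into the free pool, then walk the active objects and re-point each reference at a heap copy. This is a minimal sketch, not kernel code: a plain singly linked list stands in for the kernel's hlist, calloc()/free() stand in for kmem_cache_zalloc()/kmem_cache_free(), and every name in it (struct obj, free_list, active_list, replace_static_objects) is invented for the example.

	#include <stdio.h>
	#include <stdlib.h>

	#define POOL_SIZE 4

	struct obj {
		struct obj *next;
		int data;
	};

	static struct obj static_pool[POOL_SIZE];	/* compile-time objects */
	static struct obj *free_list;			/* unused objects       */
	static struct obj *active_list;			/* objects in use       */

	/* Replace every static object, free or active, with a heap copy. */
	static int replace_static_objects(void)
	{
		struct obj *fresh = NULL, *o, *n, *old_active;
		int i, cnt = 0;

		/*
		 * 1) Allocate all replacements up front: if any allocation
		 *    fails, nothing has been modified yet and the rollback
		 *    is a simple free of the half-built list.
		 */
		for (i = 0; i < POOL_SIZE; i++) {
			n = calloc(1, sizeof(*n));
			if (!n)
				goto free;
			n->next = fresh;
			fresh = n;
		}

		/* 2) Drop the static free objects, install the heap ones. */
		free_list = fresh;

		/*
		 * 3) Re-point every active reference at a heap copy, taking
		 *    replacements from the freshly filled pool.
		 */
		old_active = active_list;
		active_list = NULL;
		for (o = old_active; o; o = o->next) {
			n = free_list;
			free_list = n->next;
			n->data = o->data;	/* copy object data */
			n->next = active_list;
			active_list = n;
			cnt++;
		}

		printf("%d of %d active objects replaced\n", cnt, POOL_SIZE);
		return 0;
	free:
		while (fresh) {
			n = fresh;
			fresh = fresh->next;
			free(n);
		}
		return -1;
	}

	int main(void)
	{
		int i;

		/* Boot-time setup: one active object, the rest free. */
		static_pool[0].data = 42;
		active_list = &static_pool[0];
		for (i = 1; i < POOL_SIZE; i++) {
			static_pool[i].next = free_list;
			free_list = &static_pool[i];
		}

		if (replace_static_objects() == 0)
			printf("active object now at %p (static pool at %p)\n",
			       (void *)active_list, (void *)static_pool);
		return 0;
	}

The ordering is the point of the design: because all allocations happen before any list is touched, the failure path frees the half-built list and returns with the static objects still intact, which is exactly what lets debug_objects_mem_init() fall back to disabling the facility and destroying the cache. Once the swap has run, nothing references obj_static_pool any more, which is why the patch can mark it __initdata and let that memory be reclaimed after init.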