path: root/kernel/lockdep.c
author    Jarek Poplawski <jarkao2@o2.pl>  2007-02-10 01:44:58 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-11 10:51:26 -0800
commit    381a229209aa6f7f72375797b7bcfcfe2ae6fcbb (patch)
tree      bbd92afe1437ac77084c6ba3485f7f5a988dd673 /kernel/lockdep.c
parent    898552c9d807fe59f3ecaf9c300c109358375c12 (diff)
[PATCH] lockdep: more unlock-on-error fixes
- returns after DEBUG_LOCKS_WARN_ON added in 3 places
- debug_locks checking after lookup_chain_cache() added in __lock_acquire()
- locking for testing and changing the global variable max_lockdep_depth added in __lock_acquire()

From: Ingo Molnar <mingo@elte.hu>

My __acquire_lock() cleanup introduced a locking bug: on SMP systems we'd release a non-owned graph lock. Fix this by moving the graph unlock back, and by leaving the max_lockdep_depth variable update possibly racy (we don't care, it's just statistics).

Also add some minimal debugging code to graph_unlock()/graph_lock(), which caught this locking bug.

Signed-off-by: Jarek Poplawski <jarkao2@o2.pl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
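The fixes all follow the same warn-and-bail pattern: when a consistency check fires, DEBUG_LOCKS_WARN_ON() disables further lock debugging, warns, and the caller returns early rather than touching state it may not own (such as releasing a non-owned lock). Below is a minimal user-space sketch of that pattern; the macro body and the lockdep_lock_held flag are simplified stand-ins, not the kernel's actual implementation.

/*
 * Sketch of the warn-and-bail pattern this patch applies. Names mirror
 * the kernel's, but the bodies are illustrative simplifications.
 */
#include <stdio.h>

static int debug_locks = 1;        /* global "checks still enabled" flag */

/* Warn once, disable further checking, and report whether we fired. */
#define DEBUG_LOCKS_WARN_ON(cond)                                       \
({                                                                      \
        int __ret = !!(cond);                                           \
        if (__ret && debug_locks) {                                     \
                debug_locks = 0;                                        \
                fprintf(stderr, "lockdep warning: %s\n", #cond);        \
        }                                                               \
        __ret;                                                          \
})

static int lockdep_lock_held;      /* stand-in for lockdep_lock state */

static int graph_unlock(void)
{
        /*
         * The fix in miniature: refuse to release a lock we do not
         * hold, instead of silently corrupting state on SMP.
         */
        if (debug_locks && !lockdep_lock_held)
                return DEBUG_LOCKS_WARN_ON(1);

        lockdep_lock_held = 0;
        return 0;
}

int main(void)
{
        graph_unlock();    /* not held: fires the warning once */
        graph_unlock();    /* debug_locks now off: check is skipped */
        return 0;
}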
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c | 23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 509efd4..2d616f4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -70,6 +70,9 @@ static int graph_lock(void)
static inline int graph_unlock(void)
{
+ if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+ return DEBUG_LOCKS_WARN_ON(1);
+
__raw_spin_unlock(&lockdep_lock);
return 0;
}
@@ -712,6 +715,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
struct lock_list *entry;
int ret;
+ if (!__raw_spin_is_locked(&lockdep_lock))
+ return DEBUG_LOCKS_WARN_ON(1);
+
if (depth > max_recursion_depth)
max_recursion_depth = depth;
if (depth >= RECURSION_LIMIT)
@@ -1293,7 +1299,8 @@ out_unlock_set:
if (!subclass || force)
lock->class_cache = class;
- DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
+ if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
+ return NULL;
return class;
}
@@ -1308,7 +1315,8 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ return 0;
/*
* We can walk it lock-free, because entries only get added
* to the hash:
@@ -1394,7 +1402,9 @@ static void check_chain_key(struct task_struct *curr)
return;
}
id = hlock->class - lock_classes;
- DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
+ if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+ return;
+
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
chain_key = 0;
@@ -2205,7 +2215,11 @@ out_calc_hash:
if (!check_prevs_add(curr, hlock))
return 0;
graph_unlock();
- }
+ } else
+ /* after lookup_chain_cache(): */
+ if (unlikely(!debug_locks))
+ return 0;
+
curr->lockdep_depth++;
check_chain_key(curr);
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
@@ -2214,6 +2228,7 @@ out_calc_hash:
printk("turning off the locking correctness validator.\n");
return 0;
}
+
if (unlikely(curr->lockdep_depth > max_lockdep_depth))
max_lockdep_depth = curr->lockdep_depth;
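
The final hunk is the part the commit message calls "possibly racy": max_lockdep_depth is now updated after graph_unlock(), so two CPUs can race on the read-compare-write. The sketch below (plain C with pthreads, illustrative names only, not kernel code) shows why such a race is benign for a statistic: a lost update can only under-report the high-water mark.

/*
 * Benign race on a statistics-only high-water mark, as tolerated for
 * max_lockdep_depth. Build with: cc -pthread race_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static unsigned int max_depth;     /* unsynchronized high-water mark */

static void record_depth(unsigned int depth)
{
        /* Racy read-compare-write: two threads may interleave here. */
        if (depth > max_depth)
                max_depth = depth;
}

static void *worker(void *arg)
{
        unsigned int d = (unsigned int)(unsigned long)arg;

        for (int i = 0; i < 100000; i++)
                record_depth(d + (i % 3));
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, worker, (void *)10UL);   /* depths 10..12 */
        pthread_create(&t2, NULL, worker, (void *)12UL);   /* depths 12..14 */
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /*
         * Usually prints 14; a lost update could report less, but the
         * value is never garbage with word-sized aligned stores.
         */
        printf("max depth seen: %u\n", max_depth);
        return 0;
}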