author     Ingo Molnar <mingo@elte.hu>  2008-10-15 13:46:29 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-10-15 13:46:29 +0200
commit     b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40
tree       53ccb1c2c14751fe69cf93102e76e97021f6df07  /kernel/lockdep.c
parent     4f962d4d65923d7b722192e729840cfb79af0a5a
parent     278429cff8809958d25415ba0ed32b59866ab1a8
Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c  383
1 file changed, 311 insertions(+), 72 deletions(-)
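
The lockdep changes pulled in by this merge replace the held_lock->class pointer with a class_idx plus an hlock_class() accessor, add forward/backward dependency counters, record each chain's held locks in chain_hlocks[], and thread a new nest_lock argument through lock_acquire()/__lock_acquire() so that check_deadlock() can tolerate several locks of one class when an outer lock serializes their takers. Below is a minimal caller-side sketch of that nest_lock annotation, written against the lock_acquire() prototype visible in the diff; the spin_lock_nest_lock() wrapper and the outer_mutex/obj_lock/NR_OBJS names are illustrative assumptions, not taken from this commit.

	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	#define NR_OBJS 16				/* hypothetical */

	static DEFINE_MUTEX(outer_mutex);		/* serializes all takers of obj_lock[] */
	static spinlock_t obj_lock[NR_OBJS];		/* many locks of one class; spin_lock_init()
							 * on each element at init time is assumed */

	static void lock_all_objects(void)
	{
		int i;

		mutex_lock(&outer_mutex);
		for (i = 0; i < NR_OBJS; i++)
			/*
			 * Annotates the acquire with outer_mutex as the nest
			 * lock, i.e. it ends up in the new
			 * lock_acquire(..., nest_lock, ip) shown in the diff,
			 * so lockdep does not flag the repeated same-class
			 * acquisition as a possible recursive deadlock.
			 */
			spin_lock_nest_lock(&obj_lock[i], &outer_mutex);
	}

	static void unlock_all_objects(void)
	{
		int i;

		for (i = NR_OBJS - 1; i >= 0; i--)
			spin_unlock(&obj_lock[i]);
		mutex_unlock(&outer_mutex);
	}
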
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 81a4e4a..dbda475 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -39,6 +39,7 @@
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
+#include <linux/ftrace.h>
#include <asm/sections.h>
@@ -81,6 +82,8 @@ static int graph_lock(void)
__raw_spin_unlock(&lockdep_lock);
return 0;
}
+ /* prevent any recursions within lockdep from causing deadlocks */
+ current->lockdep_recursion++;
return 1;
}
@@ -89,6 +92,7 @@ static inline int graph_unlock(void)
if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
+ current->lockdep_recursion--;
__raw_spin_unlock(&lockdep_lock);
return 0;
}
@@ -120,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+static inline struct lock_class *hlock_class(struct held_lock *hlock)
+{
+ if (!hlock->class_idx) {
+ DEBUG_LOCKS_WARN_ON(1);
+ return NULL;
+ }
+ return lock_classes + hlock->class_idx - 1;
+}
+
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
@@ -218,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock)
holdtime = sched_clock() - hlock->holdtime_stamp;
- stats = get_lock_stats(hlock->class);
+ stats = get_lock_stats(hlock_class(hlock));
if (hlock->read)
lock_time_inc(&stats->read_holdtime, holdtime);
else
@@ -368,6 +381,19 @@ unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
unsigned int max_recursion_depth;
+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+ unsigned int depth)
+{
+ if (!depth)
+ lockdep_dependency_gen_id++;
+ if (source->dep_gen_id == lockdep_dependency_gen_id)
+ return true;
+ source->dep_gen_id = lockdep_dependency_gen_id;
+ return false;
+}
+
#ifdef CONFIG_DEBUG_LOCKDEP
/*
* We cannot printk in early bootup code. Not even early_printk()
@@ -501,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
static void print_lock(struct held_lock *hlock)
{
- print_lock_name(hlock->class);
+ print_lock_name(hlock_class(hlock));
printk(", at: ");
print_ip_sym(hlock->acquire_ip);
}
@@ -554,6 +580,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
{
struct lock_list *entry;
+ if (lockdep_dependency_visit(class, depth))
+ return;
+
if (DEBUG_LOCKS_WARN_ON(depth >= 20))
return;
@@ -846,11 +875,11 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
if (!entry)
return 0;
- entry->class = this;
- entry->distance = distance;
if (!save_trace(&entry->trace))
return 0;
+ entry->class = this;
+ entry->distance = distance;
/*
* Since we never remove from the dependency list, the list can
* be walked lockless by other CPUs, it's only allocation
@@ -928,7 +957,7 @@ static noinline int print_circular_bug_tail(void)
if (debug_locks_silent)
return 0;
- this.class = check_source->class;
+ this.class = hlock_class(check_source);
if (!save_trace(&this.trace))
return 0;
@@ -955,6 +984,67 @@ static int noinline print_infinite_recursion_bug(void)
return 0;
}
+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+ unsigned int depth)
+{
+ struct lock_list *entry;
+ unsigned long ret = 1;
+
+ if (lockdep_dependency_visit(class, depth))
+ return 0;
+
+ /*
+ * Recurse this class's dependency list:
+ */
+ list_for_each_entry(entry, &class->locks_after, entry)
+ ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+ return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+ unsigned long ret, flags;
+
+ local_irq_save(flags);
+ __raw_spin_lock(&lockdep_lock);
+ ret = __lockdep_count_forward_deps(class, 0);
+ __raw_spin_unlock(&lockdep_lock);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+ unsigned int depth)
+{
+ struct lock_list *entry;
+ unsigned long ret = 1;
+
+ if (lockdep_dependency_visit(class, depth))
+ return 0;
+ /*
+ * Recurse this class's dependency list:
+ */
+ list_for_each_entry(entry, &class->locks_before, entry)
+ ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+ return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+ unsigned long ret, flags;
+
+ local_irq_save(flags);
+ __raw_spin_lock(&lockdep_lock);
+ ret = __lockdep_count_backward_deps(class, 0);
+ __raw_spin_unlock(&lockdep_lock);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
/*
* Prove that the dependency graph starting at <entry> can not
* lead to <target>. Print an error and return 0 if it does.
@@ -964,6 +1054,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
{
struct lock_list *entry;
+ if (lockdep_dependency_visit(source, depth))
+ return 1;
+
debug_atomic_inc(&nr_cyclic_check_recursions);
if (depth > max_recursion_depth)
max_recursion_depth = depth;
@@ -973,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
* Check this lock's dependency list:
*/
list_for_each_entry(entry, &source->locks_after, entry) {
- if (entry->class == check_target->class)
+ if (entry->class == hlock_class(check_target))
return print_circular_bug_header(entry, depth+1);
debug_atomic_inc(&nr_cyclic_checks);
if (!check_noncircular(entry->class, depth+1))
@@ -982,7 +1075,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
return 1;
}
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
* Forwards and backwards subgraph searching, for the purposes of
* proving that two subgraphs can be connected by a new dependency
@@ -1007,6 +1100,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
struct lock_list *entry;
int ret;
+ if (lockdep_dependency_visit(source, depth))
+ return 1;
+
if (depth > max_recursion_depth)
max_recursion_depth = depth;
if (depth >= RECURSION_LIMIT)
@@ -1046,6 +1142,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
struct lock_list *entry;
int ret;
+ if (lockdep_dependency_visit(source, depth))
+ return 1;
+
if (!__raw_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
@@ -1060,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
return 2;
}
+ if (!source && debug_locks_off_graph_unlock()) {
+ WARN_ON(1);
+ return 0;
+ }
+
/*
* Check this lock's dependency list:
*/
@@ -1099,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr,
printk("\nand this task is already holding:\n");
print_lock(prev);
printk("which would create a new lock dependency:\n");
- print_lock_name(prev->class);
+ print_lock_name(hlock_class(prev));
printk(" ->");
- print_lock_name(next->class);
+ print_lock_name(hlock_class(next));
printk("\n");
printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
@@ -1142,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
find_usage_bit = bit_backwards;
/* fills in <backwards_match> */
- ret = find_usage_backwards(prev->class, 0);
+ ret = find_usage_backwards(hlock_class(prev), 0);
if (!ret || ret == 1)
return ret;
find_usage_bit = bit_forwards;
- ret = find_usage_forwards(next->class, 0);
+ ret = find_usage_forwards(hlock_class(next), 0);
if (!ret || ret == 1)
return ret;
/* ret == 2 */
@@ -1268,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
struct lockdep_map *next_instance, int read)
{
struct held_lock *prev;
+ struct held_lock *nest = NULL;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
prev = curr->held_locks + i;
- if (prev->class != next->class)
+
+ if (prev->instance == next->nest_lock)
+ nest = prev;
+
+ if (hlock_class(prev) != hlock_class(next))
continue;
+
/*
* Allow read-after-read recursion of the same
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
if ((read == 2) && prev->read)
return 2;
+
+ /*
+ * We're holding the nest_lock, which serializes this lock's
+ * nesting behaviour.
+ */
+ if (nest)
+ return 2;
+
return print_deadlock_bug(curr, prev, next);
}
return 1;
@@ -1325,7 +1443,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
*/
check_source = next;
check_target = prev;
- if (!(check_noncircular(next->class, 0)))
+ if (!(check_noncircular(hlock_class(next), 0)))
return print_circular_bug_tail();
if (!check_prev_add_irq(curr, prev, next))
@@ -1349,8 +1467,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* chains - the second one will be new, but L1 already has
* L2 added to its dependency list, due to the first chain.)
*/
- list_for_each_entry(entry, &prev->class->locks_after, entry) {
- if (entry->class == next->class) {
+ list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+ if (entry->class == hlock_class(next)) {
if (distance == 1)
entry->distance = 1;
return 2;
@@ -1361,26 +1479,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* Ok, all validations passed, add the new lock
* to the previous lock's dependency list:
*/
- ret = add_lock_to_list(prev->class, next->class,
- &prev->class->locks_after, next->acquire_ip, distance);
+ ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+ &hlock_class(prev)->locks_after,
+ next->acquire_ip, distance);
if (!ret)
return 0;
- ret = add_lock_to_list(next->class, prev->class,
- &next->class->locks_before, next->acquire_ip, distance);
+ ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+ &hlock_class(next)->locks_before,
+ next->acquire_ip, distance);
if (!ret)
return 0;
/*
* Debugging printouts:
*/
- if (verbose(prev->class) || verbose(next->class)) {
+ if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
graph_unlock();
printk("\n new dependency: ");
- print_lock_name(prev->class);
+ print_lock_name(hlock_class(prev));
printk(" => ");
- print_lock_name(next->class);
+ print_lock_name(hlock_class(next));
printk("\n");
dump_stack();
return graph_lock();
@@ -1458,7 +1578,14 @@ out_bug:
}
unsigned long nr_lock_chains;
-static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+int nr_chain_hlocks;
+static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+
+struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
+{
+ return lock_classes + chain_hlocks[chain->base + i];
+}
/*
* Look up a dependency chain. If the key is not present yet then
@@ -1466,10 +1593,15 @@ static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
* validated. If the key is already hashed, return 0.
* (On return with 1 graph_lock is held.)
*/
-static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
+static inline int lookup_chain_cache(struct task_struct *curr,
+ struct held_lock *hlock,
+ u64 chain_key)
{
+ struct lock_class *class = hlock_class(hlock);
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
+ struct held_lock *hlock_curr, *hlock_next;
+ int i, j, n, cn;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
@@ -1517,6 +1649,32 @@ cache_hit:
}
chain = lock_chains + nr_lock_chains++;
chain->chain_key = chain_key;
+ chain->irq_context = hlock->irq_context;
+ /* Find the first held_lock of current chain */
+ hlock_next = hlock;
+ for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+ hlock_curr = curr->held_locks + i;
+ if (hlock_curr->irq_context != hlock_next->irq_context)
+ break;
+ hlock_next = hlock;
+ }
+ i++;
+ chain->depth = curr->lockdep_depth + 1 - i;
+ cn = nr_chain_hlocks;
+ while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
+ n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
+ if (n == cn)
+ break;
+ cn = n;
+ }
+ if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
+ chain->base = cn;
+ for (j = 0; j < chain->depth - 1; j++, i++) {
+ int lock_id = curr->held_locks[i].class_idx - 1;
+ chain_hlocks[chain->base + j] = lock_id;
+ }
+ chain_hlocks[chain->base + j] = class - lock_classes;
+ }
list_add_tail_rcu(&chain->entry, hash_head);
debug_atomic_inc(&chain_lookup_misses);
inc_chains();
@@ -1538,7 +1696,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
* graph_lock for us)
*/
if (!hlock->trylock && (hlock->check == 2) &&
- lookup_chain_cache(chain_key, hlock->class)) {
+ lookup_chain_cache(curr, hlock, chain_key)) {
/*
* Check whether last held lock:
*
@@ -1601,14 +1759,13 @@ static void check_chain_key(struct task_struct *curr)
hlock = curr->held_locks + i;
if (chain_key != hlock->prev_chain_key) {
debug_locks_off();
- printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
+ WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)hlock->prev_chain_key);
- WARN_ON(1);
return;
}
- id = hlock->class - lock_classes;
+ id = hlock->class_idx - 1;
if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
return;
@@ -1620,11 +1777,10 @@ static void check_chain_key(struct task_struct *curr)
}
if (chain_key != curr->curr_chain_key) {
debug_locks_off();
- printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
+ WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)curr->curr_chain_key);
- WARN_ON(1);
}
#endif
}
@@ -1653,7 +1809,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
print_lock(this);
printk("{%s} state was registered at:\n", usage_str[prev_bit]);
- print_stack_trace(this->class->usage_traces + prev_bit, 1);
+ print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
print_irqtrace_events(curr);
printk("\nother info that might help us debug this:\n");
@@ -1672,7 +1828,7 @@ static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
- if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+ if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
return print_usage_bug(curr, this, bad_bit, new_bit);
return 1;
}
@@ -1680,7 +1836,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
static int mark_lock(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit);
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
* print irq inversion bug:
@@ -1711,7 +1867,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
lockdep_print_held_locks(curr);
printk("\nthe first lock's dependencies:\n");
- print_lock_dependencies(this->class, 0);
+ print_lock_dependencies(hlock_class(this), 0);
printk("\nthe second lock's dependencies:\n");
print_lock_dependencies(other, 0);
@@ -1734,7 +1890,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
find_usage_bit = bit;
/* fills in <forwards_match> */
- ret = find_usage_forwards(this->class, 0);
+ ret = find_usage_forwards(hlock_class(this), 0);
if (!ret || ret == 1)
return ret;
@@ -1753,7 +1909,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
find_usage_bit = bit;
/* fills in <backwards_match> */
- ret = find_usage_backwards(this->class, 0);
+ ret = find_usage_backwards(hlock_class(this), 0);
if (!ret || ret == 1)
return ret;
@@ -1819,7 +1975,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
return 0;
#endif
- if (hardirq_verbose(this->class))
+ if (hardirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_USED_IN_SOFTIRQ:
@@ -1844,7 +2000,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
return 0;
#endif
- if (softirq_verbose(this->class))
+ if (softirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_USED_IN_HARDIRQ_READ:
@@ -1857,7 +2013,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_HARDIRQS, "hard"))
return 0;
- if (hardirq_verbose(this->class))
+ if (hardirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_USED_IN_SOFTIRQ_READ:
@@ -1870,7 +2026,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_SOFTIRQS, "soft"))
return 0;
- if (softirq_verbose(this->class))
+ if (softirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_ENABLED_HARDIRQS:
@@ -1896,7 +2052,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
return 0;
#endif
- if (hardirq_verbose(this->class))
+ if (hardirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_ENABLED_SOFTIRQS:
@@ -1922,7 +2078,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
return 0;
#endif
- if (softirq_verbose(this->class))
+ if (softirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_ENABLED_HARDIRQS_READ:
@@ -1937,7 +2093,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_USED_IN_HARDIRQ, "hard"))
return 0;
#endif
- if (hardirq_verbose(this->class))
+ if (hardirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_ENABLED_SOFTIRQS_READ:
@@ -1952,7 +2108,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_USED_IN_SOFTIRQ, "soft"))
return 0;
#endif
- if (softirq_verbose(this->class))
+ if (softirq_verbose(hlock_class(this)))
ret = 2;
break;
default:
@@ -2013,11 +2169,13 @@ void early_boot_irqs_on(void)
/*
* Hardirqs will be enabled:
*/
-void trace_hardirqs_on(void)
+void trace_hardirqs_on_caller(unsigned long a0)
{
struct task_struct *curr = current;
unsigned long ip;
+ time_hardirqs_on(CALLER_ADDR0, a0);
+
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
@@ -2055,16 +2213,23 @@ void trace_hardirqs_on(void)
curr->hardirq_enable_event = ++curr->irq_events;
debug_atomic_inc(&hardirqs_on_events);
}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+void trace_hardirqs_on(void)
+{
+ trace_hardirqs_on_caller(CALLER_ADDR0);
+}
EXPORT_SYMBOL(trace_hardirqs_on);
/*
* Hardirqs were disabled:
*/
-void trace_hardirqs_off(void)
+void trace_hardirqs_off_caller(unsigned long a0)
{
struct task_struct *curr = current;
+ time_hardirqs_off(CALLER_ADDR0, a0);
+
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
@@ -2082,7 +2247,12 @@ void trace_hardirqs_off(void)
} else
debug_atomic_inc(&redundant_hardirqs_off);
}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+void trace_hardirqs_off(void)
+{
+ trace_hardirqs_off_caller(CALLER_ADDR0);
+}
EXPORT_SYMBOL(trace_hardirqs_off);
/*
@@ -2246,7 +2416,7 @@ static inline int separate_irq_context(struct task_struct *curr,
* Mark a lock with a usage bit, and validate the state transition:
*/
static int mark_lock(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit new_bit)
+ enum lock_usage_bit new_bit)
{
unsigned int new_mask = 1 << new_bit, ret = 1;
@@ -2254,7 +2424,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
* If already set then do not dirty the cacheline,
* nor do any checks:
*/
- if (likely(this->class->usage_mask & new_mask))
+ if (likely(hlock_class(this)->usage_mask & new_mask))
return 1;
if (!graph_lock())
@@ -2262,14 +2432,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
/*
* Make sure we didnt race:
*/
- if (unlikely(this->class->usage_mask & new_mask)) {
+ if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
graph_unlock();
return 1;
}
- this->class->usage_mask |= new_mask;
+ hlock_class(this)->usage_mask |= new_mask;
- if (!save_trace(this->class->usage_traces + new_bit))
+ if (!save_trace(hlock_class(this)->usage_traces + new_bit))
return 0;
switch (new_bit) {
@@ -2349,7 +2519,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
- unsigned long ip)
+ struct lockdep_map *nest_lock, unsigned long ip)
{
struct task_struct *curr = current;
struct lock_class *class = NULL;
@@ -2403,14 +2573,16 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
return 0;
hlock = curr->held_locks + depth;
-
- hlock->class = class;
+ if (DEBUG_LOCKS_WARN_ON(!class))
+ return 0;
+ hlock->class_idx = class - lock_classes + 1;
hlock->acquire_ip = ip;
hlock->instance = lock;
+ hlock->nest_lock = nest_lock;
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
- hlock->hardirqs_off = hardirqs_off;
+ hlock->hardirqs_off = !!hardirqs_off;
#ifdef CONFIG_LOCK_STAT
hlock->waittime_stamp = 0;
hlock->holdtime_stamp = sched_clock();
@@ -2518,6 +2690,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
return 1;
}
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+ unsigned int subclass, unsigned long ip)
+{
+ struct task_struct *curr = current;
+ struct held_lock *hlock, *prev_hlock;
+ struct lock_class *class;
+ unsigned int depth;
+ int i;
+
+ depth = curr->lockdep_depth;
+ if (DEBUG_LOCKS_WARN_ON(!depth))
+ return 0;
+
+ prev_hlock = NULL;
+ for (i = depth-1; i >= 0; i--) {
+ hlock = curr->held_locks + i;
+ /*
+ * We must not cross into another context:
+ */
+ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+ break;
+ if (hlock->instance == lock)
+ goto found_it;
+ prev_hlock = hlock;
+ }
+ return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+ class = register_lock_class(lock, subclass, 0);
+ hlock->class_idx = class - lock_classes + 1;
+
+ curr->lockdep_depth = i;
+ curr->curr_chain_key = hlock->prev_chain_key;
+
+ for (; i < depth; i++) {
+ hlock = curr->held_locks + i;
+ if (!__lock_acquire(hlock->instance,
+ hlock_class(hlock)->subclass, hlock->trylock,
+ hlock->read, hlock->check, hlock->hardirqs_off,
+ hlock->nest_lock, hlock->acquire_ip))
+ return 0;
+ }
+
+ if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+ return 0;
+ return 1;
+}
+
/*
* Remove the lock to the list of currently held locks in a
* potentially non-nested (out of order) manner. This is a
@@ -2568,9 +2789,9 @@ found_it:
for (i++; i < depth; i++) {
hlock = curr->held_locks + i;
if (!__lock_acquire(hlock->instance,
- hlock->class->subclass, hlock->trylock,
+ hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip))
return 0;
}
@@ -2613,7 +2834,7 @@ static int lock_release_nested(struct task_struct *curr,
#ifdef CONFIG_DEBUG_LOCKDEP
hlock->prev_chain_key = 0;
- hlock->class = NULL;
+ hlock->class_idx = 0;
hlock->acquire_ip = 0;
hlock->irq_context = 0;
#endif
@@ -2650,7 +2871,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
*/
static void check_flags(unsigned long flags)
{
-#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
+ defined(CONFIG_TRACE_IRQFLAGS)
if (!debug_locks)
return;
@@ -2681,18 +2903,36 @@ static void check_flags(unsigned long flags)
#endif
}
+void
+lock_set_subclass(struct lockdep_map *lock,
+ unsigned int subclass, unsigned long ip)
+{
+ unsigned long flags;
+
+ if (unlikely(current->lockdep_recursion))
+ return;
+
+ raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
+ check_flags(flags);
+ if (__lock_set_subclass(lock, subclass, ip))
+ check_chain_key(current);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
/*
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
*/
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
- int trylock, int read, int check, unsigned long ip)
+ int trylock, int read, int check,
+ struct lockdep_map *nest_lock, unsigned long ip)
{
unsigned long flags;
- if (unlikely(!lock_stat && !prove_locking))
- return;
-
if (unlikely(current->lockdep_recursion))
return;
@@ -2701,20 +2941,18 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
current->lockdep_recursion = 1;
__lock_acquire(lock, subclass, trylock, read, check,
- irqs_disabled_flags(flags), ip);
+ irqs_disabled_flags(flags), nest_lock, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
-void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+void lock_release(struct lockdep_map *lock, int nested,
+ unsigned long ip)
{
unsigned long flags;
- if (unlikely(!lock_stat && !prove_locking))
- return;
-
if (unlikely(current->lockdep_recursion))
return;
@@ -2787,11 +3025,11 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
found_it:
hlock->waittime_stamp = sched_clock();
- point = lock_contention_point(hlock->class, ip);
+ point = lock_contention_point(hlock_class(hlock), ip);
- stats = get_lock_stats(hlock->class);
+ stats = get_lock_stats(hlock_class(hlock));
if (point < ARRAY_SIZE(stats->contention_point))
- stats->contention_point[i]++;
+ stats->contention_point[point]++;
if (lock->cpu != smp_processor_id())
stats->bounces[bounce_contended + !!hlock->read]++;
put_lock_stats(stats);
@@ -2835,7 +3073,7 @@ found_it:
hlock->holdtime_stamp = now;
}
- stats = get_lock_stats(hlock->class);
+ stats = get_lock_stats(hlock_class(hlock));
if (waittime) {
if (hlock->read)
lock_time_inc(&stats->read_waittime, waittime);
@@ -2930,6 +3168,7 @@ static void zap_class(struct lock_class *class)
list_del_rcu(&class->hash_entry);
list_del_rcu(&class->lock_entry);
+ class->key = NULL;
}
static inline int within(const void *addr, void *start, unsigned long size)
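
The lockdep_count_forward_deps()/lockdep_count_backward_deps() helpers added above are defined non-static, so they are meant to be consumed outside this file (presumably by the lockdep proc interface). A hedged sketch of such a consumer, assuming the all_lock_classes list and the two counters are exposed through kernel/lockdep_internals.h:

	#include <linux/kernel.h>
	#include <linux/lockdep.h>
	#include "lockdep_internals.h"	/* assumed to declare all_lock_classes and the counters */

	static void print_dependency_counts(void)
	{
		struct lock_class *class;

		/* every registered class is linked on all_lock_classes via lock_entry;
		 * this walk is racy, but acceptable for a debug printout */
		list_for_each_entry(class, &all_lock_classes, lock_entry)
			printk(KERN_DEBUG "%s: %lu forward deps, %lu backward deps\n",
			       class->name,
			       lockdep_count_forward_deps(class),
			       lockdep_count_backward_deps(class));
	}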