Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c       | 21
-rw-r--r--  kernel/lockdep_proc.c  |  6
2 files changed, 17 insertions, 10 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index a8dc99d..cb64022 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -5,7 +5,8 @@
*
* Started by Ingo Molnar:
*
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* this code maps all the lock dependencies as they occur in a live kernel
* and will warn about the following classes of locking bugs:
@@ -37,6 +38,7 @@
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
+#include <linux/hash.h>
#include <asm/sections.h>
@@ -238,8 +240,7 @@ LIST_HEAD(all_lock_classes);
*/
#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
-#define CLASSHASH_MASK (CLASSHASH_SIZE - 1)
-#define __classhashfn(key) ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
+#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key) (classhash_table + __classhashfn((key)))
static struct list_head classhash_table[CLASSHASH_SIZE];
@@ -250,9 +251,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
*/
#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
-#define CHAINHASH_MASK (CHAINHASH_SIZE - 1)
-#define __chainhashfn(chain) \
- (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
+#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
static struct list_head chainhash_table[CHAINHASH_SIZE];
@@ -676,7 +675,8 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
* (or spin_lock_init()) call - which acts as the key. For static
* locks we use the lock object itself as the key.
*/
- BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
+ BUILD_BUG_ON(sizeof(struct lock_class_key) >
+ sizeof(struct lockdep_map));
key = lock->key->subkeys + subclass;
@@ -686,9 +686,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
* We can walk the hash lockfree, because the hash only
* grows, and we are careful when adding entries to the end:
*/
- list_for_each_entry(class, hash_head, hash_entry)
- if (class->key == key)
+ list_for_each_entry(class, hash_head, hash_entry) {
+ if (class->key == key) {
+ WARN_ON_ONCE(class->name != lock->name);
return class;
+ }
+ }
return NULL;
}
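
The main change in kernel/lockdep.c is replacing the open-coded fold-and-mask class and chain hash functions with the generic hash_long() helper from <linux/hash.h>. As a rough illustration of the difference, here is a minimal user-space sketch (not the kernel code; the HASH_BITS value, the multiplier constant, and the key values are demo assumptions, not the in-kernel ones): the old fold depends only on the low 2*HASH_BITS bits of the key, so keys that agree in those bits all land in the same bucket, whereas a multiplicative golden-ratio hash lets every key bit influence the bucket index.

/*
 * User-space sketch (illustration only, not kernel code) of the hash
 * change above.  The old "(key >> BITS) + key" fold depends only on the
 * low 2*BITS bits of the key, so keys that agree in those bits -- e.g.
 * page-aligned pointers with this demo-sized table -- all collide.
 * The hash_long()-style function below multiplies by a golden-ratio
 * constant and keeps the top bits; constant and table size are demo
 * choices, not the in-kernel values.
 */
#include <stdio.h>

#define HASH_BITS	6			/* demo table: 64 buckets */
#define HASH_SIZE	(1UL << HASH_BITS)
#define HASH_MASK	(HASH_SIZE - 1)
#define GOLDEN_RATIO	0x9E3779B97F4A7C15ULL	/* ~2^64 / phi */

static unsigned long old_hashfn(unsigned long key)
{
	/* the scheme removed by the patch: fold and mask */
	return ((key >> HASH_BITS) + key) & HASH_MASK;
}

static unsigned long new_hashfn(unsigned long key)
{
	/* hash_long()-style: multiply, keep the top HASH_BITS bits */
	return (unsigned long)((key * GOLDEN_RATIO) >> (64 - HASH_BITS));
}

int main(void)
{
	/* page-aligned keys: the low 12 bits carry no entropy */
	for (unsigned long key = 0x10000000UL; key < 0x10008000UL; key += 0x1000)
		printf("%#lx -> old bucket %2lu, new bucket %2lu\n",
		       key, old_hashfn(key), new_hashfn(key));
	return 0;
}

The same reasoning applies to __chainhashfn(). The remaining hunks in this file update the copyright notice, compare the key size against struct lockdep_map rather than struct lock_class in the BUILD_BUG_ON(), and warn once when a class found by key carries a different name than the lock being looked up.
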
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index e682926..39163ed 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -5,7 +5,8 @@
*
* Started by Ingo Molnar:
*
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* Code for /proc/lockdep and /proc/lockdep_stats:
*
@@ -498,6 +499,9 @@ static void *ls_start(struct seq_file *m, loff_t *pos)
if (data->iter == data->stats)
seq_header(m);
+ if (data->iter == data->iter_end)
+ data->iter = NULL;
+
return data->iter;
}
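
The lockdep_proc.c hunk tightens the seq_file ->start() handler: once the iterator has reached iter_end, ls_start() now returns NULL instead of a pointer one past the end of the stats data, so the read loop stops cleanly. Below is a user-space sketch of that contract; the loop is a much simplified stand-in for seq_read(), and the struct and function names are borrowed loosely from the patch for illustration only.

/*
 * Sketch of the seq_file contract behind the ls_start() change:
 * ->start() must return NULL once the position is past the last
 * record, otherwise the core would call ->show() on an invalid,
 * one-past-the-end element.
 */
#include <stdio.h>

struct lock_stat_data { int dummy; };

static struct lock_stat_data stats[4];

struct ls_state {
	struct lock_stat_data *iter;
	struct lock_stat_data *iter_end;
};

static void *ls_start(struct ls_state *data, long pos)
{
	data->iter = stats + pos;
	if (data->iter == data->iter_end)	/* the added check */
		data->iter = NULL;		/* signals "iteration done" */
	return data->iter;
}

static int ls_show(struct lock_stat_data *d)
{
	printf("stat entry %td\n", d - stats);
	return 0;
}

int main(void)
{
	struct ls_state data = { .iter_end = stats + 4 };

	/*
	 * Each pass models one read restarting at ->start(pos).
	 * Without the NULL check, pos == 4 would hand ls_show() a
	 * pointer one past the end of stats[].
	 */
	for (long pos = 0; ; pos++) {
		struct lock_stat_data *entry = ls_start(&data, pos);

		if (!entry)
			break;
		ls_show(entry);
	}
	return 0;
}
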