From 93d81d1aca26e64a75d06a85f7e128b5f49053e7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 14 Jan 2009 15:32:51 +0100
Subject: mutex: small cleanup

Remove a local variable by combining an assignment and a test into one.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 kernel/mutex.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4f45d4b..357c6d2 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -129,7 +129,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
-	unsigned int old_val;
 	unsigned long flags;
 
 	spin_lock_mutex(&lock->wait_lock, flags);
@@ -142,8 +141,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	old_val = atomic_xchg(&lock->count, -1);
-	if (old_val == 1)
+	if (atomic_xchg(&lock->count, -1) == 1)
 		goto done;
 
 	lock_contended(&lock->dep_map, ip);
@@ -158,8 +156,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * that when we release the lock, we properly wake up the
 		 * other waiters:
 		 */
-		old_val = atomic_xchg(&lock->count, -1);
-		if (old_val == 1)
+		if (atomic_xchg(&lock->count, -1) == 1)
 			break;
 
 		/*
--
cgit v1.1


From 41719b03091911028116155deddc5eedf8c45e37 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 14 Jan 2009 15:36:26 +0100
Subject: mutex: preemption fixes

The problem is that dropping the spinlock right before schedule is a
voluntary preemption point and can cause a schedule, right after which
we schedule again.

Fix this inefficiency by keeping preemption disabled until we schedule;
do this by explicitly disabling preemption and providing a schedule()
variant that assumes preemption is already disabled.
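To make the inefficiency concrete, here is a minimal sketch of the contended
slowpath before and after the change (simplified pseudo-kernel code, not the
literal diff; the real hunks to kernel/mutex.c and kernel/sched.c follow):

	/*
	 * Before: with CONFIG_PREEMPT, releasing wait_lock re-enables
	 * preemption, which is itself a preemption point -- we can be
	 * preempted and scheduled away right here ...
	 */
	spin_unlock_mutex(&lock->wait_lock, flags);
	schedule();	/* ... and then go through the scheduler again */

	/*
	 * After: preemption stays disabled across the unlock, and the new
	 * __schedule() runs with preemption already disabled, so only one
	 * trip through the scheduler happens.
	 */
	preempt_disable();	/* done once, on slowpath entry */
	spin_unlock_mutex(&lock->wait_lock, flags);
	__schedule();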
Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h |  1 +
 kernel/mutex.c        |  5 ++++-
 kernel/sched.c        | 10 +++++++---
 3 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cae9b8..9f0b372 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -328,6 +328,7 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
 
 struct nsproxy;
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 357c6d2..524ffc3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -131,6 +131,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct mutex_waiter waiter;
 	unsigned long flags;
 
+	preempt_disable();
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
@@ -170,13 +171,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
+			preempt_enable();
 			return -EINTR;
 		}
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		schedule();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
@@ -193,6 +195,7 @@ done:
 	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
 
 	return 0;
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 8be2c13..b001c13 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4538,15 +4538,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
-need_resched:
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -4603,7 +4601,13 @@ need_resched_nonpreemptible:
 
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
+}
 
+asmlinkage void __sched schedule(void)
+{
+need_resched:
+	preempt_disable();
+	__schedule();
 	preempt_enable_no_resched();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
--
cgit v1.1


From 0d66bf6d3514b35eb6897629059443132992dbd7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 12 Jan 2009 14:01:47 +0100
Subject: mutex: implement adaptive spinning

Change mutex contention behaviour such that it will sometimes busy wait on
acquisition - moving its behaviour closer to that of spinlocks.

This concept got ported to mainline from the -rt tree, where it was
originally implemented for rtmutexes by Steven Rostedt, based on work by
Gregory Haskins.

Testing with Ingo's test-mutex application
(http://lkml.org/lkml/2006/1/8/50) gave a 345% boost for VFS scalability
on my testbox:

 # ./test-mutex-shm V 16 10 | grep "^avg ops"
 avg ops/sec:               296604

 # ./test-mutex-shm V 16 10 | grep "^avg ops"
 avg ops/sec:               85870

The key criterion for the busy wait is that the lock owner has to be
running on a (different) cpu. The idea is that as long as the owner is
running, there is a fair chance it'll release the lock soon, and thus
we'll be better off spinning instead of blocking/scheduling.
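The decision rule reads roughly as follows. This is a hedged sketch only:
the helpers owner_is_running() and try_to_take() are hypothetical stand-ins
for the real checks in the diff below (mutex_spin_on_owner() and the
atomic_cmpxchg() on lock->count):

	for (;;) {
		struct thread_info *owner = ACCESS_ONCE(lock->owner);

		if (owner && !owner_is_running(owner))
			break;		/* owner sleeps: block ourselves too */

		if (try_to_take(lock))
			return 0;	/* spinning paid off */

		cpu_relax();		/* owner still runs: it may release soon */
	}
	/* fall through to the normal wait-list/schedule slowpath */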
Since regular mutexes (as opposed to rtmutexes) do not atomically track the
owner, we add the owner in a non-atomic fashion and deal with the races in
the slowpath.

Furthermore, to ease the testing of the performance impact of this new code,
there is a means to disable this behaviour at runtime (without having to
reboot the system), when scheduler debugging is enabled
(CONFIG_SCHED_DEBUG=y), by issuing the following command:

 # echo NO_OWNER_SPIN > /debug/sched_features

This command re-enables spinning again (this is also the default):

 # echo OWNER_SPIN > /debug/sched_features

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/mutex.h   |   5 ++-
 include/linux/sched.h   |   1 +
 kernel/mutex-debug.c    |   9 +---
 kernel/mutex-debug.h    |  18 ++++----
 kernel/mutex.c          | 115 +++++++++++++++++++++++++++++++++++++++++-----
 kernel/mutex.h          |  22 ++++++++-
 kernel/sched.c          |  61 +++++++++++++++++++++++++
 kernel/sched_features.h |   1 +
 8 files changed, 201 insertions(+), 31 deletions(-)

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 7a0e5c4..3069ec7 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,8 +50,10 @@ struct mutex {
 	atomic_t		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_MUTEXES
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
 	struct thread_info	*owner;
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
 	void			*magic;
 #endif
@@ -68,7 +70,6 @@ struct mutex_waiter {
 	struct list_head	list;
 	struct task_struct	*task;
 #ifdef CONFIG_DEBUG_MUTEXES
-	struct mutex		*lock;
 	void			*magic;
 #endif
 };
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9f0b372..c34b137 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -330,6 +330,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 
 struct nsproxy;
 struct user_namespace;
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 1d94160..50d022e 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -26,11 +26,6 @@
 /*
  * Must be called with lock->wait_lock held.
  */
-void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
-{
-	lock->owner = new_owner;
-}
-
 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
 {
 	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
@@ -59,7 +54,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 	/* Mark the current thread as blocked on the lock: */
 	ti->task->blocked_on = waiter;
-	waiter->lock = lock;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
@@ -82,7 +76,7 @@ void debug_mutex_unlock(struct mutex *lock)
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+	mutex_clear_owner(lock);
 }
 
 void debug_mutex_init(struct mutex *lock, const char *name,
@@ -95,7 +89,6 @@ void debug_mutex_init(struct mutex *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->owner = NULL;
 	lock->magic = lock;
 }
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index babfbdf..6b2d735 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -13,14 +13,6 @@
 /*
  * This must be called with lock->wait_lock held.
  */
-extern void
-debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
-
-static inline void debug_mutex_clear_owner(struct mutex *lock)
-{
-	lock->owner = NULL;
-}
-
 extern void debug_mutex_lock_common(struct mutex *lock,
 				    struct mutex_waiter *waiter);
 extern void debug_mutex_wake_waiter(struct mutex *lock,
@@ -35,6 +27,16 @@ extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
 			     struct lock_class_key *key);
 
+static inline void mutex_set_owner(struct mutex *lock)
+{
+	lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+	lock->owner = NULL;
+}
+
 #define spin_lock_mutex(lock, flags)			\
 	do {						\
 		struct mutex *l = container_of(lock, struct mutex, wait_lock); \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 524ffc3..ff42e97 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -10,6 +10,11 @@
  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  * David Howells for suggestions and improvements.
  *
+ *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
+ *    from the -rt tree, where it was originally implemented for rtmutexes
+ *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
+ *    and Sven Dietrich.
+ *
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
+	mutex_clear_owner(lock);
 
 	debug_mutex_init(lock, name, key);
 }
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 	 * 'unlocked' into 'locked' state.
	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	mutex_set_owner(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
 	 */
+#ifndef CONFIG_DEBUG_MUTEXES
+	/*
+	 * When debugging is enabled we must not clear the owner before time,
+	 * the slow path will always be taken, and that clears the owner field
+	 * after verifying that it was indeed current.
+	 */
+	mutex_clear_owner(lock);
+#endif
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
 
@@ -132,10 +147,71 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	unsigned long flags;
 
 	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+	/*
+	 * Optimistic spinning.
+	 *
+	 * We try to spin for acquisition when we find that there are no
+	 * pending waiters and the lock owner is currently running on a
+	 * (different) CPU.
+	 *
+	 * The rationale is that if the lock owner is running, it is likely to
+	 * release the lock soon.
+	 *
+	 * Since this needs the lock owner, and this mutex implementation
+	 * doesn't track the owner atomically in the lock field, we need to
+	 * track it non-atomically.
+	 *
+	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+	 * to serialize everything.
+	 */
+
+	for (;;) {
+		struct thread_info *owner;
+
+		/*
+		 * If there are pending waiters, join them.
+		 */
+		if (!list_empty(&lock->wait_list))
+			break;
+
+		/*
+		 * If there's an owner, wait for it to either
+		 * release the lock or go to sleep.
+		 */
+		owner = ACCESS_ONCE(lock->owner);
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
+
+		/*
+		 * When there's no owner, we might have preempted between the
+		 * owner acquiring the lock and setting the owner field. If
+		 * we're an RT task that will live-lock because we won't let
+		 * the owner complete.
+		 */
+		if (!owner && (need_resched() || rt_task(task)))
+			break;
+
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
+		/*
+		 * The cpu_relax() call is a compiler barrier which forces
+		 * everything in this loop to be re-loaded. We don't need
+		 * memory barriers as we'll eventually observe the right
+		 * values at the cost of a few extra spins.
+		 */
+		cpu_relax();
+	}
+#endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -185,8 +261,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 done:
 	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-	debug_mutex_set_owner(lock, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -222,7 +298,8 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
+				   subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +337,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 		wake_up_process(waiter->task);
 	}
 
-	debug_mutex_clear_owner(lock);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
@@ -298,18 +373,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_interruptible_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 int __sched mutex_lock_killable(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_killable_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
@@ -352,9 +439,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1)) {
-		debug_mutex_set_owner(lock, current_thread_info());
+		mutex_set_owner(lock);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 	}
+
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -380,8 +468,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	return __mutex_fastpath_trylock(&lock->count,
-					__mutex_trylock_slowpath);
+	int ret;
+
+	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
+	if (ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_trylock);
diff --git a/kernel/mutex.h b/kernel/mutex.h
index a075daf..67578ca 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -16,8 +16,26 @@
 #define mutex_remove_waiter(lock, waiter, ti) \
 		__list_del((waiter)->list.prev, (waiter)->list.next)
 
-#define debug_mutex_set_owner(lock, new_owner)		do { } while (0)
-#define debug_mutex_clear_owner(lock)			do { } while (0)
+#ifdef CONFIG_SMP
+static inline void mutex_set_owner(struct mutex *lock)
+{
+	lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+	lock->owner = NULL;
+}
+#else
+static inline void mutex_set_owner(struct mutex *lock)
+{
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+}
+#endif
+
 #define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
 #define debug_mutex_free_waiter(waiter)			do { } while (0)
 #define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
diff --git a/kernel/sched.c b/kernel/sched.c
index b001c13..589e730 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4614,6 +4614,67 @@ need_resched:
 }
 EXPORT_SYMBOL(schedule);
 
+#ifdef CONFIG_SMP
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
+{
+	unsigned int cpu;
+	struct rq *rq;
+
+	if (!sched_feat(OWNER_SPIN))
+		return 0;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * Need to access the cpu field knowing that
+	 * DEBUG_PAGEALLOC could have unmapped it if
+	 * the mutex owner just released it and exited.
+	 */
+	if (probe_kernel_address(&owner->cpu, cpu))
+		goto out;
+#else
+	cpu = owner->cpu;
+#endif
+
+	/*
+	 * Even if the access succeeded (likely case),
+	 * the cpu field may no longer be valid.
+	 */
+	if (cpu >= nr_cpumask_bits)
+		goto out;
+
+	/*
+	 * We need to validate that we can do a
+	 * get_cpu() and that we have the percpu area.
+	 */
+	if (!cpu_online(cpu))
+		goto out;
+
+	rq = cpu_rq(cpu);
+
+	for (;;) {
+		/*
+		 * Owner changed, break to re-assess state.
+		 */
+		if (lock->owner != owner)
+			break;
+
+		/*
+		 * Is that owner really running on that cpu?
+		 */
+		if (task_thread_info(rq->curr) != owner || need_resched())
+			return 0;
+
+		cpu_relax();
+	}
+out:
+	return 1;
+}
+#endif
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index da5d93b..07bc02e 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -13,3 +13,4 @@ SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
 SCHED_FEAT(WAKEUP_OVERLAP, 0)
 SCHED_FEAT(LAST_BUDDY, 1)
+SCHED_FEAT(OWNER_SPIN, 1)
--
cgit v1.1


From ac6e60ee405aa3bf718f7fe4cb01b7ee0b8877ec Mon Sep 17 00:00:00 2001
From: Chris Mason
Date: Wed, 14 Jan 2009 17:29:31 +0100
Subject: mutex: adaptive spinning, performance tweaks

Spin more aggressively. This is less fair but also markedly faster.

The numbers:

 * dbench 50 (higher is better):
   spin        1282MB/s
   v10         548MB/s
   v10 no wait 1868MB/s

 * 4k creates (numbers in files/second, higher is better):
   spin        avg 200.60 median 193.20 std 19.71 high 305.93 low 186.82
   v10         avg 180.94 median 175.28 std 13.91 high 229.31 low 168.73
   v10 no wait avg 232.18 median 222.38 std 22.91 high 314.66 low 209.12

 * File stats (numbers in seconds, lower is better):
   spin        2.27s
   v10         5.1s
   v10 no wait 1.6s

( The source changes are smaller than they look, I just moved the
  need_resched checks in __mutex_lock_common after the cmpxchg. )

Signed-off-by: Chris Mason
Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 kernel/mutex.c | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)
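The reordering is easiest to see in a condensed sketch of the resulting spin
loop (structure summarized from the hunks below, not a literal copy):

	for (;;) {
		struct thread_info *owner;

		/* 1) wait while a running owner holds the lock */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* 2) always attempt the acquisition first ... */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
			return 0;	/* got it */

		/* 3) ... and only then consider backing off */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		cpu_relax();
	}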
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff42e97..5d79781 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -171,12 +171,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		struct thread_info *owner;
 
 		/*
-		 * If there are pending waiters, join them.
-		 */
-		if (!list_empty(&lock->wait_list))
-			break;
-
-		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
@@ -184,6 +178,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if (owner && !mutex_spin_on_owner(lock, owner))
 			break;
 
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
 		/*
 		 * When there's no owner, we might have preempted between the
 		 * owner acquiring the lock and setting the owner field. If
@@ -193,13 +194,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if (!owner && (need_resched() || rt_task(task)))
 			break;
 
-		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
-			lock_acquired(&lock->dep_map, ip);
-			mutex_set_owner(lock);
-			preempt_enable();
-			return 0;
-		}
-
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
 		 * everything in this loop to be re-loaded. We don't need
--
cgit v1.1


From cf47b8f3d96b0b8b10b557444a28b3ca4024ff82 Mon Sep 17 00:00:00 2001
From: Chris Mason
Date: Wed, 14 Jan 2009 13:40:46 -0500
Subject: Btrfs: stop spinning on mutex_trylock and let the adaptive code spin
 for us

Mutexes now spin internally and the btrfs spin is no longer required for
performance.

Signed-off-by: Chris Mason
Signed-off-by: Ingo Molnar
---
 fs/btrfs/locking.c | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 39bae77..40ba8e8 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -37,16 +37,6 @@
 int btrfs_tree_lock(struct extent_buffer *eb)
 {
-	int i;
-
-	if (mutex_trylock(&eb->mutex))
-		return 0;
-	for (i = 0; i < 512; i++) {
-		cpu_relax();
-		if (mutex_trylock(&eb->mutex))
-			return 0;
-	}
-	cpu_relax();
 	mutex_lock_nested(&eb->mutex, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
 	return 0;
 }
--
cgit v1.1


From 6f2b9b9a9d750a9175dc79c74bfed5add840983c Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Thu, 29 Jan 2009 16:03:20 +0100
Subject: timer: implement lockdep deadlock detection

This modifies the timer code in a way to allow lockdep to detect
deadlocks resulting from a lock being taken in the timer function
as well as around the del_timer_sync() call.

Signed-off-by: Johannes Berg
---
 include/linux/timer.h | 93 ++++++++++++++++++++++++++++++++++++++++++------
 kernel/timer.c        | 68 +++++++++++++++++++++++++++++------
 2 files changed, 141 insertions(+), 20 deletions(-)
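The deadlock class being closed here is the classic timer teardown pattern;
a hypothetical driver sketch (not part of the patch) of what lockdep can now
report:

	static DEFINE_MUTEX(mylock);		/* hypothetical */
	static struct timer_list mytimer;	/* hypothetical */

	static void mytimer_fn(unsigned long data)
	{
		mutex_lock(&mylock);	/* the timer function takes mylock */
		/* ... */
		mutex_unlock(&mylock);
	}

	static void teardown(void)
	{
		mutex_lock(&mylock);
		/*
		 * del_timer_sync() waits for a running mytimer_fn() to
		 * finish; if that handler is blocked on mylock we deadlock.
		 * With the lockdep_map added below, lockdep sees the
		 * mylock -> timer -> mylock chain and warns.
		 */
		del_timer_sync(&mytimer);
		mutex_unlock(&mylock);
	}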
diff --git a/include/linux/timer.h b/include/linux/timer.h
index daf9685..51774eb 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -5,6 +5,7 @@
 #include <linux/ktime.h>
 #include <linux/stddef.h>
 #include <linux/debugobjects.h>
+#include <linux/stringify.h>
 
 struct tvec_base;
 
@@ -21,52 +22,126 @@ struct timer_list {
 	char start_comm[16];
 	int start_pid;
 #endif
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
 };
 
 extern struct tvec_base boot_tvec_bases;
 
+#ifdef CONFIG_LOCKDEP
+/*
+ * NB: because we have to copy the lockdep_map, setting the lockdep_map key
+ * (second argument) here is required, otherwise it could be initialised to
+ * the copy of the lockdep_map later! We use the pointer to and the string
+ * "<file>:<line>" as the key resp. the name of the lockdep_map.
+ */
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
+	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
+#else
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
+#endif
+
 #define TIMER_INITIALIZER(_function, _expires, _data) {	\
 		.entry = { .prev = TIMER_ENTRY_STATIC },	\
 		.function = (_function),			\
 		.expires = (_expires),				\
 		.data = (_data),				\
 		.base = &boot_tvec_bases,			\
+		__TIMER_LOCKDEP_MAP_INITIALIZER(		\
+			__FILE__ ":" __stringify(__LINE__))	\
 	}
 
 #define DEFINE_TIMER(_name, _function, _expires, _data)	\
 	struct timer_list _name =				\
 		TIMER_INITIALIZER(_function, _expires, _data)
 
-void init_timer(struct timer_list *timer);
-void init_timer_deferrable(struct timer_list *timer);
+void init_timer_key(struct timer_list *timer,
+		    const char *name,
+		    struct lock_class_key *key);
+void init_timer_deferrable_key(struct timer_list *timer,
+			       const char *name,
+			       struct lock_class_key *key);
+
+#ifdef CONFIG_LOCKDEP
+#define init_timer(timer)					\
+	do {							\
+		static struct lock_class_key __key;		\
+		init_timer_key((timer), #timer, &__key);	\
+	} while (0)
+
+#define init_timer_deferrable(timer)				\
+	do {							\
+		static struct lock_class_key __key;		\
+		init_timer_deferrable_key((timer), #timer, &__key); \
+	} while (0)
+
+#define init_timer_on_stack(timer)				\
+	do {							\
+		static struct lock_class_key __key;		\
+		init_timer_on_stack_key((timer), #timer, &__key); \
+	} while (0)
+
+#define setup_timer(timer, fn, data)				\
+	do {							\
+		static struct lock_class_key __key;		\
+		setup_timer_key((timer), #timer, &__key, (fn), (data)); \
+	} while (0)
+
+#define setup_timer_on_stack(timer, fn, data)			\
+	do {							\
+		static struct lock_class_key __key;		\
+		setup_timer_on_stack_key((timer), #timer, &__key, \
+					 (fn), (data));		\
+	} while (0)
+#else
+#define init_timer(timer)\
+	init_timer_key((timer), NULL, NULL)
+#define init_timer_deferrable(timer)\
+	init_timer_deferrable_key((timer), NULL, NULL)
+#define init_timer_on_stack(timer)\
+	init_timer_on_stack_key((timer), NULL, NULL)
+#define setup_timer(timer, fn, data)\
+	setup_timer_key((timer), NULL, NULL, (fn), (data))
+#define setup_timer_on_stack(timer, fn, data)\
+	setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
+#endif
 
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void init_timer_on_stack(struct timer_list *timer);
+extern void init_timer_on_stack_key(struct timer_list *timer,
+				    const char *name,
+				    struct lock_class_key *key);
 extern void destroy_timer_on_stack(struct timer_list *timer);
 #else
 static inline void destroy_timer_on_stack(struct timer_list *timer) { }
-static inline void init_timer_on_stack(struct timer_list *timer)
+static inline void init_timer_on_stack_key(struct timer_list *timer,
+					   const char *name,
+					   struct lock_class_key *key)
 {
-	init_timer(timer);
+	init_timer_key(timer, name, key);
 }
 #endif
 
-static inline void setup_timer(struct timer_list * timer,
+static inline void setup_timer_key(struct timer_list * timer,
+				   const char *name,
+				   struct lock_class_key *key,
 				void (*function)(unsigned long),
 				unsigned long data)
 {
 	timer->function = function;
 	timer->data = data;
-	init_timer(timer);
+	init_timer_key(timer, name, key);
 }
 
-static inline void setup_timer_on_stack(struct timer_list *timer,
+static inline void setup_timer_on_stack_key(struct timer_list *timer,
+					    const char *name,
+					    struct lock_class_key *key,
 					void (*function)(unsigned long),
 					unsigned long data)
 {
 	timer->function = function;
 	timer->data = data;
-	init_timer_on_stack(timer);
+	init_timer_on_stack_key(timer, name, key);
 }
 
 /**
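What the CONFIG_LOCKDEP variants buy us, sketched with a hypothetical caller:
every init_timer() call site expands to its own static lock_class_key, so
each site gets a distinct lockdep class named after the expression:

	struct timer_list my_timer;	/* hypothetical */

	init_timer(&my_timer);
	/* expands (under CONFIG_LOCKDEP) to:
	 *	do {
	 *		static struct lock_class_key __key;
	 *		init_timer_key((&my_timer), "&my_timer", &__key);
	 *	} while (0)
	 */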
diff --git a/kernel/timer.c b/kernel/timer.c
index 13dd64f..ef1c385 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer)
 	debug_object_free(timer, &timer_debug_descr);
 }
 
-static void __init_timer(struct timer_list *timer);
+static void __init_timer(struct timer_list *timer,
+			 const char *name,
+			 struct lock_class_key *key);
 
-void init_timer_on_stack(struct timer_list *timer)
+void init_timer_on_stack_key(struct timer_list *timer,
+			     const char *name,
+			     struct lock_class_key *key)
 {
 	debug_object_init_on_stack(timer, &timer_debug_descr);
-	__init_timer(timer);
+	__init_timer(timer, name, key);
 }
-EXPORT_SYMBOL_GPL(init_timer_on_stack);
+EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 
 void destroy_timer_on_stack(struct timer_list *timer)
 {
@@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
 static inline void debug_timer_deactivate(struct timer_list *timer) { }
 #endif
 
-static void __init_timer(struct timer_list *timer)
+static void __init_timer(struct timer_list *timer,
+			 const char *name,
+			 struct lock_class_key *key)
 {
 	timer->entry.next = NULL;
 	timer->base = __raw_get_cpu_var(tvec_bases);
@@ -521,6 +527,7 @@ static void __init_timer(struct timer_list *timer)
 	timer->start_pid = -1;
 	memset(timer->start_comm, 0, TASK_COMM_LEN);
 #endif
+	lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
 
 /**
@@ -530,19 +537,23 @@ static void __init_timer(struct timer_list *timer)
  * init_timer() must be done to a timer prior calling *any* of the
  * other timer functions.
  */
-void init_timer(struct timer_list *timer)
+void init_timer_key(struct timer_list *timer,
+		    const char *name,
+		    struct lock_class_key *key)
 {
 	debug_timer_init(timer);
-	__init_timer(timer);
+	__init_timer(timer, name, key);
 }
-EXPORT_SYMBOL(init_timer);
+EXPORT_SYMBOL(init_timer_key);
 
-void init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable_key(struct timer_list *timer,
+			       const char *name,
+			       struct lock_class_key *key)
 {
-	init_timer(timer);
+	init_timer_key(timer, name, key);
 	timer_set_deferrable(timer);
 }
-EXPORT_SYMBOL(init_timer_deferrable);
+EXPORT_SYMBOL(init_timer_deferrable_key);
 
 static inline void detach_timer(struct timer_list *timer,
 				int clear_pending)
@@ -789,6 +800,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  */
 int del_timer_sync(struct timer_list *timer)
 {
+#ifdef CONFIG_LOCKDEP
+	unsigned long flags;
+
+	local_irq_save(flags);
+	lock_map_acquire(&timer->lockdep_map);
+	lock_map_release(&timer->lockdep_map);
+	local_irq_restore(flags);
+#endif
+
 	for (;;) {
 		int ret = try_to_del_timer_sync(timer);
 		if (ret >= 0)
@@ -861,10 +881,36 @@ static inline void __run_timers(struct tvec_base *base)
 
 			set_running_timer(base, timer);
 			detach_timer(timer, 1);
+
 			spin_unlock_irq(&base->lock);
 			{
 				int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+				/*
+				 * It is permissible to free the timer from
+				 * inside the function that is called from
+				 * it, this we need to take into account for
+				 * lockdep too. To avoid bogus "held lock
+				 * freed" warnings as well as problems when
+				 * looking into timer->lockdep_map, make a
+				 * copy and use that here.
+				 */
+				struct lockdep_map lockdep_map =
+					timer->lockdep_map;
+#endif
+				/*
+				 * Couple the lock chain with the lock chain at
+				 * del_timer_sync() by acquiring the lock_map
+				 * around the fn() call here and in
+				 * del_timer_sync().
+				 */
+				lock_map_acquire(&lockdep_map);
+
 				fn(data);
+
+				lock_map_release(&lockdep_map);
+
 				if (preempt_count != preempt_count()) {
 					printk(KERN_ERR "huh, entered %p "
 					       "with preempt_count %08x, exited"
--
cgit v1.1


From cf40bd16fdad42c053040bcd3988f5fdedbb6c57 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Wed, 21 Jan 2009 08:12:39 +0100
Subject: lockdep: annotate reclaim context (__GFP_NOFS)

Here is another version, with the incremental patch rolled up, and added
reclaim context annotation to kswapd, and allocation tracing to slab
allocators (which may only ever reach the page allocator in rare cases,
so it is good to put annotations here too).

Haven't tested this version as such, but it should be getting closer to
merge worthy ;)

--
After noticing some code in mm/filemap.c accidentally perform a __GFP_FS
allocation when it should not have been, I thought it might be a good
idea to try to catch this kind of thing with lockdep.

I coded up a little idea that seems to work. Unfortunately the system
has to actually be in __GFP_FS page reclaim, then take the lock, before
it will mark it. But at least that might still be some orders of
magnitude more common (and more debuggable) than an actual deadlock
condition, so we have some improvement I hope (the concept is no less
complete than discovery of a lock's interrupt contexts).

I guess we could even do the same thing with __GFP_IO (normal reclaim),
and even GFP_NOIO locks too... but filesystems will have the most locks
and fiddly code paths, so let's start there and see how it goes.

It *seems* to work. I did a quick test.

=================================
[ INFO: inconsistent lock state ]
2.6.28-rc6-00007-ged31348-dirty #26
---------------------------------
inconsistent {in-reclaim-W} -> {ov-reclaim-W} usage.
modprobe/8526 [HC0[0]:SC0[0]:HE1:SE1] takes:
 (testlock){--..}, at: [] brd_init+0x55/0x216 [brd]
{in-reclaim-W} state was registered at:
  [] __lock_acquire+0x75b/0x1a60
  [] lock_acquire+0x91/0xc0
  [] mutex_lock_nested+0xb1/0x310
  [] brd_init+0x2b/0x216 [brd]
  [] _stext+0x3b/0x170
  [] sys_init_module+0xaf/0x1e0
  [] system_call_fastpath+0x16/0x1b
  [] 0xffffffffffffffff
irq event stamp: 3929
hardirqs last enabled at (3929): [] mutex_lock_nested+0x285/0x310
hardirqs last disabled at (3928): [] mutex_lock_nested+0x59/0x310
softirqs last enabled at (3732): [] sk_filter+0x83/0xe0
softirqs last disabled at (3730): [] sk_filter+0x16/0xe0

other info that might help us debug this:
1 lock held by modprobe/8526:
 #0:  (testlock){--..}, at: [] brd_init+0x55/0x216 [brd]

stack backtrace:
Pid: 8526, comm: modprobe Not tainted 2.6.28-rc6-00007-ged31348-dirty #26
Call Trace:
 [] print_usage_bug+0x193/0x1d0
 [] mark_lock+0xaf0/0xca0
 [] mark_held_locks+0x55/0xc0
 [] ? brd_init+0x0/0x216 [brd]
 [] trace_reclaim_fs+0x2a/0x60
 [] __alloc_pages_internal+0x475/0x580
 [] ? mutex_lock_nested+0x26e/0x310
 [] ? brd_init+0x0/0x216 [brd]
 [] brd_init+0x6a/0x216 [brd]
 [] ? brd_init+0x0/0x216 [brd]
 [] _stext+0x3b/0x170
 [] ? mutex_unlock+0x9/0x10
 [] ? __mutex_unlock_slowpath+0x10d/0x180
 [] ? trace_hardirqs_on_caller+0x12c/0x190
 [] sys_init_module+0xaf/0x1e0
 [] system_call_fastpath+0x16/0x1b

Signed-off-by: Nick Piggin
Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/lockdep.h    |  17 +++-
 include/linux/sched.h      |   1 +
 kernel/lockdep.c           | 229 ++++++++++++++++++++++++++++++++++++++++---
 kernel/lockdep_internals.h |   3 +-
 kernel/lockdep_proc.c      |   6 +-
 mm/page_alloc.c            |   5 +
 mm/slab.c                  |   4 +
 mm/slob.c                  |   2 +
 mm/slub.c                  |   1 +
 mm/vmscan.c                |   3 +
 10 files changed, 254 insertions(+), 17 deletions(-)
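The recursion being annotated, as a hypothetical filesystem sketch (not from
the patch): a lock that the fs takes during reclaim is held across an
allocation that may itself enter fs reclaim:

	static DEFINE_MUTEX(fs_lock);	/* hypothetical, also taken in reclaim */

	void fs_write_path(void)
	{
		void *buf;

		mutex_lock(&fs_lock);	/* lockdep: ov-reclaim-W usage */
		/*
		 * GFP_KERNEL includes __GFP_FS, so this allocation may
		 * recurse into fs reclaim -- which takes fs_lock again.
		 */
		buf = kmalloc(4096, GFP_KERNEL);
		mutex_unlock(&fs_lock);
	}

	void fs_reclaim_path(void)	/* invoked from page reclaim */
	{
		mutex_lock(&fs_lock);	/* lockdep: in-reclaim-W usage */
		/* ... */
		mutex_unlock(&fs_lock);
	}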
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02f..cc97bdb 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -27,12 +27,16 @@ enum lock_usage_bit
 	LOCK_USED = 0,
 	LOCK_USED_IN_HARDIRQ,
 	LOCK_USED_IN_SOFTIRQ,
+	LOCK_USED_IN_RECLAIM_FS,
 	LOCK_ENABLED_SOFTIRQS,
 	LOCK_ENABLED_HARDIRQS,
+	LOCK_HELD_OVER_RECLAIM_FS,
 	LOCK_USED_IN_HARDIRQ_READ,
 	LOCK_USED_IN_SOFTIRQ_READ,
+	LOCK_USED_IN_RECLAIM_FS_READ,
 	LOCK_ENABLED_SOFTIRQS_READ,
 	LOCK_ENABLED_HARDIRQS_READ,
+	LOCK_HELD_OVER_RECLAIM_FS_READ,
 	LOCK_USAGE_STATES
 };
 
@@ -42,16 +46,20 @@ enum lock_usage_bit
 #define LOCKF_USED			(1 << LOCK_USED)
 #define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
 #define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_USED_IN_RECLAIM_FS	(1 << LOCK_USED_IN_RECLAIM_FS)
 #define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
 #define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
+#define LOCKF_HELD_OVER_RECLAIM_FS	(1 << LOCK_HELD_OVER_RECLAIM_FS)
 
 #define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
 #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
 
 #define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
 #define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_USED_IN_RECLAIM_FS_READ	(1 << LOCK_USED_IN_RECLAIM_FS_READ)
 #define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
 #define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
+#define LOCKF_HELD_OVER_RECLAIM_FS_READ	(1 << LOCK_HELD_OVER_RECLAIM_FS_READ)
 
 #define LOCKF_ENABLED_IRQS_READ \
 		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
@@ -324,7 +332,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
 	lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
-# define INIT_LOCKDEP				.lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
@@ -342,6 +354,9 @@ static inline void lockdep_on(void)
 # define lock_release(l, n, i)			do { } while (0)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
+# define lockdep_set_current_reclaim_state(g)	do { } while (0)
+# define lockdep_clear_current_reclaim_state()	do { } while (0)
+# define lockdep_trace_alloc(g)			do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4efb552..b00a77f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1313,6 +1313,7 @@ struct task_struct {
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
 #endif
 
 /* journalling filesystem info */
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06b0c35..977f940 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -310,12 +310,14 @@ EXPORT_SYMBOL(lockdep_on);
 #if VERBOSE
 # define HARDIRQ_VERBOSE	1
 # define SOFTIRQ_VERBOSE	1
+# define RECLAIM_VERBOSE	1
 #else
 # define HARDIRQ_VERBOSE	0
 # define SOFTIRQ_VERBOSE	0
+# define RECLAIM_VERBOSE	0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
@@ -454,6 +456,10 @@ static const char *usage_str[] =
 	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R",
 	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R",
 	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R",
+	[LOCK_USED_IN_RECLAIM_FS] =	"in-reclaim-W",
+	[LOCK_USED_IN_RECLAIM_FS_READ] = "in-reclaim-R",
+	[LOCK_HELD_OVER_RECLAIM_FS] =	"ov-reclaim-W",
+	[LOCK_HELD_OVER_RECLAIM_FS_READ] = "ov-reclaim-R",
 };
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -462,9 +468,10 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 }
 
 void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3,
+		char *c4, char *c5, char *c6)
 {
-	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
+	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.', *c5 = '.', *c6 = '.';
 
 	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 		*c1 = '+';
@@ -493,14 +500,29 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4
 		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
 			*c4 = '?';
 	}
+
+	if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS)
+		*c5 = '+';
+	else
+		if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS)
+			*c5 = '-';
+
+	if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ)
+		*c6 = '-';
+	if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS_READ) {
+		*c6 = '+';
+		if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ)
+			*c6 = '?';
+	}
+
 }
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN], c1, c2, c3, c4, c5, c6;
 	const char *name;
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
+	get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6);
 
 	name = class->name;
 	if (!name) {
@@ -513,7 +535,7 @@ static void print_lock_name(struct lock_class *class)
 		if (class->subclass)
 			printk("/%d", class->subclass);
 	}
-	printk("){%c%c%c%c}", c1, c2, c3, c4);
+	printk("){%c%c%c%c%c%c}", c1, c2, c3, c4, c5, c6);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
@@ -1306,6 +1328,26 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
 			LOCK_ENABLED_SOFTIRQS, "soft"))
 		return 0;
 
+	/*
+	 * Prove that the new dependency does not connect a reclaim-fs-safe
+	 * lock with a reclaim-fs-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS,
+			LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+		return 0;
+
+	/*
+	 * Prove that the new dependency does not connect a reclaim-fs-safe-read
+	 * lock with a reclaim-fs-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS_READ,
+			LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs-read"))
+		return 0;
+
 	return 1;
 }
 
@@ -1949,6 +1991,14 @@ static int softirq_verbose(struct lock_class *class)
 	return 0;
 }
 
+static int reclaim_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+	return class_filter(class);
+#endif
+	return 0;
+}
+
 #define STRICT_READ_CHECKS	1
 
 static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
@@ -2007,6 +2057,31 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_USED_IN_RECLAIM_FS:
+		if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS))
+			return 0;
+		if (!valid_state(curr, this, new_bit,
+				 LOCK_HELD_OVER_RECLAIM_FS_READ))
+			return 0;
+		/*
+		 * just marked it reclaim-fs-safe, check that this lock
+		 * took no reclaim-fs-unsafe lock in the past:
+		 */
+		if (!check_usage_forwards(curr, this,
+					  LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+#if STRICT_READ_CHECKS
+		/*
+		 * just marked it reclaim-fs-safe, check that this lock
+		 * took no reclaim-fs-unsafe-read lock in the past:
+		 */
+		if (!check_usage_forwards(curr, this,
+			   LOCK_HELD_OVER_RECLAIM_FS_READ, "reclaim-fs-read"))
+			return 0;
+#endif
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	case LOCK_USED_IN_HARDIRQ_READ:
 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
 			return 0;
@@ -2033,6 +2108,19 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_USED_IN_RECLAIM_FS_READ:
+		if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS))
+			return 0;
+		/*
+		 * just marked it reclaim-fs-read-safe, check that this lock
+		 * took no reclaim-fs-unsafe lock in the past:
+		 */
+		if (!check_usage_forwards(curr, this,
+					  LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	case LOCK_ENABLED_HARDIRQS:
 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
 			return 0;
@@ -2085,6 +2173,32 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_HELD_OVER_RECLAIM_FS:
+		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
+			return 0;
+		if (!valid_state(curr, this, new_bit,
+				 LOCK_USED_IN_RECLAIM_FS_READ))
+			return 0;
+		/*
+		 * just marked it reclaim-fs-unsafe, check that no reclaim-fs-safe
+		 * lock in the system ever took it in the past:
+		 */
+		if (!check_usage_backwards(curr, this,
+					   LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+#if STRICT_READ_CHECKS
+		/*
+		 * just marked it softirq-unsafe, check that no
+		 * softirq-safe-read lock in the system ever took
+		 * it in the past:
+		 */
+		if (!check_usage_backwards(curr, this,
+			   LOCK_USED_IN_RECLAIM_FS_READ, "reclaim-fs-read"))
+			return 0;
+#endif
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	case LOCK_ENABLED_HARDIRQS_READ:
 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
 			return 0;
@@ -2115,6 +2229,21 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_HELD_OVER_RECLAIM_FS_READ:
+		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
+			return 0;
+#if STRICT_READ_CHECKS
+		/*
+		 * just marked it reclaim-fs-read-unsafe, check that no
+		 * reclaim-fs-safe lock in the system ever took it in the past:
+		 */
+		if (!check_usage_backwards(curr, this,
+					   LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+#endif
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	default:
 		WARN_ON(1);
 		break;
@@ -2123,11 +2252,17 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 	return ret;
 }
 
+enum mark_type {
+	HARDIRQ,
+	SOFTIRQ,
+	RECLAIM_FS,
+};
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
 	enum lock_usage_bit usage_bit;
 	struct held_lock *hlock;
@@ -2136,17 +2271,32 @@ mark_held_locks(struct task_struct *curr, int hardirq)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		if (hardirq) {
+		switch (mark) {
+		case HARDIRQ:
 			if (hlock->read)
 				usage_bit = LOCK_ENABLED_HARDIRQS_READ;
 			else
 				usage_bit = LOCK_ENABLED_HARDIRQS;
-		} else {
+			break;
+
+		case SOFTIRQ:
 			if (hlock->read)
 				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
 			else
 				usage_bit = LOCK_ENABLED_SOFTIRQS;
+			break;
+
+		case RECLAIM_FS:
+			if (hlock->read)
+				usage_bit = LOCK_HELD_OVER_RECLAIM_FS_READ;
+			else
+				usage_bit = LOCK_HELD_OVER_RECLAIM_FS;
+			break;
+
+		default:
+			BUG();
 		}
+
 		if (!mark_lock(curr, hlock, usage_bit))
 			return 0;
 	}
@@ -2200,7 +2350,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * We are going to turn hardirqs on, so set the
	 * usage bit for all held locks:
	 */
-	if (!mark_held_locks(curr, 1))
+	if (!mark_held_locks(curr, HARDIRQ))
		return;
	/*
	 * If we have softirqs enabled, then set the usage
	 * bit for all held locks. (disabled hardirqs prevented
	 * this bit from being set before)
	 */
	if (curr->softirqs_enabled)
-		if (!mark_held_locks(curr, 0))
+		if (!mark_held_locks(curr, SOFTIRQ))
			return;

	curr->hardirq_enable_ip = ip;
@@ -2288,7 +2438,7 @@ void trace_softirqs_on(unsigned long ip)
	 * enabled too:
	 */
	if (curr->hardirqs_enabled)
-		mark_held_locks(curr, 0);
+		mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
@@ -2317,6 +2467,31 @@ void trace_softirqs_off(unsigned long ip)
		debug_atomic_inc(&redundant_softirqs_off);
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	/* no reclaim without waiting on it */
+	if (!(gfp_mask & __GFP_WAIT))
+		return;
+
+	/* this guy won't enter reclaim */
+	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+		return;
+
+	/* We're only interested __GFP_FS allocations for now */
+	if (!(gfp_mask & __GFP_FS))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
+		return;
+
+	mark_held_locks(curr, RECLAIM_FS);
+}
+
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
	/*
@@ -2362,6 +2537,22 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
		}
	}
 
+	/*
+	 * We reuse the irq context infrastructure more broadly as a general
+	 * context checking code. This tests GFP_FS recursion (a lock taken
+	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
+	 * allocation).
+	 */
+	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+		if (hlock->read) {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+					return 0;
+		} else {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
+					return 0;
+		}
+	}
+
 	return 1;
 }
 
@@ -2453,6 +2644,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	case LOCK_ENABLED_SOFTIRQS:
 	case LOCK_ENABLED_HARDIRQS_READ:
 	case LOCK_ENABLED_SOFTIRQS_READ:
+	case LOCK_USED_IN_RECLAIM_FS:
+	case LOCK_USED_IN_RECLAIM_FS_READ:
+	case LOCK_HELD_OVER_RECLAIM_FS:
+	case LOCK_HELD_OVER_RECLAIM_FS_READ:
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
@@ -2966,6 +3161,16 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+	current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+	current->lockdep_reclaim_gfp = 0;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 56b1969..e887b78 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -32,7 +32,8 @@ extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
 
 extern void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
+get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3,
+		char *c4, char *c5, char *c6);
 
 extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
 
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 13716b8..b84a1df 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
 {
 	struct lock_class *class = v;
 	struct lock_list *entry;
-	char c1, c2, c3, c4;
+	char c1, c2, c3, c4, c5, c6;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
 	seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
 #endif
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
-	seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
+	get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6);
+	seq_printf(m, " %c%c%c%c%c%c", c1, c2, c3, c4, c5, c6);
 
 	seq_printf(m, ": ");
 	print_name(m, class);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b30..22b15a4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1479,6 +1479,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	unsigned long pages_reclaimed = 0;
 
+	lockdep_trace_alloc(gfp_mask);
+
 	might_sleep_if(wait);
 
 	if (should_fail_alloc_page(gfp_mask, order))
@@ -1578,12 +1580,15 @@ nofail_alloc:
 	 */
 	cpuset_update_task_memory_state();
 	p->flags |= PF_MEMALLOC;
+
+	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
 	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
 	p->reclaim_state = NULL;
+	lockdep_clear_current_reclaim_state();
 	p->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
diff --git a/mm/slab.c b/mm/slab.c
index ddc41f3..6b61de8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3318,6 +3318,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
@@ -3394,6 +3396,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
diff --git a/mm/slob.c b/mm/slob.c
index bf7e8fc..1264799 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -464,6 +464,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
+	lockdep_trace_alloc(gfp);
+
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
diff --git a/mm/slub.c b/mm/slub.c
index bdc9abb..214eb20 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1596,6 +1596,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
 	if (should_failslab(s->objsize, gfpflags))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a27c44..303eb65 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1963,6 +1963,9 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
+
+	lockdep_set_current_reclaim_state(GFP_KERNEL);
+
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
 	if (!cpumask_empty(cpumask))
--
cgit v1.1


From 4fc95e867f1e75351b89db3c68212dfcce7ea563 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 22 Jan 2009 13:10:52 +0100
Subject: lockdep: sanitize bit names

s/\(LOCKF\?_ENABLED_[^ ]*\)S\(_READ\)\?\>/\1\2/g

So that the USED_IN and ENABLED have the same names.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/lockdep.h | 22 ++++++------
 kernel/lockdep.c        | 84 ++++++++++++++++++++++----------------------
 kernel/lockdep_proc.c   | 12 +++----
 3 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index cc97bdb..da2e2b2 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -28,14 +28,14 @@ enum lock_usage_bit
 	LOCK_USED_IN_HARDIRQ,
 	LOCK_USED_IN_SOFTIRQ,
 	LOCK_USED_IN_RECLAIM_FS,
-	LOCK_ENABLED_SOFTIRQS,
-	LOCK_ENABLED_HARDIRQS,
+	LOCK_ENABLED_SOFTIRQ,
+	LOCK_ENABLED_HARDIRQ,
 	LOCK_HELD_OVER_RECLAIM_FS,
 	LOCK_USED_IN_HARDIRQ_READ,
 	LOCK_USED_IN_SOFTIRQ_READ,
 	LOCK_USED_IN_RECLAIM_FS_READ,
-	LOCK_ENABLED_SOFTIRQS_READ,
-	LOCK_ENABLED_HARDIRQS_READ,
+	LOCK_ENABLED_SOFTIRQ_READ,
+	LOCK_ENABLED_HARDIRQ_READ,
 	LOCK_HELD_OVER_RECLAIM_FS_READ,
 	LOCK_USAGE_STATES
 };
@@ -47,22 +47,22 @@ enum lock_usage_bit
 #define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
 #define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
 #define LOCKF_USED_IN_RECLAIM_FS	(1 << LOCK_USED_IN_RECLAIM_FS)
-#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
-#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
+#define LOCKF_ENABLED_HARDIRQ		(1 << LOCK_ENABLED_HARDIRQ)
+#define LOCKF_ENABLED_SOFTIRQ		(1 << LOCK_ENABLED_SOFTIRQ)
 #define LOCKF_HELD_OVER_RECLAIM_FS	(1 << LOCK_HELD_OVER_RECLAIM_FS)
 
-#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
+#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
 #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
 
 #define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
 #define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
 #define LOCKF_USED_IN_RECLAIM_FS_READ	(1 << LOCK_USED_IN_RECLAIM_FS_READ)
-#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
-#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
+#define LOCKF_ENABLED_HARDIRQ_READ	(1 << LOCK_ENABLED_HARDIRQ_READ)
+#define LOCKF_ENABLED_SOFTIRQ_READ	(1 << LOCK_ENABLED_SOFTIRQ_READ)
 #define LOCKF_HELD_OVER_RECLAIM_FS_READ	(1 << LOCK_HELD_OVER_RECLAIM_FS_READ)
 
-#define LOCKF_ENABLED_IRQS_READ \
-		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
+#define LOCKF_ENABLED_IRQ_READ \
+		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
 #define LOCKF_USED_IN_IRQ_READ \
 		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 977f940..32f9447 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -450,12 +450,12 @@ static const char *usage_str[] =
 	[LOCK_USED] =			"initial-use ",
 	[LOCK_USED_IN_HARDIRQ] =	"in-hardirq-W",
 	[LOCK_USED_IN_SOFTIRQ] =	"in-softirq-W",
-	[LOCK_ENABLED_SOFTIRQS] =	"softirq-on-W",
-	[LOCK_ENABLED_HARDIRQS] =	"hardirq-on-W",
+	[LOCK_ENABLED_SOFTIRQ] =	"softirq-on-W",
+	[LOCK_ENABLED_HARDIRQ] =	"hardirq-on-W",
 	[LOCK_USED_IN_HARDIRQ_READ] =	"in-hardirq-R",
 	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R",
-	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R",
-	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R",
+	[LOCK_ENABLED_SOFTIRQ_READ] =	"softirq-on-R",
+	[LOCK_ENABLED_HARDIRQ_READ] =	"hardirq-on-R",
 	[LOCK_USED_IN_RECLAIM_FS] =	"in-reclaim-W",
 	[LOCK_USED_IN_RECLAIM_FS_READ] = "in-reclaim-R",
 	[LOCK_HELD_OVER_RECLAIM_FS] =	"ov-reclaim-W",
@@ -476,28 +476,28 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3,
 	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 		*c1 = '+';
 	else
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
+		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
 			*c1 = '-';
 
 	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
 		*c2 = '+';
 	else
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
+		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
 			*c2 = '-';
 
-	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
+	if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
 		*c3 = '-';
 	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
 		*c3 = '+';
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
 			*c3 = '?';
 	}
 
-	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
+	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
 		*c4 = '-';
 	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
 		*c4 = '+';
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
 			*c4 = '?';
 	}
 
@@ -1296,7 +1296,7 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
-					LOCK_ENABLED_HARDIRQS, "hard"))
+					LOCK_ENABLED_HARDIRQ, "hard"))
		return 0;
 
	/*
@@ -1306,7 +1306,7 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
-					LOCK_ENABLED_HARDIRQS, "hard-read"))
+					LOCK_ENABLED_HARDIRQ, "hard-read"))
		return 0;
 
	/*
@@ -1316,7 +1316,7 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
-					LOCK_ENABLED_SOFTIRQS, "soft"))
+					LOCK_ENABLED_SOFTIRQ, "soft"))
		return 0;
	/*
	 * Prove that the new dependency does not connect a softirq-safe-read
	 * lock with a softirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
-					LOCK_ENABLED_SOFTIRQS, "soft"))
+					LOCK_ENABLED_SOFTIRQ, "soft"))
		return 0;
 
	/*
@@ -2008,17 +2008,17 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
	switch(new_bit) {
	case LOCK_USED_IN_HARDIRQ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
+		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQ))
			return 0;
		if (!valid_state(curr, this, new_bit,
-				 LOCK_ENABLED_HARDIRQS_READ))
+				 LOCK_ENABLED_HARDIRQ_READ))
			return 0;
		/*
		 * just marked it hardirq-safe, check that this lock
		 * took no hardirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_HARDIRQS, "hard"))
+					  LOCK_ENABLED_HARDIRQ, "hard"))
			return 0;
 #if STRICT_READ_CHECKS
		/*
@@ -2026,24 +2026,24 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		 * took no hardirq-unsafe-read lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
-				LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
+				LOCK_ENABLED_HARDIRQ_READ, "hard-read"))
			return 0;
 #endif
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_USED_IN_SOFTIRQ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
+		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQ))
			return 0;
		if (!valid_state(curr, this, new_bit,
-				 LOCK_ENABLED_SOFTIRQS_READ))
+				 LOCK_ENABLED_SOFTIRQ_READ))
			return 0;
		/*
		 * just marked it softirq-safe, check that this lock
		 * took no softirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_SOFTIRQS, "soft"))
+					  LOCK_ENABLED_SOFTIRQ, "soft"))
			return 0;
 #if STRICT_READ_CHECKS
		/*
@@ -2051,7 +2051,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		 * took no softirq-unsafe-read lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
-				LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
+				LOCK_ENABLED_SOFTIRQ_READ, "soft-read"))
			return 0;
 #endif
		if (softirq_verbose(hlock_class(this)))
@@ -2083,27 +2083,27 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
			ret = 2;
		break;
	case LOCK_USED_IN_HARDIRQ_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
+		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQ))
			return 0;
		/*
		 * just marked it hardirq-read-safe, check that this lock
		 * took no hardirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_HARDIRQS, "hard"))
+					  LOCK_ENABLED_HARDIRQ, "hard"))
			return 0;
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_USED_IN_SOFTIRQ_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
+		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQ))
			return 0;
		/*
		 * just marked it softirq-read-safe, check that this lock
		 * took no softirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_SOFTIRQS, "soft"))
+					  LOCK_ENABLED_SOFTIRQ, "soft"))
			return 0;
		if (softirq_verbose(hlock_class(this)))
			ret = 2;
@@ -2121,7 +2121,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		if (reclaim_verbose(hlock_class(this)))
			ret = 2;
		break;
-	case LOCK_ENABLED_HARDIRQS:
+	case LOCK_ENABLED_HARDIRQ:
		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
			return 0;
		if (!valid_state(curr, this, new_bit,
@@ -2147,7 +2147,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
-	case LOCK_ENABLED_SOFTIRQS:
+	case LOCK_ENABLED_SOFTIRQ:
		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
			return 0;
		if (!valid_state(curr, this, new_bit,
(!valid_state(curr, this, new_bit, @@ -2199,7 +2199,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, if (reclaim_verbose(hlock_class(this))) ret = 2; break; - case LOCK_ENABLED_HARDIRQS_READ: + case LOCK_ENABLED_HARDIRQ_READ: if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) return 0; #if STRICT_READ_CHECKS @@ -2214,7 +2214,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, if (hardirq_verbose(hlock_class(this))) ret = 2; break; - case LOCK_ENABLED_SOFTIRQS_READ: + case LOCK_ENABLED_SOFTIRQ_READ: if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) return 0; #if STRICT_READ_CHECKS @@ -2274,16 +2274,16 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark) switch (mark) { case HARDIRQ: if (hlock->read) - usage_bit = LOCK_ENABLED_HARDIRQS_READ; + usage_bit = LOCK_ENABLED_HARDIRQ_READ; else - usage_bit = LOCK_ENABLED_HARDIRQS; + usage_bit = LOCK_ENABLED_HARDIRQ; break; case SOFTIRQ: if (hlock->read) - usage_bit = LOCK_ENABLED_SOFTIRQS_READ; + usage_bit = LOCK_ENABLED_SOFTIRQ_READ; else - usage_bit = LOCK_ENABLED_SOFTIRQS; + usage_bit = LOCK_ENABLED_SOFTIRQ; break; case RECLAIM_FS: @@ -2520,19 +2520,19 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) if (!hlock->hardirqs_off) { if (hlock->read) { if (!mark_lock(curr, hlock, - LOCK_ENABLED_HARDIRQS_READ)) + LOCK_ENABLED_HARDIRQ_READ)) return 0; if (curr->softirqs_enabled) if (!mark_lock(curr, hlock, - LOCK_ENABLED_SOFTIRQS_READ)) + LOCK_ENABLED_SOFTIRQ_READ)) return 0; } else { if (!mark_lock(curr, hlock, - LOCK_ENABLED_HARDIRQS)) + LOCK_ENABLED_HARDIRQ)) return 0; if (curr->softirqs_enabled) if (!mark_lock(curr, hlock, - LOCK_ENABLED_SOFTIRQS)) + LOCK_ENABLED_SOFTIRQ)) return 0; } } @@ -2640,10 +2640,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, case LOCK_USED_IN_SOFTIRQ: case LOCK_USED_IN_HARDIRQ_READ: case LOCK_USED_IN_SOFTIRQ_READ: - case LOCK_ENABLED_HARDIRQS: - case LOCK_ENABLED_SOFTIRQS: - case LOCK_ENABLED_HARDIRQS_READ: - case LOCK_ENABLED_SOFTIRQS_READ: + case LOCK_ENABLED_HARDIRQ: + case LOCK_ENABLED_SOFTIRQ: + case LOCK_ENABLED_HARDIRQ_READ: + case LOCK_ENABLED_SOFTIRQ_READ: case LOCK_USED_IN_RECLAIM_FS: case LOCK_USED_IN_RECLAIM_FS_READ: case LOCK_HELD_OVER_RECLAIM_FS: diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index b84a1df..bd474fd 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c @@ -300,27 +300,27 @@ static int lockdep_stats_show(struct seq_file *m, void *v) nr_uncategorized++; if (class->usage_mask & LOCKF_USED_IN_IRQ) nr_irq_safe++; - if (class->usage_mask & LOCKF_ENABLED_IRQS) + if (class->usage_mask & LOCKF_ENABLED_IRQ) nr_irq_unsafe++; if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ) nr_softirq_safe++; - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS) + if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ) nr_softirq_unsafe++; if (class->usage_mask & LOCKF_USED_IN_HARDIRQ) nr_hardirq_safe++; - if (class->usage_mask & LOCKF_ENABLED_HARDIRQS) + if (class->usage_mask & LOCKF_ENABLED_HARDIRQ) nr_hardirq_unsafe++; if (class->usage_mask & LOCKF_USED_IN_IRQ_READ) nr_irq_read_safe++; - if (class->usage_mask & LOCKF_ENABLED_IRQS_READ) + if (class->usage_mask & LOCKF_ENABLED_IRQ_READ) nr_irq_read_unsafe++; if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) nr_softirq_read_safe++; - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) + if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ) nr_softirq_read_unsafe++; if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) 
nr_hardirq_read_safe++; - if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) + if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ) nr_hardirq_read_unsafe++; #ifdef CONFIG_PROVE_LOCKING -- cgit v1.1 From a652d7081bc96b3094e85ca30e47f50185d2f717 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 13:13:11 +0100 Subject: lockdep: sanitize reclaim bit names s/HELD_OVER/ENABLED/g so that it's similar to the hard and soft-irq names. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 8 ++++---- kernel/lockdep.c | 38 +++++++++++++++++++------------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index da2e2b2..6d729c9 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -30,13 +30,13 @@ enum lock_usage_bit LOCK_USED_IN_RECLAIM_FS, LOCK_ENABLED_SOFTIRQ, LOCK_ENABLED_HARDIRQ, - LOCK_HELD_OVER_RECLAIM_FS, + LOCK_ENABLED_RECLAIM_FS, LOCK_USED_IN_HARDIRQ_READ, LOCK_USED_IN_SOFTIRQ_READ, LOCK_USED_IN_RECLAIM_FS_READ, LOCK_ENABLED_SOFTIRQ_READ, LOCK_ENABLED_HARDIRQ_READ, - LOCK_HELD_OVER_RECLAIM_FS_READ, + LOCK_ENABLED_RECLAIM_FS_READ, LOCK_USAGE_STATES }; @@ -49,7 +49,7 @@ enum lock_usage_bit #define LOCKF_USED_IN_RECLAIM_FS (1 << LOCK_USED_IN_RECLAIM_FS) #define LOCKF_ENABLED_HARDIRQ (1 << LOCK_ENABLED_HARDIRQ) #define LOCKF_ENABLED_SOFTIRQ (1 << LOCK_ENABLED_SOFTIRQ) -#define LOCKF_HELD_OVER_RECLAIM_FS (1 << LOCK_HELD_OVER_RECLAIM_FS) +#define LOCKF_ENABLED_RECLAIM_FS (1 << LOCK_ENABLED_RECLAIM_FS) #define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ) #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) @@ -59,7 +59,7 @@ enum lock_usage_bit #define LOCKF_USED_IN_RECLAIM_FS_READ (1 << LOCK_USED_IN_RECLAIM_FS_READ) #define LOCKF_ENABLED_HARDIRQ_READ (1 << LOCK_ENABLED_HARDIRQ_READ) #define LOCKF_ENABLED_SOFTIRQ_READ (1 << LOCK_ENABLED_SOFTIRQ_READ) -#define LOCKF_HELD_OVER_RECLAIM_FS_READ (1 << LOCK_HELD_OVER_RECLAIM_FS_READ) +#define LOCKF_ENABLED_RECLAIM_FS_READ (1 << LOCK_ENABLED_RECLAIM_FS_READ) #define LOCKF_ENABLED_IRQ_READ \ (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 32f9447..dd4716c 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -458,8 +458,8 @@ static const char *usage_str[] = [LOCK_ENABLED_HARDIRQ_READ] = "hardirq-on-R", [LOCK_USED_IN_RECLAIM_FS] = "in-reclaim-W", [LOCK_USED_IN_RECLAIM_FS_READ] = "in-reclaim-R", - [LOCK_HELD_OVER_RECLAIM_FS] = "ov-reclaim-W", - [LOCK_HELD_OVER_RECLAIM_FS_READ] = "ov-reclaim-R", + [LOCK_ENABLED_RECLAIM_FS] = "ov-reclaim-W", + [LOCK_ENABLED_RECLAIM_FS_READ] = "ov-reclaim-R", }; const char * __get_key_name(struct lockdep_subclass_key *key, char *str) @@ -504,14 +504,14 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS) *c5 = '+'; else - if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS) + if (class->usage_mask & LOCKF_ENABLED_RECLAIM_FS) *c5 = '-'; - if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ) + if (class->usage_mask & LOCKF_ENABLED_RECLAIM_FS_READ) *c6 = '-'; if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS_READ) { *c6 = '+'; - if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ) + if (class->usage_mask & LOCKF_ENABLED_RECLAIM_FS_READ) *c6 = '?'; } @@ -1335,7 +1335,7 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, * forwards-subgraph starting at : */ if (!check_usage(curr, prev, next, 
LOCK_USED_IN_RECLAIM_FS, - LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs")) + LOCK_ENABLED_RECLAIM_FS, "reclaim-fs")) return 0; /* @@ -1345,7 +1345,7 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, * forwards-subgraph starting at : */ if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS_READ, - LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs-read")) + LOCK_ENABLED_RECLAIM_FS, "reclaim-fs-read")) return 0; return 1; @@ -2058,17 +2058,17 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, ret = 2; break; case LOCK_USED_IN_RECLAIM_FS: - if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS)) + if (!valid_state(curr, this, new_bit, LOCK_ENABLED_RECLAIM_FS)) return 0; if (!valid_state(curr, this, new_bit, - LOCK_HELD_OVER_RECLAIM_FS_READ)) + LOCK_ENABLED_RECLAIM_FS_READ)) return 0; /* * just marked it reclaim-fs-safe, check that this lock * took no reclaim-fs-unsafe lock in the past: */ if (!check_usage_forwards(curr, this, - LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs")) + LOCK_ENABLED_RECLAIM_FS, "reclaim-fs")) return 0; #if STRICT_READ_CHECKS /* @@ -2076,7 +2076,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, * took no reclaim-fs-unsafe-read lock in the past: */ if (!check_usage_forwards(curr, this, - LOCK_HELD_OVER_RECLAIM_FS_READ, "reclaim-fs-read")) + LOCK_ENABLED_RECLAIM_FS_READ, "reclaim-fs-read")) return 0; #endif if (reclaim_verbose(hlock_class(this))) @@ -2109,14 +2109,14 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, ret = 2; break; case LOCK_USED_IN_RECLAIM_FS_READ: - if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS)) + if (!valid_state(curr, this, new_bit, LOCK_ENABLED_RECLAIM_FS)) return 0; /* * just marked it reclaim-fs-read-safe, check that this lock * took no reclaim-fs-unsafe lock in the past: */ if (!check_usage_forwards(curr, this, - LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs")) + LOCK_ENABLED_RECLAIM_FS, "reclaim-fs")) return 0; if (reclaim_verbose(hlock_class(this))) ret = 2; @@ -2173,7 +2173,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, if (softirq_verbose(hlock_class(this))) ret = 2; break; - case LOCK_HELD_OVER_RECLAIM_FS: + case LOCK_ENABLED_RECLAIM_FS: if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS)) return 0; if (!valid_state(curr, this, new_bit, @@ -2229,7 +2229,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, if (softirq_verbose(hlock_class(this))) ret = 2; break; - case LOCK_HELD_OVER_RECLAIM_FS_READ: + case LOCK_ENABLED_RECLAIM_FS_READ: if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS)) return 0; #if STRICT_READ_CHECKS @@ -2288,9 +2288,9 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark) case RECLAIM_FS: if (hlock->read) - usage_bit = LOCK_HELD_OVER_RECLAIM_FS_READ; + usage_bit = LOCK_ENABLED_RECLAIM_FS_READ; else - usage_bit = LOCK_HELD_OVER_RECLAIM_FS; + usage_bit = LOCK_ENABLED_RECLAIM_FS; break; default: @@ -2646,8 +2646,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, case LOCK_ENABLED_SOFTIRQ_READ: case LOCK_USED_IN_RECLAIM_FS: case LOCK_USED_IN_RECLAIM_FS_READ: - case LOCK_HELD_OVER_RECLAIM_FS: - case LOCK_HELD_OVER_RECLAIM_FS_READ: + case LOCK_ENABLED_RECLAIM_FS: + case LOCK_ENABLED_RECLAIM_FS_READ: ret = mark_lock_irq(curr, this, new_bit); if (!ret) return 0; -- cgit v1.1 From 9fe51abf7a1c787f918f66fa3cef9cd0cedb3791 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 14:09:46 +0100 
Subject: lockdep: lockdep_states.h Introduce a header file to generate all the states from. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep_states.h | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/lockdep_states.h diff --git a/kernel/lockdep_states.h b/kernel/lockdep_states.h new file mode 100644 index 0000000..937039e --- /dev/null +++ b/kernel/lockdep_states.h @@ -0,0 +1,3 @@ +LOCKDEP_STATE(HARDIRQ) +LOCKDEP_STATE(SOFTIRQ) +LOCKDEP_STATE(RECLAIM_FS) -- cgit v1.1 From 36bfb9bb03db2002a8574600c6aeb4cdd1ba01a6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 14:12:41 +0100 Subject: lockdep: simplify mark_held_locks remove the explicit state iteration Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 38 ++++++++++++++------------------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index dd4716c..18e0990 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2253,11 +2253,19 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, } enum mark_type { - HARDIRQ, - SOFTIRQ, - RECLAIM_FS, +#define LOCKDEP_STATE(__STATE) __STATE, +#include "lockdep_states.h" +#undef LOCKDEP_STATE }; +#define MARK_HELD_CASE(__STATE) \ + case __STATE: \ + if (hlock->read) \ + usage_bit = LOCK_ENABLED_##__STATE##_READ; \ + else \ + usage_bit = LOCK_ENABLED_##__STATE; \ + break; + /* * Mark all held locks with a usage bit: */ @@ -2272,27 +2280,9 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark) hlock = curr->held_locks + i; switch (mark) { - case HARDIRQ: - if (hlock->read) - usage_bit = LOCK_ENABLED_HARDIRQ_READ; - else - usage_bit = LOCK_ENABLED_HARDIRQ; - break; - - case SOFTIRQ: - if (hlock->read) - usage_bit = LOCK_ENABLED_SOFTIRQ_READ; - else - usage_bit = LOCK_ENABLED_SOFTIRQ; - break; - - case RECLAIM_FS: - if (hlock->read) - usage_bit = LOCK_ENABLED_RECLAIM_FS_READ; - else - usage_bit = LOCK_ENABLED_RECLAIM_FS; - break; - +#define LOCKDEP_STATE(__STATE) MARK_HELD_CASE(__STATE) +#include "lockdep_states.h" +#undef LOCKDEP_STATE default: BUG(); } -- cgit v1.1 From 5346417e17daf5a7712e4cf030b45414e46607cf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 14:15:53 +0100 Subject: lockdep: simplify mark_lock() remove the state iteration Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 18e0990..e68bd7d 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2626,18 +2626,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, return 0; switch (new_bit) { - case LOCK_USED_IN_HARDIRQ: - case LOCK_USED_IN_SOFTIRQ: - case LOCK_USED_IN_HARDIRQ_READ: - case LOCK_USED_IN_SOFTIRQ_READ: - case LOCK_ENABLED_HARDIRQ: - case LOCK_ENABLED_SOFTIRQ: - case LOCK_ENABLED_HARDIRQ_READ: - case LOCK_ENABLED_SOFTIRQ_READ: - case LOCK_USED_IN_RECLAIM_FS: - case LOCK_USED_IN_RECLAIM_FS_READ: - case LOCK_ENABLED_RECLAIM_FS: - case LOCK_ENABLED_RECLAIM_FS_READ: +#define LOCKDEP_STATE(__STATE) \ + case LOCK_USED_IN_##__STATE: \ + case LOCK_USED_IN_##__STATE##_READ: \ + case LOCK_ENABLED_##__STATE: \ + case LOCK_ENABLED_##__STATE##_READ: +#include "lockdep_states.h" +#undef LOCKDEP_STATE ret = mark_lock_irq(curr, this, new_bit); if (!ret) return 0; -- cgit v1.1 From 9851673bc32bc9fcafbbaeffc858ead434bd6d58 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: 
Thu, 22 Jan 2009 14:18:40 +0100 Subject: lockdep: move state bit definitions around For convenience later. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 49 ++++------------------------------------------ kernel/lockdep_internals.h | 46 +++++++++++++++++++++++++++++++++++++++++++ kernel/lockdep_states.h | 6 ++++++ 3 files changed, 56 insertions(+), 45 deletions(-) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 6d729c9..5a58ea3 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -20,51 +20,10 @@ struct lockdep_map; #include /* - * Lock-class usage-state bits: + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need + * the total number of states... :-( */ -enum lock_usage_bit -{ - LOCK_USED = 0, - LOCK_USED_IN_HARDIRQ, - LOCK_USED_IN_SOFTIRQ, - LOCK_USED_IN_RECLAIM_FS, - LOCK_ENABLED_SOFTIRQ, - LOCK_ENABLED_HARDIRQ, - LOCK_ENABLED_RECLAIM_FS, - LOCK_USED_IN_HARDIRQ_READ, - LOCK_USED_IN_SOFTIRQ_READ, - LOCK_USED_IN_RECLAIM_FS_READ, - LOCK_ENABLED_SOFTIRQ_READ, - LOCK_ENABLED_HARDIRQ_READ, - LOCK_ENABLED_RECLAIM_FS_READ, - LOCK_USAGE_STATES -}; - -/* - * Usage-state bitmasks: - */ -#define LOCKF_USED (1 << LOCK_USED) -#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ) -#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ) -#define LOCKF_USED_IN_RECLAIM_FS (1 << LOCK_USED_IN_RECLAIM_FS) -#define LOCKF_ENABLED_HARDIRQ (1 << LOCK_ENABLED_HARDIRQ) -#define LOCKF_ENABLED_SOFTIRQ (1 << LOCK_ENABLED_SOFTIRQ) -#define LOCKF_ENABLED_RECLAIM_FS (1 << LOCK_ENABLED_RECLAIM_FS) - -#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ) -#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) - -#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ) -#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ) -#define LOCKF_USED_IN_RECLAIM_FS_READ (1 << LOCK_USED_IN_RECLAIM_FS_READ) -#define LOCKF_ENABLED_HARDIRQ_READ (1 << LOCK_ENABLED_HARDIRQ_READ) -#define LOCKF_ENABLED_SOFTIRQ_READ (1 << LOCK_ENABLED_SOFTIRQ_READ) -#define LOCKF_ENABLED_RECLAIM_FS_READ (1 << LOCK_ENABLED_RECLAIM_FS_READ) - -#define LOCKF_ENABLED_IRQ_READ \ - (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ) -#define LOCKF_USED_IN_IRQ_READ \ - (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) +#define XXX_LOCK_USAGE_STATES (1+3*4) #define MAX_LOCKDEP_SUBCLASSES 8UL @@ -105,7 +64,7 @@ struct lock_class { * IRQ/softirq usage tracking bits: */ unsigned long usage_mask; - struct stack_trace usage_traces[LOCK_USAGE_STATES]; + struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; /* * These fields represent a directed graph of lock dependencies, diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index e887b78..1352409 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h @@ -7,6 +7,52 @@ */ /* + * Lock-class usage-state bits: + */ +enum lock_usage_bit { + LOCK_USED = 0, + LOCK_USED_IN_HARDIRQ, + LOCK_USED_IN_SOFTIRQ, + LOCK_USED_IN_RECLAIM_FS, + LOCK_ENABLED_SOFTIRQ, + LOCK_ENABLED_HARDIRQ, + LOCK_ENABLED_RECLAIM_FS, + LOCK_USED_IN_HARDIRQ_READ, + LOCK_USED_IN_SOFTIRQ_READ, + LOCK_USED_IN_RECLAIM_FS_READ, + LOCK_ENABLED_SOFTIRQ_READ, + LOCK_ENABLED_HARDIRQ_READ, + LOCK_ENABLED_RECLAIM_FS_READ, + LOCK_USAGE_STATES +}; + +/* + * Usage-state bitmasks: + */ +#define LOCKF_USED (1 << LOCK_USED) +#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ) +#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ) +#define 
LOCKF_USED_IN_RECLAIM_FS (1 << LOCK_USED_IN_RECLAIM_FS) +#define LOCKF_ENABLED_HARDIRQ (1 << LOCK_ENABLED_HARDIRQ) +#define LOCKF_ENABLED_SOFTIRQ (1 << LOCK_ENABLED_SOFTIRQ) +#define LOCKF_ENABLED_RECLAIM_FS (1 << LOCK_ENABLED_RECLAIM_FS) + +#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ) +#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) + +#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ) +#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ) +#define LOCKF_USED_IN_RECLAIM_FS_READ (1 << LOCK_USED_IN_RECLAIM_FS_READ) +#define LOCKF_ENABLED_HARDIRQ_READ (1 << LOCK_ENABLED_HARDIRQ_READ) +#define LOCKF_ENABLED_SOFTIRQ_READ (1 << LOCK_ENABLED_SOFTIRQ_READ) +#define LOCKF_ENABLED_RECLAIM_FS_READ (1 << LOCK_ENABLED_RECLAIM_FS_READ) + +#define LOCKF_ENABLED_IRQ_READ \ + (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ) +#define LOCKF_USED_IN_IRQ_READ \ + (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) + +/* * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies * we track. * diff --git a/kernel/lockdep_states.h b/kernel/lockdep_states.h index 937039e..995b0cc 100644 --- a/kernel/lockdep_states.h +++ b/kernel/lockdep_states.h @@ -1,3 +1,9 @@ +/* + * Lockdep states, + * + * please update XXX_LOCK_USAGE_STATES in include/linux/lockdep.h whenever + * you add one, or come up with a nice dynamic solution. + */ LOCKDEP_STATE(HARDIRQ) LOCKDEP_STATE(SOFTIRQ) LOCKDEP_STATE(RECLAIM_FS) -- cgit v1.1 From d7b1b02134272840f4b655136e00c461e1cf1d53 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 14:38:38 +0100 Subject: lockdep: generate the state bit definitions Generate the state bit definitions from the lockdep_states.h file. Also, move LOCK_USED to last, so that the USED_IN USED_IN_READ ENABLED ENABLED_READ states are nicely bit aligned -- we're going to use that property Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep_internals.h | 47 ++++++++++++++++++++-------------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index 1352409..7e653e6 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h @@ -10,43 +10,36 @@ * Lock-class usage-state bits: */ enum lock_usage_bit { - LOCK_USED = 0, - LOCK_USED_IN_HARDIRQ, - LOCK_USED_IN_SOFTIRQ, - LOCK_USED_IN_RECLAIM_FS, - LOCK_ENABLED_SOFTIRQ, - LOCK_ENABLED_HARDIRQ, - LOCK_ENABLED_RECLAIM_FS, - LOCK_USED_IN_HARDIRQ_READ, - LOCK_USED_IN_SOFTIRQ_READ, - LOCK_USED_IN_RECLAIM_FS_READ, - LOCK_ENABLED_SOFTIRQ_READ, - LOCK_ENABLED_HARDIRQ_READ, - LOCK_ENABLED_RECLAIM_FS_READ, +#define LOCKDEP_STATE(__STATE) \ + LOCK_USED_IN_##__STATE, \ + LOCK_USED_IN_##__STATE##_READ, \ + LOCK_ENABLED_##__STATE, \ + LOCK_ENABLED_##__STATE##_READ, +#include "lockdep_states.h" +#undef LOCKDEP_STATE + LOCK_USED, LOCK_USAGE_STATES }; /* * Usage-state bitmasks: */ -#define LOCKF_USED (1 << LOCK_USED) -#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ) -#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ) -#define LOCKF_USED_IN_RECLAIM_FS (1 << LOCK_USED_IN_RECLAIM_FS) -#define LOCKF_ENABLED_HARDIRQ (1 << LOCK_ENABLED_HARDIRQ) -#define LOCKF_ENABLED_SOFTIRQ (1 << LOCK_ENABLED_SOFTIRQ) -#define LOCKF_ENABLED_RECLAIM_FS (1 << LOCK_ENABLED_RECLAIM_FS) +#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE), + +enum { +#define LOCKDEP_STATE(__STATE) \ + __LOCKF(USED_IN_##__STATE) \ + __LOCKF(USED_IN_##__STATE##_READ) \ + 
__LOCKF(ENABLED_##__STATE) \ + __LOCKF(ENABLED_##__STATE##_READ) +#include "lockdep_states.h" +#undef LOCKDEP_STATE + __LOCKF(USED) +}; #define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ) #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) -#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ) -#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ) -#define LOCKF_USED_IN_RECLAIM_FS_READ (1 << LOCK_USED_IN_RECLAIM_FS_READ) -#define LOCKF_ENABLED_HARDIRQ_READ (1 << LOCK_ENABLED_HARDIRQ_READ) -#define LOCKF_ENABLED_SOFTIRQ_READ (1 << LOCK_ENABLED_SOFTIRQ_READ) -#define LOCKF_ENABLED_RECLAIM_FS_READ (1 << LOCK_ENABLED_RECLAIM_FS_READ) - #define LOCKF_ENABLED_IRQ_READ \ (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ) #define LOCKF_USED_IN_IRQ_READ \ -- cgit v1.1 From fabe9c42c6328de314d811887b4752eb3d202291 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 14:51:01 +0100 Subject: lockdep: generate usage strings generate the usage strings XXX capital invasion :-( Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index e68bd7d..d31f7f8 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -445,21 +445,21 @@ atomic_t nr_find_usage_backwards_recursions; * Locking printouts: */ +#define __STR(foo) #foo +#define STR(foo) __STR(foo) + +#define __USAGE(__STATE) \ + [LOCK_USED_IN_##__STATE] = "IN-"STR(__STATE)"-W", \ + [LOCK_ENABLED_##__STATE] = STR(__STATE)"-ON-W", \ + [LOCK_USED_IN_##__STATE##_READ] = "IN-"STR(__STATE)"-R", \ + [LOCK_ENABLED_##__STATE##_READ] = STR(__STATE)"-ON-R", + static const char *usage_str[] = { - [LOCK_USED] = "initial-use ", - [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W", - [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W", - [LOCK_ENABLED_SOFTIRQ] = "softirq-on-W", - [LOCK_ENABLED_HARDIRQ] = "hardirq-on-W", - [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R", - [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R", - [LOCK_ENABLED_SOFTIRQ_READ] = "softirq-on-R", - [LOCK_ENABLED_HARDIRQ_READ] = "hardirq-on-R", - [LOCK_USED_IN_RECLAIM_FS] = "in-reclaim-W", - [LOCK_USED_IN_RECLAIM_FS_READ] = "in-reclaim-R", - [LOCK_ENABLED_RECLAIM_FS] = "ov-reclaim-W", - [LOCK_ENABLED_RECLAIM_FS_READ] = "ov-reclaim-R", +#define LOCKDEP_STATE(__STATE) __USAGE(__STATE) +#include "lockdep_states.h" +#undef LOCKDEP_STATE + [LOCK_USED] = "INITIAL USE", }; const char * __get_key_name(struct lockdep_subclass_key *key, char *str) -- cgit v1.1 From 6a6904d3475cc5e4a506b5c3c3684444e22c4cc4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 16:07:44 +0100 Subject: lockdep: split up mark_lock_irq() split mark_lock_irq() into 4 simple helper functions Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 372 ++++++++++++++++++++++--------------------------------- 1 file changed, 147 insertions(+), 225 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index d31f7f8..0b863c8 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2001,6 +2001,109 @@ static int reclaim_verbose(struct lock_class *class) #define STRICT_READ_CHECKS 1 +static int +mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, + int new_bit, int excl_bit, + const char *name, const char *rname, + int (*verbose)(struct lock_class *class)) +{ + if (!valid_state(curr, this, new_bit, excl_bit)) + return 0; + if (!valid_state(curr, this, new_bit, 
excl_bit + 1)) + return 0; + /* + * just marked it hardirq-safe, check that this lock + * took no hardirq-unsafe lock in the past: + */ + if (!check_usage_forwards(curr, this, excl_bit, name)) + return 0; +#if STRICT_READ_CHECKS + /* + * just marked it hardirq-safe, check that this lock + * took no hardirq-unsafe-read lock in the past: + */ + if (!check_usage_forwards(curr, this, excl_bit + 1, rname)) + return 0; +#endif + if (verbose(hlock_class(this))) + return 2; + + return 1; +} + +static int +mark_lock_irq_used_in_read(struct task_struct *curr, struct held_lock *this, + int new_bit, int excl_bit, + const char *name, const char *rname, + int (*verbose)(struct lock_class *class)) +{ + if (!valid_state(curr, this, new_bit, excl_bit)) + return 0; + /* + * just marked it hardirq-read-safe, check that this lock + * took no hardirq-unsafe lock in the past: + */ + if (!check_usage_forwards(curr, this, excl_bit, name)) + return 0; + if (verbose(hlock_class(this))) + return 2; + + return 1; +} + +static int +mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, + int new_bit, int excl_bit, + const char *name, const char *rname, + int (*verbose)(struct lock_class *class)) +{ + if (!valid_state(curr, this, new_bit, excl_bit)) + return 0; + if (!valid_state(curr, this, new_bit, excl_bit + 1)) + return 0; + /* + * just marked it hardirq-unsafe, check that no hardirq-safe + * lock in the system ever took it in the past: + */ + if (!check_usage_backwards(curr, this, excl_bit, name)) + return 0; +#if STRICT_READ_CHECKS + /* + * just marked it hardirq-unsafe, check that no + * hardirq-safe-read lock in the system ever took + * it in the past: + */ + if (!check_usage_backwards(curr, this, excl_bit + 1, rname)) + return 0; +#endif + if (verbose(hlock_class(this))) + return 2; + + return 1; +} + +static int +mark_lock_irq_enabled_read(struct task_struct *curr, struct held_lock *this, + int new_bit, int excl_bit, + const char *name, const char *rname, + int (*verbose)(struct lock_class *class)) +{ + if (!valid_state(curr, this, new_bit, excl_bit)) + return 0; +#if STRICT_READ_CHECKS + /* + * just marked it hardirq-read-unsafe, check that no + * hardirq-safe lock in the system ever took it in the past: + */ + if (!check_usage_backwards(curr, this, excl_bit, name)) + return 0; +#endif + if (verbose(hlock_class(this))) + return 2; + + return 1; +} + static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit) { @@ -2008,242 +2111,61 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, switch(new_bit) { case LOCK_USED_IN_HARDIRQ: - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQ)) - return 0; - if (!valid_state(curr, this, new_bit, - LOCK_ENABLED_HARDIRQ_READ)) - return 0; - /* - * just marked it hardirq-safe, check that this lock - * took no hardirq-unsafe lock in the past: - */ - if (!check_usage_forwards(curr, this, - LOCK_ENABLED_HARDIRQ, "hard")) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it hardirq-safe, check that this lock - * took no hardirq-unsafe-read lock in the past: - */ - if (!check_usage_forwards(curr, this, - LOCK_ENABLED_HARDIRQ_READ, "hard-read")) - return 0; -#endif - if (hardirq_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_used_in(curr, this, new_bit, + LOCK_ENABLED_HARDIRQ, + "hard", "hard-read", hardirq_verbose); case LOCK_USED_IN_SOFTIRQ: - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQ)) - return 0; - if (!valid_state(curr, this, new_bit, 
- LOCK_ENABLED_SOFTIRQ_READ)) - return 0; - /* - * just marked it softirq-safe, check that this lock - * took no softirq-unsafe lock in the past: - */ - if (!check_usage_forwards(curr, this, - LOCK_ENABLED_SOFTIRQ, "soft")) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it softirq-safe, check that this lock - * took no softirq-unsafe-read lock in the past: - */ - if (!check_usage_forwards(curr, this, - LOCK_ENABLED_SOFTIRQ_READ, "soft-read")) - return 0; -#endif - if (softirq_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_used_in(curr, this, new_bit, + LOCK_ENABLED_SOFTIRQ, + "soft", "soft-read", softirq_verbose); case LOCK_USED_IN_RECLAIM_FS: - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_RECLAIM_FS)) - return 0; - if (!valid_state(curr, this, new_bit, - LOCK_ENABLED_RECLAIM_FS_READ)) - return 0; - /* - * just marked it reclaim-fs-safe, check that this lock - * took no reclaim-fs-unsafe lock in the past: - */ - if (!check_usage_forwards(curr, this, - LOCK_ENABLED_RECLAIM_FS, "reclaim-fs")) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it reclaim-fs-safe, check that this lock - * took no reclaim-fs-unsafe-read lock in the past: - */ - if (!check_usage_forwards(curr, this, - LOCK_ENABLED_RECLAIM_FS_READ, "reclaim-fs-read")) - return 0; -#endif - if (reclaim_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_used_in(curr, this, new_bit, + LOCK_ENABLED_RECLAIM_FS, + "reclaim-fs", "reclaim-fs-read", + reclaim_verbose); + case LOCK_USED_IN_HARDIRQ_READ: - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQ)) - return 0; - /* - * just marked it hardirq-read-safe, check that this lock - * took no hardirq-unsafe lock in the past: - */ - if (!check_usage_forwards(curr, this, - LOCK_ENABLED_HARDIRQ, "hard")) - return 0; - if (hardirq_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_used_in_read(curr, this, new_bit, + LOCK_ENABLED_HARDIRQ, + "hard", "hard-read", hardirq_verbose); case LOCK_USED_IN_SOFTIRQ_READ: - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQ)) - return 0; - /* - * just marked it softirq-read-safe, check that this lock - * took no softirq-unsafe lock in the past: - */ - if (!check_usage_forwards(curr, this, - LOCK_ENABLED_SOFTIRQ, "soft")) - return 0; - if (softirq_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_used_in_read(curr, this, new_bit, + LOCK_ENABLED_SOFTIRQ, + "soft", "soft-read", softirq_verbose); case LOCK_USED_IN_RECLAIM_FS_READ: - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_RECLAIM_FS)) - return 0; - /* - * just marked it reclaim-fs-read-safe, check that this lock - * took no reclaim-fs-unsafe lock in the past: - */ - if (!check_usage_forwards(curr, this, - LOCK_ENABLED_RECLAIM_FS, "reclaim-fs")) - return 0; - if (reclaim_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_used_in_read(curr, this, new_bit, + LOCK_ENABLED_RECLAIM_FS, + "reclaim-fs", "reclaim-fs-read", + reclaim_verbose); + case LOCK_ENABLED_HARDIRQ: - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) - return 0; - if (!valid_state(curr, this, new_bit, - LOCK_USED_IN_HARDIRQ_READ)) - return 0; - /* - * just marked it hardirq-unsafe, check that no hardirq-safe - * lock in the system ever took it in the past: - */ - if (!check_usage_backwards(curr, this, - LOCK_USED_IN_HARDIRQ, "hard")) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it hardirq-unsafe, check that no - * hardirq-safe-read lock in the system ever took - * it in 
the past: - */ - if (!check_usage_backwards(curr, this, - LOCK_USED_IN_HARDIRQ_READ, "hard-read")) - return 0; -#endif - if (hardirq_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_enabled(curr, this, new_bit, + LOCK_USED_IN_HARDIRQ, + "hard", "hard-read", hardirq_verbose); case LOCK_ENABLED_SOFTIRQ: - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) - return 0; - if (!valid_state(curr, this, new_bit, - LOCK_USED_IN_SOFTIRQ_READ)) - return 0; - /* - * just marked it softirq-unsafe, check that no softirq-safe - * lock in the system ever took it in the past: - */ - if (!check_usage_backwards(curr, this, - LOCK_USED_IN_SOFTIRQ, "soft")) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it softirq-unsafe, check that no - * softirq-safe-read lock in the system ever took - * it in the past: - */ - if (!check_usage_backwards(curr, this, - LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) - return 0; -#endif - if (softirq_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_enabled(curr, this, new_bit, + LOCK_USED_IN_SOFTIRQ, + "soft", "soft-read", softirq_verbose); case LOCK_ENABLED_RECLAIM_FS: - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS)) - return 0; - if (!valid_state(curr, this, new_bit, - LOCK_USED_IN_RECLAIM_FS_READ)) - return 0; - /* - * just marked it reclaim-fs-unsafe, check that no reclaim-fs-safe - * lock in the system ever took it in the past: - */ - if (!check_usage_backwards(curr, this, - LOCK_USED_IN_RECLAIM_FS, "reclaim-fs")) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it softirq-unsafe, check that no - * softirq-safe-read lock in the system ever took - * it in the past: - */ - if (!check_usage_backwards(curr, this, - LOCK_USED_IN_RECLAIM_FS_READ, "reclaim-fs-read")) - return 0; -#endif - if (reclaim_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_enabled(curr, this, new_bit, + LOCK_USED_IN_RECLAIM_FS, + "reclaim-fs", "reclaim-fs-read", + reclaim_verbose); + case LOCK_ENABLED_HARDIRQ_READ: - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it hardirq-read-unsafe, check that no - * hardirq-safe lock in the system ever took it in the past: - */ - if (!check_usage_backwards(curr, this, - LOCK_USED_IN_HARDIRQ, "hard")) - return 0; -#endif - if (hardirq_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_enabled_read(curr, this, new_bit, + LOCK_USED_IN_HARDIRQ, + "hard", "hard-read", hardirq_verbose); case LOCK_ENABLED_SOFTIRQ_READ: - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it softirq-read-unsafe, check that no - * softirq-safe lock in the system ever took it in the past: - */ - if (!check_usage_backwards(curr, this, - LOCK_USED_IN_SOFTIRQ, "soft")) - return 0; -#endif - if (softirq_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_enabled_read(curr, this, new_bit, + LOCK_USED_IN_SOFTIRQ, + "soft", "soft-read", softirq_verbose); case LOCK_ENABLED_RECLAIM_FS_READ: - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS)) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it reclaim-fs-read-unsafe, check that no - * reclaim-fs-safe lock in the system ever took it in the past: - */ - if (!check_usage_backwards(curr, this, - LOCK_USED_IN_RECLAIM_FS, "reclaim-fs")) - return 0; -#endif - if (reclaim_verbose(hlock_class(this))) - ret = 2; - break; + return mark_lock_irq_enabled_read(curr, 
this, new_bit, + LOCK_USED_IN_RECLAIM_FS, + "reclaim-fs", "reclaim-fs-read", + reclaim_verbose); + default: WARN_ON(1); break; -- cgit v1.1 From 604de3b5b63ebc33a762c44d9c742f235b010346 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 16:24:44 +0100 Subject: lockdep: simplify the mark_lock_irq() helpers In order to unify them, take some arguments away Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 60 +++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 16 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 0b863c8..989a60b 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2001,12 +2001,38 @@ static int reclaim_verbose(struct lock_class *class) #define STRICT_READ_CHECKS 1 +static const char *state_names[] = { +#define LOCKDEP_STATE(__STATE) \ + STR(__STATE), +#include "lockdep_states.h" +#undef LOCKDEP_STATE +}; + +static inline const char *state_name(enum lock_usage_bit bit) +{ + return state_names[bit >> 2]; +} + +static const char *state_rnames[] = { +#define LOCKDEP_STATE(__STATE) \ + STR(__STATE)"-READ", +#include "lockdep_states.h" +#undef LOCKDEP_STATE +}; + +static inline const char *state_rname(enum lock_usage_bit bit) +{ + return state_rnames[bit >> 2]; +} + static int mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, int new_bit, int excl_bit, - const char *name, const char *rname, int (*verbose)(struct lock_class *class)) { + const char *name = state_name(new_bit); + const char *rname = state_rname(new_bit); + if (!valid_state(curr, this, new_bit, excl_bit)) return 0; if (!valid_state(curr, this, new_bit, excl_bit + 1)) @@ -2034,9 +2060,11 @@ mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, static int mark_lock_irq_used_in_read(struct task_struct *curr, struct held_lock *this, int new_bit, int excl_bit, - const char *name, const char *rname, int (*verbose)(struct lock_class *class)) { + const char *name = state_name(new_bit); + const char *rname = state_rname(new_bit); + if (!valid_state(curr, this, new_bit, excl_bit)) return 0; /* @@ -2054,9 +2082,11 @@ mark_lock_irq_used_in_read(struct task_struct *curr, struct held_lock *this, static int mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, int new_bit, int excl_bit, - const char *name, const char *rname, int (*verbose)(struct lock_class *class)) { + const char *name = state_name(new_bit); + const char *rname = state_rname(new_bit); + if (!valid_state(curr, this, new_bit, excl_bit)) return 0; if (!valid_state(curr, this, new_bit, excl_bit + 1)) @@ -2085,9 +2115,11 @@ mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, static int mark_lock_irq_enabled_read(struct task_struct *curr, struct held_lock *this, int new_bit, int excl_bit, - const char *name, const char *rname, int (*verbose)(struct lock_class *class)) { + const char *name = state_name(new_bit); + const char *rname = state_rname(new_bit); + if (!valid_state(curr, this, new_bit, excl_bit)) return 0; #if STRICT_READ_CHECKS @@ -2113,57 +2145,53 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, case LOCK_USED_IN_HARDIRQ: return mark_lock_irq_used_in(curr, this, new_bit, LOCK_ENABLED_HARDIRQ, - "hard", "hard-read", hardirq_verbose); + hardirq_verbose); case LOCK_USED_IN_SOFTIRQ: return mark_lock_irq_used_in(curr, this, new_bit, LOCK_ENABLED_SOFTIRQ, - "soft", "soft-read", softirq_verbose); + softirq_verbose); case LOCK_USED_IN_RECLAIM_FS: return 
mark_lock_irq_used_in(curr, this, new_bit, LOCK_ENABLED_RECLAIM_FS, - "reclaim-fs", "reclaim-fs-read", reclaim_verbose); case LOCK_USED_IN_HARDIRQ_READ: return mark_lock_irq_used_in_read(curr, this, new_bit, LOCK_ENABLED_HARDIRQ, - "hard", "hard-read", hardirq_verbose); + hardirq_verbose); case LOCK_USED_IN_SOFTIRQ_READ: return mark_lock_irq_used_in_read(curr, this, new_bit, LOCK_ENABLED_SOFTIRQ, - "soft", "soft-read", softirq_verbose); + softirq_verbose); case LOCK_USED_IN_RECLAIM_FS_READ: return mark_lock_irq_used_in_read(curr, this, new_bit, LOCK_ENABLED_RECLAIM_FS, - "reclaim-fs", "reclaim-fs-read", reclaim_verbose); case LOCK_ENABLED_HARDIRQ: return mark_lock_irq_enabled(curr, this, new_bit, LOCK_USED_IN_HARDIRQ, - "hard", "hard-read", hardirq_verbose); + hardirq_verbose); case LOCK_ENABLED_SOFTIRQ: return mark_lock_irq_enabled(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ, - "soft", "soft-read", softirq_verbose); + softirq_verbose); case LOCK_ENABLED_RECLAIM_FS: return mark_lock_irq_enabled(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS, - "reclaim-fs", "reclaim-fs-read", reclaim_verbose); case LOCK_ENABLED_HARDIRQ_READ: return mark_lock_irq_enabled_read(curr, this, new_bit, LOCK_USED_IN_HARDIRQ, - "hard", "hard-read", hardirq_verbose); + hardirq_verbose); case LOCK_ENABLED_SOFTIRQ_READ: return mark_lock_irq_enabled_read(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ, - "soft", "soft-read", softirq_verbose); + softirq_verbose); case LOCK_ENABLED_RECLAIM_FS_READ: return mark_lock_irq_enabled_read(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS, - "reclaim-fs", "reclaim-fs-read", reclaim_verbose); default: -- cgit v1.1 From f989209e2f604730888a6daa3b3ff30ed0c9d7c0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 16:09:59 +0100 Subject: lockdep: further simplify mark_lock_irq() helpers take away another parameter Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 47 +++++++++++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 16 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 989a60b..306d0b8 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2025,14 +2025,35 @@ static inline const char *state_rname(enum lock_usage_bit bit) return state_rnames[bit >> 2]; } +static int exclusive_bit(int new_bit) +{ + /* + * USED_IN + * USED_IN_READ + * ENABLED + * ENABLED_READ + * + * bit 0 - write/read + * bit 1 - used_in/enabled + * bit 2+ state + */ + + int state = new_bit & ~3; + int dir = new_bit & 2; + + return state | (dir ^ 2); +} + static int mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, - int new_bit, int excl_bit, + int new_bit, int (*verbose)(struct lock_class *class)) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); + int excl_bit = exclusive_bit(new_bit); + if (!valid_state(curr, this, new_bit, excl_bit)) return 0; if (!valid_state(curr, this, new_bit, excl_bit + 1)) @@ -2059,12 +2080,14 @@ mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, static int mark_lock_irq_used_in_read(struct task_struct *curr, struct held_lock *this, - int new_bit, int excl_bit, + int new_bit, int (*verbose)(struct lock_class *class)) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); + int excl_bit = exclusive_bit(new_bit); + if (!valid_state(curr, this, new_bit, excl_bit)) return 0; /* @@ -2081,12 +2104,14 @@ mark_lock_irq_used_in_read(struct task_struct *curr, struct held_lock *this, static int 
mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, - int new_bit, int excl_bit, + int new_bit, int (*verbose)(struct lock_class *class)) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); + int excl_bit = exclusive_bit(new_bit); + if (!valid_state(curr, this, new_bit, excl_bit)) return 0; if (!valid_state(curr, this, new_bit, excl_bit + 1)) @@ -2114,12 +2139,14 @@ mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, static int mark_lock_irq_enabled_read(struct task_struct *curr, struct held_lock *this, - int new_bit, int excl_bit, + int new_bit, int (*verbose)(struct lock_class *class)) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); + int excl_bit = exclusive_bit(new_bit); + if (!valid_state(curr, this, new_bit, excl_bit)) return 0; #if STRICT_READ_CHECKS @@ -2144,54 +2171,42 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, switch(new_bit) { case LOCK_USED_IN_HARDIRQ: return mark_lock_irq_used_in(curr, this, new_bit, - LOCK_ENABLED_HARDIRQ, hardirq_verbose); case LOCK_USED_IN_SOFTIRQ: return mark_lock_irq_used_in(curr, this, new_bit, - LOCK_ENABLED_SOFTIRQ, softirq_verbose); case LOCK_USED_IN_RECLAIM_FS: return mark_lock_irq_used_in(curr, this, new_bit, - LOCK_ENABLED_RECLAIM_FS, reclaim_verbose); case LOCK_USED_IN_HARDIRQ_READ: return mark_lock_irq_used_in_read(curr, this, new_bit, - LOCK_ENABLED_HARDIRQ, hardirq_verbose); case LOCK_USED_IN_SOFTIRQ_READ: return mark_lock_irq_used_in_read(curr, this, new_bit, - LOCK_ENABLED_SOFTIRQ, softirq_verbose); case LOCK_USED_IN_RECLAIM_FS_READ: return mark_lock_irq_used_in_read(curr, this, new_bit, - LOCK_ENABLED_RECLAIM_FS, reclaim_verbose); case LOCK_ENABLED_HARDIRQ: return mark_lock_irq_enabled(curr, this, new_bit, - LOCK_USED_IN_HARDIRQ, hardirq_verbose); case LOCK_ENABLED_SOFTIRQ: return mark_lock_irq_enabled(curr, this, new_bit, - LOCK_USED_IN_SOFTIRQ, softirq_verbose); case LOCK_ENABLED_RECLAIM_FS: return mark_lock_irq_enabled(curr, this, new_bit, - LOCK_USED_IN_RECLAIM_FS, reclaim_verbose); case LOCK_ENABLED_HARDIRQ_READ: return mark_lock_irq_enabled_read(curr, this, new_bit, - LOCK_USED_IN_HARDIRQ, hardirq_verbose); case LOCK_ENABLED_SOFTIRQ_READ: return mark_lock_irq_enabled_read(curr, this, new_bit, - LOCK_USED_IN_SOFTIRQ, softirq_verbose); case LOCK_ENABLED_RECLAIM_FS_READ: return mark_lock_irq_enabled_read(curr, this, new_bit, - LOCK_USED_IN_RECLAIM_FS, reclaim_verbose); default: -- cgit v1.1 From cd95302d255264c5e6ebe1063320d80517bf2f83 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 16:38:21 +0100 Subject: lockdep: simplify mark_lock_irq() helpers #3 Kill another argument Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 65 +++++++++++++++++++++++--------------------------------- 1 file changed, 27 insertions(+), 38 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 306d0b8..e0a027d 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -1975,7 +1975,7 @@ void print_irqtrace_events(struct task_struct *curr) print_ip_sym(curr->softirq_disable_ip); } -static int hardirq_verbose(struct lock_class *class) +static int HARDIRQ_verbose(struct lock_class *class) { #if HARDIRQ_VERBOSE return class_filter(class); @@ -1983,7 +1983,7 @@ static int hardirq_verbose(struct lock_class *class) return 0; } -static int softirq_verbose(struct lock_class *class) +static int SOFTIRQ_verbose(struct lock_class *class) { #if SOFTIRQ_VERBOSE return 
class_filter(class); @@ -1991,7 +1991,7 @@ static int softirq_verbose(struct lock_class *class) return 0; } -static int reclaim_verbose(struct lock_class *class) +static int RECLAIM_FS_verbose(struct lock_class *class) { #if RECLAIM_VERBOSE return class_filter(class); @@ -2025,6 +2025,19 @@ static inline const char *state_rname(enum lock_usage_bit bit) return state_rnames[bit >> 2]; } +static int (*state_verbose_f[])(struct lock_class *class) = { +#define LOCKDEP_STATE(__STATE) \ + __STATE##_verbose, +#include "lockdep_states.h" +#undef LOCKDEP_STATE +}; + +static inline int state_verbose(enum lock_usage_bit bit, + struct lock_class *class) +{ + return state_verbose_f[bit >> 2](class); +} + static int exclusive_bit(int new_bit) { /* @@ -2046,8 +2059,7 @@ static int exclusive_bit(int new_bit) static int mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, - int new_bit, - int (*verbose)(struct lock_class *class)) + int new_bit) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); @@ -2072,7 +2084,7 @@ mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, if (!check_usage_forwards(curr, this, excl_bit + 1, rname)) return 0; #endif - if (verbose(hlock_class(this))) + if (state_verbose(new_bit, hlock_class(this))) return 2; return 1; @@ -2080,8 +2092,7 @@ mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, static int mark_lock_irq_used_in_read(struct task_struct *curr, struct held_lock *this, - int new_bit, - int (*verbose)(struct lock_class *class)) + int new_bit) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); @@ -2096,7 +2107,7 @@ mark_lock_irq_used_in_read(struct task_struct *curr, struct held_lock *this, */ if (!check_usage_forwards(curr, this, excl_bit, name)) return 0; - if (verbose(hlock_class(this))) + if (state_verbose(new_bit, hlock_class(this))) return 2; return 1; @@ -2104,8 +2115,7 @@ mark_lock_irq_used_in_read(struct task_struct *curr, struct held_lock *this, static int mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, - int new_bit, - int (*verbose)(struct lock_class *class)) + int new_bit) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); @@ -2131,7 +2141,7 @@ mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, if (!check_usage_backwards(curr, this, excl_bit + 1, rname)) return 0; #endif - if (verbose(hlock_class(this))) + if (state_verbose(new_bit, hlock_class(this))) return 2; return 1; @@ -2139,8 +2149,7 @@ mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, static int mark_lock_irq_enabled_read(struct task_struct *curr, struct held_lock *this, - int new_bit, - int (*verbose)(struct lock_class *class)) + int new_bit) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); @@ -2170,44 +2179,24 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, switch(new_bit) { case LOCK_USED_IN_HARDIRQ: - return mark_lock_irq_used_in(curr, this, new_bit, - hardirq_verbose); case LOCK_USED_IN_SOFTIRQ: - return mark_lock_irq_used_in(curr, this, new_bit, - softirq_verbose); case LOCK_USED_IN_RECLAIM_FS: - return mark_lock_irq_used_in(curr, this, new_bit, - reclaim_verbose); + return mark_lock_irq_used_in(curr, this, new_bit); case LOCK_USED_IN_HARDIRQ_READ: - return mark_lock_irq_used_in_read(curr, this, new_bit, - hardirq_verbose); case LOCK_USED_IN_SOFTIRQ_READ: - return mark_lock_irq_used_in_read(curr, this, new_bit, - 
softirq_verbose); case LOCK_USED_IN_RECLAIM_FS_READ: - return mark_lock_irq_used_in_read(curr, this, new_bit, - reclaim_verbose); + return mark_lock_irq_used_in_read(curr, this, new_bit); case LOCK_ENABLED_HARDIRQ: - return mark_lock_irq_enabled(curr, this, new_bit, - hardirq_verbose); case LOCK_ENABLED_SOFTIRQ: - return mark_lock_irq_enabled(curr, this, new_bit, - softirq_verbose); case LOCK_ENABLED_RECLAIM_FS: - return mark_lock_irq_enabled(curr, this, new_bit, - reclaim_verbose); + return mark_lock_irq_enabled(curr, this, new_bit); case LOCK_ENABLED_HARDIRQ_READ: - return mark_lock_irq_enabled_read(curr, this, new_bit, - hardirq_verbose); case LOCK_ENABLED_SOFTIRQ_READ: - return mark_lock_irq_enabled_read(curr, this, new_bit, - softirq_verbose); case LOCK_ENABLED_RECLAIM_FS_READ: - return mark_lock_irq_enabled_read(curr, this, new_bit, - reclaim_verbose); + return mark_lock_irq_enabled_read(curr, this, new_bit); default: WARN_ON(1); -- cgit v1.1 From 780e820b2dfefdfead9243724c2d2b73f379fda6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 16:51:29 +0100 Subject: lockdep: merge the _READ mark_lock_irq() helpers The _READ helpers show remarkable similarity, merge them. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 61 +++++++++++++++++++++----------------------------------- 1 file changed, 23 insertions(+), 38 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index e0a027d..2d95f9d 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2091,22 +2091,34 @@ mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, } static int -mark_lock_irq_used_in_read(struct task_struct *curr, struct held_lock *this, +mark_lock_irq_read(struct task_struct *curr, struct held_lock *this, int new_bit) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); int excl_bit = exclusive_bit(new_bit); + int dir = new_bit & 2; if (!valid_state(curr, this, new_bit, excl_bit)) return 0; - /* - * just marked it hardirq-read-safe, check that this lock - * took no hardirq-unsafe lock in the past: - */ - if (!check_usage_forwards(curr, this, excl_bit, name)) - return 0; + + if (!dir) { + /* + * just marked it hardirq-read-safe, check that this lock + * took no hardirq-unsafe lock in the past: + */ + if (!check_usage_forwards(curr, this, excl_bit, name)) + return 0; + } else if (STRICT_READ_CHECKS) { + /* + * just marked it hardirq-read-unsafe, check that no + * hardirq-safe lock in the system ever took it in the past: + */ + if (!check_usage_backwards(curr, this, excl_bit, name)) + return 0; + } + if (state_verbose(new_bit, hlock_class(this))) return 2; @@ -2147,31 +2159,6 @@ mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, return 1; } -static int -mark_lock_irq_enabled_read(struct task_struct *curr, struct held_lock *this, - int new_bit) -{ - const char *name = state_name(new_bit); - const char *rname = state_rname(new_bit); - - int excl_bit = exclusive_bit(new_bit); - - if (!valid_state(curr, this, new_bit, excl_bit)) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it hardirq-read-unsafe, check that no - * hardirq-safe lock in the system ever took it in the past: - */ - if (!check_usage_backwards(curr, this, excl_bit, name)) - return 0; -#endif - if (verbose(hlock_class(this))) - return 2; - - return 1; -} - static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit) { @@ -2186,18 +2173,16 @@ static int mark_lock_irq(struct task_struct 
*curr, struct held_lock *this, case LOCK_USED_IN_HARDIRQ_READ: case LOCK_USED_IN_SOFTIRQ_READ: case LOCK_USED_IN_RECLAIM_FS_READ: - return mark_lock_irq_used_in_read(curr, this, new_bit); + case LOCK_ENABLED_HARDIRQ_READ: + case LOCK_ENABLED_SOFTIRQ_READ: + case LOCK_ENABLED_RECLAIM_FS_READ: + return mark_lock_irq_read(curr, this, new_bit); case LOCK_ENABLED_HARDIRQ: case LOCK_ENABLED_SOFTIRQ: case LOCK_ENABLED_RECLAIM_FS: return mark_lock_irq_enabled(curr, this, new_bit); - case LOCK_ENABLED_HARDIRQ_READ: - case LOCK_ENABLED_SOFTIRQ_READ: - case LOCK_ENABLED_RECLAIM_FS_READ: - return mark_lock_irq_enabled_read(curr, this, new_bit); - default: WARN_ON(1); break; -- cgit v1.1 From 42c50d544e009cd9b1a8e74d7c5ce8d03ca917ad Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 16:58:16 +0100 Subject: lockdep: merge the !_READ mark_lock_irq() helpers These two are also remarkably similar. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 58 +++++++++++++++----------------------------------------- 1 file changed, 15 insertions(+), 43 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 2d95f9d..1ba52e6 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2057,31 +2057,39 @@ static int exclusive_bit(int new_bit) return state | (dir ^ 2); } +typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, + enum lock_usage_bit bit, const char *name); + static int -mark_lock_irq_used_in(struct task_struct *curr, struct held_lock *this, +mark_lock_irq_write(struct task_struct *curr, struct held_lock *this, int new_bit) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); int excl_bit = exclusive_bit(new_bit); + int dir = new_bit & 2; + + check_usage_f usage = dir ? 
+ check_usage_backwards : check_usage_forwards; if (!valid_state(curr, this, new_bit, excl_bit)) return 0; if (!valid_state(curr, this, new_bit, excl_bit + 1)) return 0; + /* * just marked it hardirq-safe, check that this lock * took no hardirq-unsafe lock in the past: */ - if (!check_usage_forwards(curr, this, excl_bit, name)) + if (!usage(curr, this, excl_bit, name)) return 0; #if STRICT_READ_CHECKS /* * just marked it hardirq-safe, check that this lock * took no hardirq-unsafe-read lock in the past: */ - if (!check_usage_forwards(curr, this, excl_bit + 1, rname)) + if (!usage(curr, this, excl_bit + 1, rname)) return 0; #endif if (state_verbose(new_bit, hlock_class(this))) @@ -2125,40 +2133,6 @@ mark_lock_irq_read(struct task_struct *curr, struct held_lock *this, return 1; } -static int -mark_lock_irq_enabled(struct task_struct *curr, struct held_lock *this, - int new_bit) -{ - const char *name = state_name(new_bit); - const char *rname = state_rname(new_bit); - - int excl_bit = exclusive_bit(new_bit); - - if (!valid_state(curr, this, new_bit, excl_bit)) - return 0; - if (!valid_state(curr, this, new_bit, excl_bit + 1)) - return 0; - /* - * just marked it hardirq-unsafe, check that no hardirq-safe - * lock in the system ever took it in the past: - */ - if (!check_usage_backwards(curr, this, excl_bit, name)) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it hardirq-unsafe, check that no - * hardirq-safe-read lock in the system ever took - * it in the past: - */ - if (!check_usage_backwards(curr, this, excl_bit + 1, rname)) - return 0; -#endif - if (state_verbose(new_bit, hlock_class(this))) - return 2; - - return 1; -} - static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit) { @@ -2142,7 +2142,10 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, case LOCK_USED_IN_HARDIRQ: case LOCK_USED_IN_SOFTIRQ: case LOCK_USED_IN_RECLAIM_FS: - return mark_lock_irq_used_in(curr, this, new_bit); + case LOCK_ENABLED_HARDIRQ: + case LOCK_ENABLED_SOFTIRQ: + case LOCK_ENABLED_RECLAIM_FS: + return mark_lock_irq_write(curr, this, new_bit); case LOCK_USED_IN_HARDIRQ_READ: case LOCK_USED_IN_SOFTIRQ_READ: @@ -2178,11 +2155,6 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, case LOCK_ENABLED_HARDIRQ_READ: case LOCK_ENABLED_SOFTIRQ_READ: case LOCK_ENABLED_RECLAIM_FS_READ: return mark_lock_irq_read(curr, this, new_bit); - case LOCK_ENABLED_HARDIRQ: - case LOCK_ENABLED_SOFTIRQ: - case LOCK_ENABLED_RECLAIM_FS: - return mark_lock_irq_enabled(curr, this, new_bit); - default: WARN_ON(1); break; -- cgit v1.1 From 9d3651a23dc1f7ed7d207f9118459d3a73d485a7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 17:18:32 +0100 Subject: lockdep: fully reduce mark_lock_irq() Now that it's only two functions, they again look rather similar. 
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 86 ++++++-------------------------------------------------- 1 file changed, 8 insertions(+), 78 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 1ba52e6..000d53a 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2061,13 +2061,13 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, enum lock_usage_bit bit, const char *name); static int -mark_lock_irq_write(struct task_struct *curr, struct held_lock *this, - int new_bit) +mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit) { const char *name = state_name(new_bit); const char *rname = state_rname(new_bit); int excl_bit = exclusive_bit(new_bit); + int read = new_bit & 1; int dir = new_bit & 2; check_usage_f usage = dir ? @@ -2075,57 +2075,17 @@ mark_lock_irq_write(struct task_struct *curr, struct held_lock *this, if (!valid_state(curr, this, new_bit, excl_bit)) return 0; - if (!valid_state(curr, this, new_bit, excl_bit + 1)) - return 0; - /* - * just marked it hardirq-safe, check that this lock - * took no hardirq-unsafe lock in the past: - */ - if (!usage(curr, this, excl_bit, name)) - return 0; -#if STRICT_READ_CHECKS - /* - * just marked it hardirq-safe, check that this lock - * took no hardirq-unsafe-read lock in the past: - */ - if (!usage(curr, this, excl_bit + 1, rname)) + if (!read && !valid_state(curr, this, new_bit, excl_bit + 1)) return 0; -#endif - if (state_verbose(new_bit, hlock_class(this))) - return 2; - - return 1; -} - -static int -mark_lock_irq_read(struct task_struct *curr, struct held_lock *this, - int new_bit) -{ - const char *name = state_name(new_bit); - const char *rname = state_rname(new_bit); - - int excl_bit = exclusive_bit(new_bit); - int dir = new_bit & 2; - if (!valid_state(curr, this, new_bit, excl_bit)) + if ((!read || (!dir || STRICT_READ_CHECKS)) && + !usage(curr, this, excl_bit, name)) return 0; - if (!dir) { - /* - * just marked it hardirq-read-safe, check that this lock - * took no hardirq-unsafe lock in the past: - */ - if (!check_usage_forwards(curr, this, excl_bit, name)) - return 0; - } else if (STRICT_READ_CHECKS) { - /* - * just marked it hardirq-read-unsafe, check that no - * hardirq-safe lock in the system ever took it in the past: - */ - if (!check_usage_backwards(curr, this, excl_bit, name)) - return 0; - } + if ((!read && STRICT_READ_CHECKS) && + !usage(curr, this, excl_bit + 1, rname)) + return 0; if (state_verbose(new_bit, hlock_class(this))) return 2; @@ -2133,36 +2093,6 @@ mark_lock_irq_read(struct task_struct *curr, struct held_lock *this, return 1; } -static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, - enum lock_usage_bit new_bit) -{ - int ret = 1; - - switch(new_bit) { - case LOCK_USED_IN_HARDIRQ: - case LOCK_USED_IN_SOFTIRQ: - case LOCK_USED_IN_RECLAIM_FS: - case LOCK_ENABLED_HARDIRQ: - case LOCK_ENABLED_SOFTIRQ: - case LOCK_ENABLED_RECLAIM_FS: - return mark_lock_irq_write(curr, this, new_bit); - - case LOCK_USED_IN_HARDIRQ_READ: - case LOCK_USED_IN_SOFTIRQ_READ: - case LOCK_USED_IN_RECLAIM_FS_READ: - case LOCK_ENABLED_HARDIRQ_READ: - case LOCK_ENABLED_SOFTIRQ_READ: - case LOCK_ENABLED_RECLAIM_FS_READ: - return mark_lock_irq_read(curr, this, new_bit); - - default: - WARN_ON(1); - break; - } - - return ret; -} - enum mark_type { #define LOCKDEP_STATE(__STATE) __STATE, #include "lockdep_states.h" -- cgit v1.1 From cf2ad4d13c4ac6366c730fcf6c6be00db12fb75f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 27 
Jan 2009 13:58:08 +0100 Subject: lockdep: remove macro usage from mark_held_locks() Now that we have nice numerical relations for the states, remove the macro magics. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 000d53a..f40d916 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2099,14 +2099,6 @@ enum mark_type { #undef LOCKDEP_STATE }; -#define MARK_HELD_CASE(__STATE) \ - case __STATE: \ - if (hlock->read) \ - usage_bit = LOCK_ENABLED_##__STATE##_READ; \ - else \ - usage_bit = LOCK_ENABLED_##__STATE; \ - break; - /* * Mark all held locks with a usage bit: */ @@ -2120,13 +2112,11 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark) for (i = 0; i < curr->lockdep_depth; i++) { hlock = curr->held_locks + i; - switch (mark) { -#define LOCKDEP_STATE(__STATE) MARK_HELD_CASE(__STATE) -#include "lockdep_states.h" -#undef LOCKDEP_STATE - default: - BUG(); - } + usage_bit = 2 + (mark << 2); /* ENABLED */ + if (hlock->read) + usage_bit += 1; /* READ */ + + BUG_ON(usage_bit >= LOCK_USAGE_STATES); if (!mark_lock(curr, hlock, usage_bit)) return 0; -- cgit v1.1 From 38aa2714382d886f77f2565277fce293122808b0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 27 Jan 2009 14:53:50 +0100 Subject: lockdep: add comments to mark_lock_irq() Re-add some of the comments that got lost in the refactoring. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index f40d916..02e6e06 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2054,6 +2054,9 @@ static int exclusive_bit(int new_bit) int state = new_bit & ~3; int dir = new_bit & 2; + /* + * keep state, bit flip the direction and strip read. + */ return state | (dir ^ 2); } @@ -2070,22 +2073,42 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit) int read = new_bit & 1; int dir = new_bit & 2; + /* + * mark USED_IN has to look forwards -- to ensure no dependency + * has ENABLED state, which would allow recursion deadlocks. + * + * mark ENABLED has to look backwards -- to ensure no dependee + * has USED_IN state, which, again, would allow recursion deadlocks. + */ check_usage_f usage = dir ? check_usage_backwards : check_usage_forwards; + /* + * Validate that this particular lock does not have conflicting + * usage states. + */ if (!valid_state(curr, this, new_bit, excl_bit)) return 0; - if (!read && !valid_state(curr, this, new_bit, excl_bit + 1)) - return 0; - - if ((!read || (!dir || STRICT_READ_CHECKS)) && + /* + * Validate that the lock dependencies don't have conflicting usage + * states. + */ + if ((!read || !dir || STRICT_READ_CHECKS) && !usage(curr, this, excl_bit, name)) return 0; - if ((!read && STRICT_READ_CHECKS) && - !usage(curr, this, excl_bit + 1, rname)) - return 0; + /* + * Check for read in write conflicts + */ + if (!read) { + if (!valid_state(curr, this, new_bit, excl_bit + 1)) + return 0; + + if (STRICT_READ_CHECKS && + !usage(curr, this, excl_bit + 1, rname)) + return 0; + } if (state_verbose(new_bit, hlock_class(this))) return 2; -- cgit v1.1 From 3ff176ca47911630d1555f150d36daa2d0819ea9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 17:40:42 +0100 Subject: lockdep: simplify get_usage_chars() There's too much repetition of code.
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 65 +++++++++++++++++++++----------------------------------- 1 file changed, 24 insertions(+), 41 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 02e6e06..1b4ee3c 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -467,54 +467,37 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str) return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); } -void -get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, - char *c4, char *c5, char *c6) +static inline unsigned long lock_flag(enum lock_usage_bit bit) { - *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.', *c5 = '.', *c6 = '.'; - - if (class->usage_mask & LOCKF_USED_IN_HARDIRQ) - *c1 = '+'; - else - if (class->usage_mask & LOCKF_ENABLED_HARDIRQ) - *c1 = '-'; - - if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ) - *c2 = '+'; - else - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ) - *c2 = '-'; + return 1UL << bit; +} - if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ) - *c3 = '-'; - if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) { - *c3 = '+'; - if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ) - *c3 = '?'; - } +static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit) +{ + char c = '.'; - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ) - *c4 = '-'; - if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) { - *c4 = '+'; - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ) - *c4 = '?'; + if (class->usage_mask & lock_flag(bit + 2)) + c = '+'; + if (class->usage_mask & lock_flag(bit)) { + c = '-'; + if (class->usage_mask & lock_flag(bit + 2)) + c = '?'; } - if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS) - *c5 = '+'; - else - if (class->usage_mask & LOCKF_ENABLED_RECLAIM_FS) - *c5 = '-'; + return c; +} - if (class->usage_mask & LOCKF_ENABLED_RECLAIM_FS_READ) - *c6 = '-'; - if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS_READ) { - *c6 = '+'; - if (class->usage_mask & LOCKF_ENABLED_RECLAIM_FS_READ) - *c6 = '?'; - } +void +get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, + char *c4, char *c5, char *c6) +{ + *c1 = get_usage_char(class, LOCK_USED_IN_HARDIRQ); + *c2 = get_usage_char(class, LOCK_USED_IN_SOFTIRQ); + *c3 = get_usage_char(class, LOCK_USED_IN_HARDIRQ_READ); + *c4 = get_usage_char(class, LOCK_USED_IN_SOFTIRQ_READ); + *c5 = get_usage_char(class, LOCK_USED_IN_RECLAIM_FS); + *c6 = get_usage_char(class, LOCK_USED_IN_RECLAIM_FS_READ); } static void print_lock_name(struct lock_class *class) -- cgit v1.1 From f510b233cfc7bfd57b6007071c52aa42e3d16b06 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 17:53:47 +0100 Subject: lockdep: get_usage_chars() redo Generic, state-independent get_usage_chars(). Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- Documentation/lockdep-design.txt | 30 +++++++++++++++++------------- kernel/lockdep.c | 24 ++++++++++++------------ kernel/lockdep_internals.h | 7 ++++--- kernel/lockdep_proc.c | 6 +++--- 4 files changed, 36 insertions(+), 31 deletions(-) diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt index 4887730..938ea22 100644 --- a/Documentation/lockdep-design.txt +++ b/Documentation/lockdep-design.txt @@ -27,33 +27,37 @@ lock-class.
State ----- -The validator tracks lock-class usage history into 5 separate state bits: +The validator tracks lock-class usage history into 4n + 1 separate state bits: -- 'ever held in hardirq context' [ == hardirq-safe ] -- 'ever held in softirq context' [ == softirq-safe ] -- 'ever held with hardirqs enabled' [ == hardirq-unsafe ] -- 'ever held with softirqs and hardirqs enabled' [ == softirq-unsafe ] +- 'ever held in STATE context' +- 'ever held as readlock in STATE context' +- 'ever held with STATE enabled' +- 'ever held as readlock with STATE enabled' + +Where STATE can be either one of (kernel/lockdep_states.h) + - hardirq + - softirq + - reclaim_fs - 'ever used' [ == !unused ] -When locking rules are violated, these 4 state bits are presented in the -locking error messages, inside curlies. A contrived example: +When locking rules are violated, these state bits are presented in the +locking error messages, inside curlies. A contrived example: modprobe/2287 is trying to acquire lock: - (&sio_locks[i].lock){--..}, at: [] mutex_lock+0x21/0x24 + (&sio_locks[i].lock){-.-...}, at: [] mutex_lock+0x21/0x24 but task is already holding lock: - (&sio_locks[i].lock){--..}, at: [] mutex_lock+0x21/0x24 + (&sio_locks[i].lock){-.-...}, at: [] mutex_lock+0x21/0x24 -The bit position indicates hardirq, softirq, hardirq-read, -softirq-read respectively, and the character displayed in each -indicates: +The bit position indicates STATE, STATE-read, for each of the states listed +above, and the character displayed in each indicates: '.' acquired while irqs disabled '-' acquired in irq context '+' acquired with irqs enabled - '?' read acquired in irq context with irqs enabled. + '?' acquired in irq context with irqs enabled. Unused mutexes cannot be part of the cause of an error.
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 1b4ee3c..22ced8d 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -487,25 +487,25 @@ static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit) return c; } -void -get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, - char *c4, char *c5, char *c6) +void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) { - *c1 = get_usage_char(class, LOCK_USED_IN_HARDIRQ); - *c2 = get_usage_char(class, LOCK_USED_IN_SOFTIRQ); - *c3 = get_usage_char(class, LOCK_USED_IN_HARDIRQ_READ); - *c4 = get_usage_char(class, LOCK_USED_IN_SOFTIRQ_READ); + int i = 0; - *c5 = get_usage_char(class, LOCK_USED_IN_RECLAIM_FS); - *c6 = get_usage_char(class, LOCK_USED_IN_RECLAIM_FS_READ); +#define LOCKDEP_STATE(__STATE) \ + usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \ + usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ); +#include "lockdep_states.h" +#undef LOCKDEP_STATE + + usage[i] = '\0'; } static void print_lock_name(struct lock_class *class) { - char str[KSYM_NAME_LEN], c1, c2, c3, c4, c5, c6; + char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; const char *name; - get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6); + get_usage_chars(class, usage); name = class->name; if (!name) { @@ -518,7 +518,7 @@ static void print_lock_name(struct lock_class *class) if (class->subclass) printk("/%d", class->subclass); } - printk("){%c%c%c%c%c%c}", c1, c2, c3, c4, c5, c6); + printk("){%s}", usage); } static void print_lockdep_cache(struct lockdep_map *lock) diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index 7e653e6..a2cc7e9 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h @@ -70,9 +70,10 @@ enum { extern struct list_head all_lock_classes; extern struct lock_chain lock_chains[]; -extern void -get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, - char *c4, char *c5, char *c6); +#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2) +extern void get_usage_chars(struct lock_class *class, + char usage[LOCK_USAGE_CHARS]); extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str); diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index bd474fd..b51064c 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c @@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v) { struct lock_class *class = v; struct lock_list *entry; - char c1, c2, c3, c4, c5, c6; + char usage[LOCK_USAGE_CHARS]; if (v == SEQ_START_TOKEN) { seq_printf(m, "all lock classes:\n"); @@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v) seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class)); #endif - get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6); - seq_printf(m, " %c%c%c%c%c%c", c1, c2, c3, c4, c5, c6); + get_usage_chars(class, usage); + seq_printf(m, " %s", usage); seq_printf(m, ": "); print_name(m, class); -- cgit v1.1 From 4f367d8adca947bed4385740a13d1efb1a06fba1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 22 Jan 2009 18:10:42 +0100 Subject: lockdep: simplify check_prev_add_irq() Remove the manual state iteration thingy.
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 154 ++++++++++++++++++++++--------------------------------- 1 file changed, 61 insertions(+), 93 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 22ced8d..42dfc28 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -1268,68 +1268,84 @@ check_usage(struct task_struct *curr, struct held_lock *prev, bit_backwards, bit_forwards, irqclass); } -static int -check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, - struct held_lock *next) +static const char *state_names[] = { +#define LOCKDEP_STATE(__STATE) \ + STR(__STATE), +#include "lockdep_states.h" +#undef LOCKDEP_STATE +}; + +static const char *state_rnames[] = { +#define LOCKDEP_STATE(__STATE) \ + STR(__STATE)"-READ", +#include "lockdep_states.h" +#undef LOCKDEP_STATE +}; + +static inline const char *state_name(enum lock_usage_bit bit) { - /* - * Prove that the new dependency does not connect a hardirq-safe - * lock with a hardirq-unsafe lock - to achieve this we search - * the backwards-subgraph starting at <prev>, and the - * forwards-subgraph starting at <next>: - */ - if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ, - LOCK_ENABLED_HARDIRQ, "hard")) - return 0; + return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2]; } +static int exclusive_bit(int new_bit) +{ /* - * Prove that the new dependency does not connect a hardirq-safe-read - * lock with a hardirq-unsafe lock - to achieve this we search - * the backwards-subgraph starting at <prev>, and the - * forwards-subgraph starting at <next>: + * USED_IN + * USED_IN_READ + * ENABLED + * ENABLED_READ + * + * bit 0 - write/read + * bit 1 - used_in/enabled + * bit 2+ state */ - if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ, - LOCK_ENABLED_HARDIRQ, "hard-read")) - return 0; + + int state = new_bit & ~3; + int dir = new_bit & 2; /* - * Prove that the new dependency does not connect a softirq-safe - * lock with a softirq-unsafe lock - to achieve this we search - * the backwards-subgraph starting at <prev>, and the - * forwards-subgraph starting at <next>: + * keep state, bit flip the direction and strip read.
*/ - if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ, - LOCK_ENABLED_SOFTIRQ, "soft")) - return 0; + return state | (dir ^ 2); +} + +static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, + struct held_lock *next, enum lock_usage_bit bit) +{ /* - * Prove that the new dependency does not connect a softirq-safe-read - * lock with a softirq-unsafe lock - to achieve this we search + * Prove that the new dependency does not connect a hardirq-safe + * lock with a hardirq-unsafe lock - to achieve this we search * the backwards-subgraph starting at <prev>, and the * forwards-subgraph starting at <next>: */ - if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ, - LOCK_ENABLED_SOFTIRQ, "soft")) + if (!check_usage(curr, prev, next, bit, + exclusive_bit(bit), state_name(bit))) return 0; + bit++; /* _READ */ + /* - * Prove that the new dependency does not connect a reclaim-fs-safe - * lock with a reclaim-fs-unsafe lock - to achieve this we search + * Prove that the new dependency does not connect a hardirq-safe-read + * lock with a hardirq-unsafe lock - to achieve this we search * the backwards-subgraph starting at <prev>, and the * forwards-subgraph starting at <next>: */ - if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS, - LOCK_ENABLED_RECLAIM_FS, "reclaim-fs")) + if (!check_usage(curr, prev, next, bit, + exclusive_bit(bit), state_name(bit))) return 0; - /* - * Prove that the new dependency does not connect a reclaim-fs-safe-read - * lock with a reclaim-fs-unsafe lock - to achieve this we search - * the backwards-subgraph starting at <prev>, and the - * forwards-subgraph starting at <next>: - */ - if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS_READ, - LOCK_ENABLED_RECLAIM_FS, "reclaim-fs-read")) + return 1; +} + +static int +check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, + struct held_lock *next) +{ +#define LOCKDEP_STATE(__STATE) \ + if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \ return 0; +#include "lockdep_states.h" +#undef LOCKDEP_STATE return 1; } @@ -1984,30 +2000,6 @@ static int RECLAIM_FS_verbose(struct lock_class *class) #define STRICT_READ_CHECKS 1 -static const char *state_names[] = { -#define LOCKDEP_STATE(__STATE) \ - STR(__STATE), -#include "lockdep_states.h" -#undef LOCKDEP_STATE -}; - -static inline const char *state_name(enum lock_usage_bit bit) -{ - return state_names[bit >> 2]; -} - -static const char *state_rnames[] = { -#define LOCKDEP_STATE(__STATE) \ - STR(__STATE)"-READ", -#include "lockdep_states.h" -#undef LOCKDEP_STATE -}; - -static inline const char *state_rname(enum lock_usage_bit bit) -{ - return state_rnames[bit >> 2]; -} - static int (*state_verbose_f[])(struct lock_class *class) = { #define LOCKDEP_STATE(__STATE) \ __STATE##_verbose, @@ -2021,37 +2013,12 @@ static inline int state_verbose(enum lock_usage_bit bit, return state_verbose_f[bit >> 2](class); } -static int exclusive_bit(int new_bit) -{ - /* - * USED_IN - * USED_IN_READ - * ENABLED - * ENABLED_READ - * - * bit 0 - write/read - * bit 1 - used_in/enabled - * bit 2+ state - */ - - int state = new_bit & ~3; - int dir = new_bit & 2; - - /* - * keep state, bit flip the direction and strip read.
*/ - return state | (dir ^ 2); -} - typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, enum lock_usage_bit bit, const char *name); static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit) { - const char *name = state_name(new_bit); - const char *rname = state_rname(new_bit); - int excl_bit = exclusive_bit(new_bit); int read = new_bit & 1; int dir = new_bit & 2; @@ -2078,7 +2045,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit) * states. */ if ((!read || !dir || STRICT_READ_CHECKS) && - !usage(curr, this, excl_bit, name)) + !usage(curr, this, excl_bit, state_name(new_bit))) return 0; /* @@ -2089,7 +2056,8 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit) return 0; if (STRICT_READ_CHECKS && - !usage(curr, this, excl_bit + 1, rname)) + !usage(curr, this, excl_bit + 1, + state_name(new_bit + 1))) return 0; } -- cgit v1.1 From b4b136f44b3b7adb9265fd5566d0ea9b99b1cd5f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 29 Jan 2009 14:50:36 +0100 Subject: lockdep: use stringify.h Arnd pointed out we have the stringify macro magic already in-kernel. Signed-off-by: Peter Zijlstra CC: Arnd Bergmann Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 42dfc28..fc84a30 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -41,6 +41,7 @@ #include <linux/utsname.h> #include <linux/hash.h> #include <linux/ftrace.h> +#include <linux/stringify.h> #include <asm/sections.h> @@ -445,14 +446,11 @@ atomic_t nr_find_usage_backwards_recursions; * Locking printouts: */ -#define __STR(foo) #foo -#define STR(foo) __STR(foo) - #define __USAGE(__STATE) \ - [LOCK_USED_IN_##__STATE] = "IN-"STR(__STATE)"-W", \ - [LOCK_ENABLED_##__STATE] = STR(__STATE)"-ON-W", \ - [LOCK_USED_IN_##__STATE##_READ] = "IN-"STR(__STATE)"-R", \ - [LOCK_ENABLED_##__STATE##_READ] = STR(__STATE)"-ON-R", + [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \ + [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \ + [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\ + [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R", static const char *usage_str[] = { @@ -1270,14 +1268,14 @@ check_usage(struct task_struct *curr, struct held_lock *prev, static const char *state_names[] = { #define LOCKDEP_STATE(__STATE) \ - STR(__STATE), + __stringify(__STATE), #include "lockdep_states.h" #undef LOCKDEP_STATE }; static const char *state_rnames[] = { #define LOCKDEP_STATE(__STATE) \ - STR(__STATE)"-READ", + __stringify(__STATE)"-READ", #include "lockdep_states.h" #undef LOCKDEP_STATE }; -- cgit v1.1 From 9833f8cb952b9aa3f98a71e7bef8820cee3261a0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 14 Feb 2009 16:59:04 +0100 Subject: lockstat: warn about disabled lock debugging Avoid confusion and clearly state lock debugging got disabled. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep_proc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index b51064c..d7135aa 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c @@ -601,6 +601,10 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data) static void seq_header(struct seq_file *m) { seq_printf(m, "lock_stat version 0.3\n"); + + if (unlikely(!debug_locks)) + seq_printf(m, "*WARNING* lock debugging disabled!!
- possibly due to a lockdep warning\n"); + seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1)); seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s " "%14s %14s\n", -- cgit v1.1 From 868a23a8043f2a3042dae60105c89bd4680187ba Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 15 Feb 2009 00:25:21 +0100 Subject: lockdep: build fix for !PROVE_LOCKING The __GFP_FS annotations fail to build with CONFIG_LOCKDEP=y, CONFIG_PROVE_LOCKING=n, amend that. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index fc84a30..022d2ed 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2396,6 +2396,10 @@ static inline int separate_irq_context(struct task_struct *curr, return 0; } +void lockdep_trace_alloc(gfp_t gfp_mask) +{ +} + #endif /* -- cgit v1.1 From 6700ec65c207068a81a535e9dca616fefac21671 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 15 Feb 2009 21:18:17 +0100 Subject: lockdep: annotate reclaim context (__GFP_NOFS), fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Impact: fix build warning Fix: mm/vmscan.c: In function ‘kswapd’: mm/vmscan.c:1969: warning: ISO C90 forbids mixed declarations and code node_to_cpumask_ptr(cpumask, pgdat->node_id), has a side-effect: it defines the 'cpumask' local variable as well, so it has to go into the variable definition section. Sidenote: it might make sense to make the purpose of these macros more apparent, by naming them the standard way, such as: DEFINE_node_to_cpumask_ptr(cpumask, pgdat->node_id); (But that is outside the scope of this patch.) Cc: Rusty Russell Cc: Mike Travis Cc: Andrew Morton Cc: Nick Piggin Signed-off-by: Ingo Molnar --- mm/vmscan.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 303eb65..cf84413 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1963,11 +1963,10 @@ static int kswapd(void *p) struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; + node_to_cpumask_ptr(cpumask, pgdat->node_id); lockdep_set_current_reclaim_state(GFP_KERNEL); - node_to_cpumask_ptr(cpumask, pgdat->node_id); - if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); current->reclaim_state = &reclaim_state; -- cgit v1.1 From 19cefdffbfe0f7e280f21e80875937e8700e99e2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 15 Mar 2009 06:03:11 +0100 Subject: lockdep: annotate reclaim context (__GFP_NOFS), fix SLOB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Impact: build fix fix typo in mm/slob.c: mm/slob.c:469: error: ‘flags’ undeclared (first use in this function) mm/slob.c:469: error: (Each undeclared identifier is reported only once mm/slob.c:469: error: for each function it appears in.)
Cc: Nick Piggin Cc: Peter Zijlstra LKML-Reference: <20090128135457.350751756@chello.nl> Signed-off-by: Ingo Molnar --- mm/slob.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/slob.c b/mm/slob.c index 1264799..4b1c0c1 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -464,7 +464,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) unsigned int *m; int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); - lockdep_trace_alloc(flags); + lockdep_trace_alloc(gfp); if (size < PAGE_SIZE - align) { if (!size) -- cgit v1.1 From 2f8501815256af8498904e68bd0984b1afffd6f8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Mar 2009 11:13:20 +0100 Subject: lockdep: fix deadlock in lockdep_trace_alloc Heiko reported that we grab the graph lock with irqs enabled. Fix this by providing the same wrapper as all other lockdep entry functions have. Reported-by: Heiko Carstens Signed-off-by: Peter Zijlstra Cc: Nick Piggin LKML-Reference: <1237544000.24626.52.camel@twins> Signed-off-by: Ingo Molnar --- kernel/lockdep.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 022d2ed..3673a3f 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2260,7 +2260,7 @@ void trace_softirqs_off(unsigned long ip) debug_atomic_inc(&redundant_softirqs_off); } -void lockdep_trace_alloc(gfp_t gfp_mask) +static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags) { struct task_struct *curr = current; @@ -2279,12 +2279,29 @@ void lockdep_trace_alloc(gfp_t gfp_mask) if (!(gfp_mask & __GFP_FS)) return; - if (DEBUG_LOCKS_WARN_ON(irqs_disabled())) + if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags))) return; mark_held_locks(curr, RECLAIM_FS); } +static void check_flags(unsigned long flags); + +void lockdep_trace_alloc(gfp_t gfp_mask) +{ + unsigned long flags; + + if (unlikely(current->lockdep_recursion)) + return; + + raw_local_irq_save(flags); + check_flags(flags); + current->lockdep_recursion = 1; + __lockdep_trace_alloc(gfp_mask, flags); + current->lockdep_recursion = 0; + raw_local_irq_restore(flags); +} + static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) { /* -- cgit v1.1
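The lockdep series above leans on a single numeric layout of the lock usage bits: bit 0 selects write vs read, bit 1 selects the direction (USED_IN vs ENABLED), and bits 2 and up index the state (hardirq, softirq, reclaim_fs). What follows is a minimal standalone userspace sketch of that encoding, not kernel code: the trimmed two-state enum, usage_char() and main() are illustrative stand-ins for the kernel's full state list and get_usage_char(), while exclusive_bit() mirrors the helper from the patches above.

	#include <stdio.h>

	/*
	 * Usage-bit layout assumed throughout the series:
	 * bit 0 - write/read, bit 1 - used_in/enabled, bit 2+ - state.
	 * Trimmed to two states (hardirq, softirq) for illustration.
	 */
	enum lock_usage_bit {
		LOCK_USED_IN_HARDIRQ,		/* 0 */
		LOCK_USED_IN_HARDIRQ_READ,	/* 1 */
		LOCK_ENABLED_HARDIRQ,		/* 2 */
		LOCK_ENABLED_HARDIRQ_READ,	/* 3 */
		LOCK_USED_IN_SOFTIRQ,		/* 4 */
		LOCK_USED_IN_SOFTIRQ_READ,	/* 5 */
		LOCK_ENABLED_SOFTIRQ,		/* 6 */
		LOCK_ENABLED_SOFTIRQ_READ,	/* 7 */
	};

	/* same computation as the kernel helper: keep state, flip direction, strip read */
	static int exclusive_bit(int new_bit)
	{
		int state = new_bit & ~3;
		int dir = new_bit & 2;

		return state | (dir ^ 2);
	}

	/* same character mapping as get_usage_char(); bit is a LOCK_USED_IN_* bit */
	static char usage_char(unsigned long usage_mask, int bit)
	{
		char c = '.';

		if (usage_mask & (1UL << (bit + 2)))	/* ENABLED -> '+' */
			c = '+';
		if (usage_mask & (1UL << bit)) {	/* USED_IN -> '-', both -> '?' */
			c = '-';
			if (usage_mask & (1UL << (bit + 2)))
				c = '?';
		}
		return c;
	}

	int main(void)
	{
		/* a lock taken in hardirq context and also taken with softirqs enabled */
		unsigned long mask = (1UL << LOCK_USED_IN_HARDIRQ) |
				     (1UL << LOCK_ENABLED_SOFTIRQ);

		/* prints 2, i.e. LOCK_ENABLED_HARDIRQ */
		printf("exclusive_bit(USED_IN_HARDIRQ_READ) = %d\n",
		       exclusive_bit(LOCK_USED_IN_HARDIRQ_READ));

		/* prints {-.+.}: hardirq '-', hardirq-read '.', softirq '+', softirq-read '.' */
		printf("{%c%c%c%c}\n",
		       usage_char(mask, LOCK_USED_IN_HARDIRQ),
		       usage_char(mask, LOCK_USED_IN_HARDIRQ_READ),
		       usage_char(mask, LOCK_USED_IN_SOFTIRQ),
		       usage_char(mask, LOCK_USED_IN_SOFTIRQ_READ));
		return 0;
	}

Compiled on its own, this prints 2 for the exclusive bit (USED_IN_HARDIRQ_READ pairs off against ENABLED_HARDIRQ) and the annotation {-.+.}, matching the character table in the lockdep-design.txt hunk above.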