author | Peter Zijlstra <peterz@infradead.org> | 2016-11-30 14:32:25 +1100
---|---|---
committer | Dave Chinner <david@fromorbit.com> | 2016-11-30 14:32:25 +1100
commit | f8319483f57f1ca22370f4150bb990aca7728a67 (patch) |
tree | 348cff60f15f3282e5489fba4dfd46b6c958e533 /kernel/locking |
parent | 3816199506c7826983096fc65ed46f2733a47bb8 (diff) |
locking/lockdep: Provide a type check for lock_is_held
Christoph requested lockdep_assert_held() variants that distinguish
between held-for-read and held-for-write.
Provide:
int lock_is_held_type(struct lockdep_map *lock, int read)
which takes the same argument as lock_acquire(.read) and matches it to
the held_lock instance.
Use of this function should be gated by the debug_locks variable. When
that is 0 the return value of the lock_is_held_type() function is
undefined. This is done to allow both negative and positive tests for
holding locks.
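
For illustration only (not part of this patch), both kinds of test would be
gated on debug_locks along these lines; the helper names below are
hypothetical, and "map" stands for whatever lockdep_map was registered
through lock_acquire(.read):

```c
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/* Positive test: warn unless @map is held for read (read == 1). */
static void assert_held_for_read(struct lockdep_map *map)
{
	WARN_ON(debug_locks && !lock_is_held_type(map, 1));
}

/* Negative test: warn if @map is held in any mode (read == -1). */
static void assert_not_held(struct lockdep_map *map)
{
	WARN_ON(debug_locks && lock_is_held_type(map, -1));
}
```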
By default we provide (positive) lockdep_assert_held{,_exclusive,_read}()
macros.
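
The macros themselves live outside the kernel/locking diffstat shown below;
as a rough sketch, they can be built on lock_is_held_type() roughly as
follows, assuming a lockdep_is_held_type() wrapper over the lock's embedded
dep_map (the exact include/linux/lockdep.h definitions may differ in detail):

```c
/* Sketch only: wrapper mapping a lock object to its lockdep_map. */
#define lockdep_is_held_type(l, r)	lock_is_held_type(&(l)->dep_map, (r))

/* Positive assertion: must be held for write (read == 0). */
#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

/* Positive assertion: must be held for read (read == 1). */
#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)
```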
Requested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
Diffstat (limited to 'kernel/locking')
-rw-r--r-- | kernel/locking/lockdep.c | 20 |
1 file changed, 12 insertions, 8 deletions
```diff
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 589d763..cff580a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3188,7 +3188,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
         return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock);
+static int __lock_is_held(struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3329,7 +3329,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         }
         chain_key = iterate_chain_key(chain_key, class_idx);
 
-        if (nest_lock && !__lock_is_held(nest_lock))
+        if (nest_lock && !__lock_is_held(nest_lock, -1))
                 return print_lock_nested_lock_not_held(curr, hlock, ip);
 
         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
@@ -3576,7 +3576,7 @@ found_it:
         return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock)
+static int __lock_is_held(struct lockdep_map *lock, int read)
 {
         struct task_struct *curr = current;
         int i;
@@ -3584,8 +3584,12 @@ static int __lock_is_held(struct lockdep_map *lock)
         for (i = 0; i < curr->lockdep_depth; i++) {
                 struct held_lock *hlock = curr->held_locks + i;
 
-                if (match_held_lock(hlock, lock))
-                        return 1;
+                if (match_held_lock(hlock, lock)) {
+                        if (read == -1 || hlock->read == read)
+                                return 1;
+
+                        return 0;
+                }
         }
 
         return 0;
@@ -3769,7 +3773,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held(struct lockdep_map *lock)
+int lock_is_held_type(struct lockdep_map *lock, int read)
 {
         unsigned long flags;
         int ret = 0;
@@ -3781,13 +3785,13 @@ int lock_is_held(struct lockdep_map *lock)
         check_flags(flags);
 
         current->lockdep_recursion = 1;
-        ret = __lock_is_held(lock);
+        ret = __lock_is_held(lock, read);
         current->lockdep_recursion = 0;
         raw_local_irq_restore(flags);
 
         return ret;
 }
-EXPORT_SYMBOL_GPL(lock_is_held);
+EXPORT_SYMBOL_GPL(lock_is_held_type);
 
 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 {
```
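
For context, a hedged sketch of the kind of caller this was requested for:
asserting the expected locking mode at the top of internal helpers. The
struct foo object and its functions are illustrative names only, not code
from this patch:

```c
#include <linux/lockdep.h>
#include <linux/rwsem.h>

/* Hypothetical object protected by an rw_semaphore. */
struct foo {
	struct rw_semaphore	lock;
	int			value;
};

/* Internal helper: callers must hold foo->lock at least for read. */
static int foo_read_value(struct foo *foo)
{
	lockdep_assert_held_read(&foo->lock);
	return foo->value;
}

/* Internal helper: callers must hold foo->lock for write. */
static void foo_set_value(struct foo *foo, int value)
{
	lockdep_assert_held_exclusive(&foo->lock);
	foo->value = value;
}

/* Example caller taking the lock in the mode the helper asserts. */
static int foo_get(struct foo *foo)
{
	int v;

	down_read(&foo->lock);
	v = foo_read_value(foo);
	up_read(&foo->lock);

	return v;
}
```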