author	Byungchul Park <byungchul.park@lge.com>	2017-08-07 16:12:53 +0900
committer	Ingo Molnar <mingo@kernel.org>	2017-08-10 12:29:08 +0200
commit	23f873d8f9526ed7e49a1a02a45f8afb9ae5fb84 (patch)
tree	c926e87ad1197db61dca6677b5e7e7184271698a /kernel/locking
parent	b09be676e0ff25bd6d2e7637e26d349f9109ad75 (diff)
locking/lockdep: Detect and handle hist_lock ring buffer overwrite
The ring buffer can be overwritten by hardirq/softirq/work contexts.
Those cases must be considered on rollback or commit. For example,

          |<------ hist_lock ring buffer size ----->|
          ppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii
wrapped > iiiiiiiiiiiiiiiiiiiiiii....................

          where 'p' represents an acquisition in process context,
          'i' represents an acquisition in irq context.

On irq exit, crossrelease tries to roll back idx to the original
position, but it should not, because the entry has already been
invalidated by the overwriting 'i'. Avoid rollback or commit for
overwritten entries.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Cc: willy@infradead.org
Link: http://lkml.kernel.org/r/1502089981-21272-7-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
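To make the rollback problem concrete, here is a minimal, self-contained C sketch of the idea the patch implements, not the kernel code itself: every ring entry records the generation number (hist_id) it was written with, the outer context remembers the generation of its last entry before an inner context runs, and on rollback the entry is invalidated if its stored generation no longer matches. The names (struct entry, add_entry, RING_SIZE) are illustrative only.

	#include <stdio.h>

	#define RING_SIZE 8

	struct entry {
		void *instance;		/* NULL means "invalidated" */
		unsigned int hist_id;	/* generation this slot was last written with */
	};

	static struct entry ring[RING_SIZE];
	static unsigned int idx;	/* next slot to write (monotonic, masked on use) */
	static unsigned int hist_id;	/* monotonically increasing generation counter */

	static void add_entry(void *instance)
	{
		struct entry *e = &ring[idx++ % RING_SIZE];

		e->instance = instance;
		e->hist_id = hist_id++;
	}

	int main(void)
	{
		int dummy;
		unsigned int i;

		/* 'p': a few acquisitions in process context */
		for (i = 0; i < 3; i++)
			add_entry(&dummy);

		/* irq entry: remember the last process-context entry and its generation */
		unsigned int last = idx - 1;
		unsigned int saved_id = ring[last % RING_SIZE].hist_id;

		/* 'i': the irq context wraps the ring and overwrites that entry */
		for (i = 0; i < RING_SIZE + 2; i++)
			add_entry(&dummy);

		/* irq exit: roll back, but refuse to trust an overwritten entry */
		idx = last + 1;
		struct entry *e = &ring[last % RING_SIZE];
		if (e->hist_id != saved_id) {
			e->instance = NULL;	/* invalidate, as invalidate_xhlock() does */
			printf("entry %u was overwritten during irq, invalidated\n",
			       last % RING_SIZE);
		}

		return 0;
	}

The patch below does the same bookkeeping per task and per context (one saved index and generation for each entry of XHLOCK_CTX_NR), and additionally stops the commit walk in commit_xhlocks() when the generation order shows that older entries have been overwritten.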
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/lockdep.c	52
1 file changed, 47 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 56f69cc..eda8114 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4681,6 +4681,17 @@ EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
static atomic_t cross_gen_id; /* Can be wrapped */
/*
+ * Make an entry of the ring buffer invalid.
+ */
+static inline void invalidate_xhlock(struct hist_lock *xhlock)
+{
+ /*
+ * Normally, xhlock->hlock.instance must be !NULL.
+ */
+ xhlock->hlock.instance = NULL;
+}
+
+/*
* Lock history stacks; we have 3 nested lock history stacks:
*
* Hard IRQ
@@ -4712,14 +4723,28 @@ static atomic_t cross_gen_id; /* Can be wrapped */
*/
void crossrelease_hist_start(enum xhlock_context_t c)
{
- if (current->xhlocks)
- current->xhlock_idx_hist[c] = current->xhlock_idx;
+ struct task_struct *cur = current;
+
+ if (cur->xhlocks) {
+ cur->xhlock_idx_hist[c] = cur->xhlock_idx;
+ cur->hist_id_save[c] = cur->hist_id;
+ }
}
void crossrelease_hist_end(enum xhlock_context_t c)
{
- if (current->xhlocks)
- current->xhlock_idx = current->xhlock_idx_hist[c];
+ struct task_struct *cur = current;
+
+ if (cur->xhlocks) {
+ unsigned int idx = cur->xhlock_idx_hist[c];
+ struct hist_lock *h = &xhlock(idx);
+
+ cur->xhlock_idx = idx;
+
+ /* Check if the ring was overwritten. */
+ if (h->hist_id != cur->hist_id_save[c])
+ invalidate_xhlock(h);
+ }
}
static int cross_lock(struct lockdep_map *lock)
@@ -4765,6 +4790,7 @@ static inline int depend_after(struct held_lock *hlock)
* Check if the xhlock is valid, which would be false if,
*
* 1. Has not used after initializaion yet.
+ * 2. Got invalidated.
*
* Remind hist_lock is implemented as a ring buffer.
*/
@@ -4796,6 +4822,7 @@ static void add_xhlock(struct held_lock *hlock)
/* Initialize hist_lock's members */
xhlock->hlock = *hlock;
+ xhlock->hist_id = current->hist_id++;
xhlock->trace.nr_entries = 0;
xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
@@ -4934,6 +4961,7 @@ static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
static void commit_xhlocks(struct cross_lock *xlock)
{
unsigned int cur = current->xhlock_idx;
+ unsigned int prev_hist_id = xhlock(cur).hist_id;
unsigned int i;
if (!graph_lock())
@@ -4952,6 +4980,17 @@ static void commit_xhlocks(struct cross_lock *xlock)
break;
/*
+ * Filter out the cases that the ring buffer was
+ * overwritten and the previous entry has a bigger
+ * hist_id than the following one, which is impossible
+ * otherwise.
+ */
+ if (unlikely(before(xhlock->hist_id, prev_hist_id)))
+ break;
+
+ prev_hist_id = xhlock->hist_id;
+
+ /*
* commit_xhlock() returns 0 with graph_lock already
* released if fail.
*/
@@ -5024,9 +5063,12 @@ void lockdep_init_task(struct task_struct *task)
int i;
task->xhlock_idx = UINT_MAX;
+ task->hist_id = 0;
- for (i = 0; i < XHLOCK_CTX_NR; i++)
+ for (i = 0; i < XHLOCK_CTX_NR; i++) {
task->xhlock_idx_hist[i] = UINT_MAX;
+ task->hist_id_save[i] = 0;
+ }
task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
GFP_KERNEL);
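The filter in commit_xhlocks() uses a before() helper whose definition is not part of this diff. Assuming it follows the usual wrap-safe comparison idiom for an unsigned counter (the same signed-difference trick used for jiffies and TCP sequence numbers), it would look roughly like the sketch below; this is an assumption, not the kernel's actual definition.

	#include <stdbool.h>

	/*
	 * Assumed shape of before(): "a comes before b" for an unsigned
	 * generation counter, written so the comparison stays correct
	 * even after the counter wraps around.
	 */
	static inline bool before(unsigned int a, unsigned int b)
	{
		return (int)(a - b) < 0;
	}

With a helper of this shape, hist_id values can keep being compared even if the per-task counter eventually wraps, which a plain '<' would get wrong at the wrap point.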