author     Peter Zijlstra <peterz@infradead.org>    2017-03-03 13:57:56 +0100
committer  Ingo Molnar <mingo@kernel.org>           2017-03-16 09:57:09 +0100
commit     bf7b3ac2e36ac054f93e5dd8d85dfd754b5e1c09 (patch)
tree       17010ad6d0694cb065462dd0110c43f2eee0d39e /kernel
parent     383776fa7527745224446337f2dcfb0f0d1b8b56 (diff)
locking/ww_mutex: Improve test to cover acquire context changes
Currently each thread starts an acquire context only once, and performs
all its loop iterations under it. This means that the Wound/Wait
relations between threads are fixed.

To make things a little more realistic and cover more of the
functionality with the test, open a new acquire context for each loop.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
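For context, here is a minimal sketch (not part of the patch) of the
per-iteration acquire-context pattern the test is switched to. The class
demo_ww_class, the locks a/b and the helpers demo_init()/demo_loop() are
made-up names for illustration; only the ww_mutex calls themselves are the
real API.

/*
 * Illustrative only: a worker loop that, like the reworked test, opens a
 * fresh acquire context on every pass instead of once per thread.
 */
#include <linux/errno.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);
static struct ww_mutex a, b;

static void demo_init(void)
{
        ww_mutex_init(&a, &demo_ww_class);
        ww_mutex_init(&b, &demo_ww_class);
}

static void demo_loop(int nloops)
{
        struct ww_acquire_ctx ctx;

        do {
                int err;

                /* New context per iteration: the Wound/Wait ordering
                 * against other threads is re-decided on every pass. */
                ww_acquire_init(&ctx, &demo_ww_class);

                err = ww_mutex_lock(&a, &ctx);
                if (!err) {
                        err = ww_mutex_lock(&b, &ctx);
                        if (!err) {
                                ww_acquire_done(&ctx); /* all locks held */
                                ww_mutex_unlock(&b);
                        }
                        ww_mutex_unlock(&a);
                }
                /* On -EDEADLK a real user would back off and retry with
                 * ww_mutex_lock_slow(); the sketch simply gives up the pass. */

                ww_acquire_fini(&ctx);
        } while (--nloops);
}

Moving ww_acquire_init()/ww_acquire_fini() inside the loop is exactly what
the hunks below do for stress_inorder_work() and stress_reorder_work().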
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/locking/test-ww_mutex.c    15
1 file changed, 7 insertions, 8 deletions
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 6b7abb3..90d8d88 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -398,12 +398,11 @@ static void stress_inorder_work(struct work_struct *work)
         if (!order)
                 return;
 
-        ww_acquire_init(&ctx, &ww_class);
-
         do {
                 int contended = -1;
                 int n, err;
 
+                ww_acquire_init(&ctx, &ww_class);
 retry:
                 err = 0;
                 for (n = 0; n < nlocks; n++) {
@@ -433,9 +432,9 @@ retry:
                                __func__, err);
                         break;
                 }
-        } while (--stress->nloops);
 
-        ww_acquire_fini(&ctx);
+                ww_acquire_fini(&ctx);
+        } while (--stress->nloops);
 
         kfree(order);
         kfree(stress);
@@ -470,9 +469,9 @@ static void stress_reorder_work(struct work_struct *work)
         kfree(order);
         order = NULL;
 
-        ww_acquire_init(&ctx, &ww_class);
-
         do {
+                ww_acquire_init(&ctx, &ww_class);
+
                 list_for_each_entry(ll, &locks, link) {
                         err = ww_mutex_lock(ll->lock, &ctx);
                         if (!err)
@@ -495,9 +494,9 @@ static void stress_reorder_work(struct work_struct *work)
                 dummy_load(stress);
                 list_for_each_entry(ll, &locks, link)
                         ww_mutex_unlock(ll->lock);
-        } while (--stress->nloops);
 
-        ww_acquire_fini(&ctx);
+                ww_acquire_fini(&ctx);
+        } while (--stress->nloops);
 
 out:
         list_for_each_entry_safe(ll, ln, &locks, link)