author | Al Viro <viro@zeniv.linux.org.uk> | 2016-05-17 02:17:59 -0400
committer | Al Viro <viro@zeniv.linux.org.uk> | 2016-05-17 02:17:59 -0400
commit | 0e0162bb8c008fa7742f69d4d4982c8a37b88f95 (patch)
tree | 4b230ab63b5698a44d2948e70a6cc22405c351e9 /kernel/workqueue.c
parent | ae05327a00fd47c34dfe25294b359a3f3fef96e8 (diff)
parent | 38b78a5f18584db6fa7441e0f4531b283b0e6725 (diff)
Merge branch 'ovl-fixes' into for-linus
Backmerge to resolve a conflict in ovl_lookup_real(); the conflicting
change should have been redone on top of "ovl_lookup_real(): use
lookup_one_len_unlocked()" instead, but it was too late in the cycle to rebase.
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 29
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3..3bfdff0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 */
 	smp_wmb();
 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	/*
+	 * The following mb guarantees that the previous clear of a PENDING bit
+	 * will not be reordered with any speculative LOADS or STORES from
+	 * work->current_func, which is executed afterwards.  This possible
+	 * reordering can lead to a missed execution on an attempt to queue
+	 * the same @work.  E.g. consider this case:
+	 *
+	 *   CPU#0                         CPU#1
+	 *   ----------------------------  --------------------------------
+	 *
+	 * 1  STORE event_indicated
+	 * 2  queue_work_on() {
+	 * 3    test_and_set_bit(PENDING)
+	 * 4 }                             set_..._and_clear_pending() {
+	 * 5                                 set_work_data() # clear bit
+	 * 6                                 smp_mb()
+	 * 7                               work->current_func() {
+	 * 8                                 LOAD event_indicated
+	 *                                 }
+	 *
+	 * Without an explicit full barrier the speculative LOAD on line 8 can
+	 * be executed before CPU#0 does the STORE on line 1.  If that happens,
+	 * CPU#0 observes that the PENDING bit is still set and a new execution
+	 * of @work is not queued, in the hope that CPU#1 will eventually
+	 * finish the queued @work.  Meanwhile CPU#1 does not see that
+	 * event_indicated is set, because the speculative LOAD was executed
+	 * before the actual STORE.
+	 */
+	smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
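To make the ordering requirement concrete, here is a minimal user-space sketch of the same pattern, assuming C11 atomics as stand-ins for the kernel's bitops and smp_mb(); the names (try_queue, worker, event_indicated, pending) are hypothetical and only mirror the race diagrammed in the comment above, not any kernel API.

/*
 * Sketch: why a full barrier must separate "clear PENDING" from the
 * work function's loads.  C11 model of the kernel pattern above.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int event_indicated;                /* data the work must see */
static atomic_flag pending = ATOMIC_FLAG_INIT;    /* models the PENDING bit */

/* CPU#0 side: publish the event, then try to queue the work (lines 1-3). */
static bool try_queue(void)
{
	atomic_store_explicit(&event_indicated, 1, memory_order_relaxed);
	/*
	 * test_and_set_bit(PENDING): if the bit was already set we skip
	 * queueing and rely on the running worker to observe
	 * event_indicated -- the assumption the smp_mb() makes safe.
	 */
	return !atomic_flag_test_and_set(&pending);
}

/* CPU#1 side: clear PENDING, then run the work function (lines 5-8). */
static void worker(void)
{
	/* set_work_data(): clears PENDING (a plain store in the kernel) */
	atomic_flag_clear_explicit(&pending, memory_order_relaxed);

	/*
	 * smp_mb(): without this full fence the load of event_indicated
	 * below could be hoisted above the clear of PENDING, so a
	 * concurrent try_queue() that still saw PENDING set would skip
	 * queueing while this worker misses the event.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* work->current_func(): */
	if (atomic_load_explicit(&event_indicated, memory_order_relaxed))
		puts("event observed");
}

int main(void)
{
	if (!try_queue())
		puts("PENDING already set; relying on the worker");
	worker();
	return 0;
}

The seq_cst fence sits exactly where the patch places smp_mb(): between clearing the pending flag and any load the work function performs. It pairs with the full barrier implied by test_and_set_bit() on the queueing side, which atomic_flag_test_and_set()'s default seq_cst ordering models here.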