From 486320aac47a70ba38fa073d099952762d964f7b Mon Sep 17 00:00:00 2001
From: kib
Date: Wed, 2 Mar 2016 18:46:17 +0000
Subject: If callout_stop_safe() noted that the callout is currently executing,
 but the next invocation is cancelled while migrating, sleepq_check_timeout()
 needs to be informed that the callout is stopped.  Otherwise the thread
 switches off the CPU and never becomes runnable, since the running callout
 could have already raced with us, while the migrating and cancelled callout
 could be the one expected to set the TDP_TIMOFAIL flag for us.  This
 contradicts the behaviour of callout_stop() expected by other callers, which
 e.g. decrement references from the callout callbacks.  Add a new flag
 CS_MIGRBLOCK requesting that this situation be reported as 'successfully
 stopped'.

Reviewed by:	jhb (previous version)
Tested by:	cognet, pho
PR:		200992
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Differential revision:	https://reviews.freebsd.org/D5221
---
 sys/kern/kern_timeout.c    | 10 +++++-----
 sys/kern/subr_sleepqueue.c |  3 ++-
 sys/sys/callout.h          |  8 +++++++-
 3 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 9c9d25f..b13a63f 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -1155,14 +1155,14 @@ callout_schedule(struct callout *c, int to_ticks)
 }
 
 int
-_callout_stop_safe(struct callout *c, int safe, void (*drain)(void *))
+_callout_stop_safe(struct callout *c, int flags, void (*drain)(void *))
 {
 	struct callout_cpu *cc, *old_cc;
 	struct lock_class *class;
 	int direct, sq_locked, use_lock;
 	int not_on_a_list;
 
-	if (safe)
+	if ((flags & CS_DRAIN) != 0)
 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
 		    "calling %s", __func__);
 
@@ -1170,7 +1170,7 @@ _callout_stop_safe(struct callout *c, int safe, void (*drain)(void *))
 	 * Some old subsystems don't hold Giant while running a callout_stop(),
 	 * so just discard this check for the moment.
 	 */
-	if (!safe && c->c_lock != NULL) {
+	if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
 		if (c->c_lock == &Giant.lock_object)
 			use_lock = mtx_owned(&Giant);
 		else {
@@ -1253,7 +1253,7 @@ again:
 			return (-1);
 		}
 
-		if (safe) {
+		if ((flags & CS_DRAIN) != 0) {
 			/*
 			 * The current callout is running (or just
 			 * about to run) and blocking is allowed, so
@@ -1370,7 +1370,7 @@ again:
 			cc_exec_drain(cc, direct) = drain;
 		}
 		CC_UNLOCK(cc);
-		return (0);
+		return ((flags & CS_MIGRBLOCK) != 0);
 	}
 	CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 	    c, c->c_func, c->c_arg);
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index bbbec920..12908f6 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -586,7 +586,8 @@ sleepq_check_timeout(void)
 	 * another CPU, so synchronize with it to avoid having it
 	 * accidentally wake up a subsequent sleep.
 	 */
-	else if (callout_stop(&td->td_slpcallout) == 0) {
+	else if (_callout_stop_safe(&td->td_slpcallout, CS_MIGRBLOCK, NULL)
+	    == 0) {
 		td->td_flags |= TDF_TIMEOUT;
 		TD_SET_SLEEPING(td);
 		mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
diff --git a/sys/sys/callout.h b/sys/sys/callout.h
index 3e71c87..689b547 100644
--- a/sys/sys/callout.h
+++ b/sys/sys/callout.h
@@ -62,6 +62,12 @@ struct callout_handle {
 	struct callout *callout;
 };
 
+/* Flags for callout_stop_safe() */
+#define	CS_DRAIN	0x0001	/* callout_drain(), wait allowed */
+#define	CS_MIGRBLOCK	0x0002	/* Block migration, return value
+				   indicates that the callout was
+				   executing */
+
 #ifdef _KERNEL
 /*
  * Note the flags field is actually *two* fields.  The c_flags
@@ -81,7 +87,7 @@ struct callout_handle {
  */
#define	callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)
 #define	callout_deactivate(c)	((c)->c_flags &= ~CALLOUT_ACTIVE)
-#define	callout_drain(c)	_callout_stop_safe(c, 1, NULL)
+#define	callout_drain(c)	_callout_stop_safe(c, CS_DRAIN, NULL)
 void	callout_init(struct callout *, int);
 void	_callout_init_lock(struct callout *, struct lock_object *, int);
 #define	callout_init_mtx(c, mtx, flags)					\
-- 
cgit v1.1
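Editor's sketch (not part of the commit): the small userland C program below models only the
return-value change described in the commit message.  The struct model_callout and
stop_callout() names are hypothetical stand-ins for the kernel state and for
_callout_stop_safe(); they are meant to show why sleepq_check_timeout() needs CS_MIGRBLOCK
to be told "stopped" when a migrating invocation was cancelled, while other callers of
callout_stop() must keep seeing 0 for a still-running callout.

	#include <stdio.h>

	#define	CS_DRAIN	0x0001
	#define	CS_MIGRBLOCK	0x0002

	/* Hypothetical model of a callout whose callback is running while its
	 * next (migrating) invocation has already been cancelled. */
	struct model_callout {
		int	running;	/* callback currently executing */
		int	migr_cancelled;	/* next invocation cancelled while migrating */
	};

	/* Returns 1 if the caller may treat the callout as stopped. */
	static int
	stop_callout(struct model_callout *c, int flags)
	{
		if (c->running && c->migr_cancelled) {
			/*
			 * A plain callout_stop()-style call reports 0 here,
			 * because the callback may still be executing and a
			 * caller might e.g. drop a reference too early.  With
			 * CS_MIGRBLOCK the cancelled migration is reported as
			 * a successful stop, which is what
			 * sleepq_check_timeout() needs before switching the
			 * thread off the CPU.
			 */
			return ((flags & CS_MIGRBLOCK) != 0);
		}
		return (1);
	}

	int
	main(void)
	{
		struct model_callout c = { 1, 1 };

		printf("plain stop:        %d\n", stop_callout(&c, 0));
		printf("with CS_MIGRBLOCK: %d\n", stop_callout(&c, CS_MIGRBLOCK));
		return (0);
	}

Run standalone, the first call prints 0 (would leave the sleepqueue code believing the
timeout can still fire) and the second prints 1 (safe to treat the callout as stopped),
mirroring the new return ((flags & CS_MIGRBLOCK) != 0) path in _callout_stop_safe().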