summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    cperciva <cperciva@FreeBSD.org>  2004-04-06 23:08:49 +0000
committer cperciva <cperciva@FreeBSD.org>  2004-04-06 23:08:49 +0000
commite0793884f346394f3f82ae3270cb59491471d87c (patch)
tree5247a488f47c3b9407e8aca280eb04d121099b50
parenta356af4fc799bfdc962b4e80af259fed267f5015 (diff)
downloadFreeBSD-src-e0793884f346394f3f82ae3270cb59491471d87c.zip
FreeBSD-src-e0793884f346394f3f82ae3270cb59491471d87c.tar.gz
Introduce a callout_drain() function. This acts in the same manner as
callout_stop(), except that if the callout being stopped is currently in progress, it blocks attempts to reset the callout and waits until the callout is completed before it returns. This makes it possible to clean up callout-using code safely, e.g., without potentially freeing memory which is still being used by a callout.

Reviewed by: mux, gallatin, rwatson, jhb
-rw-r--r--share/man/man9/timeout.929
-rw-r--r--sys/kern/kern_timeout.c91
-rw-r--r--sys/sys/callout.h3
3 files changed, 111 insertions, 12 deletions
diff --git a/share/man/man9/timeout.9 b/share/man/man9/timeout.9
index 917e1c8..078b60a 100644
--- a/share/man/man9/timeout.9
+++ b/share/man/man9/timeout.9
@@ -45,6 +45,7 @@
.Nm callout_handle_init ,
.Nm callout_init ,
.Nm callout_stop ,
+.Nm callout_drain ,
.Nm callout_reset
.Nd execute a function after a specified length of time
.Sh SYNOPSIS
@@ -68,6 +69,8 @@ struct callout_handle handle = CALLOUT_HANDLE_INITIALIZER(&handle)
.Fn callout_init "struct callout *c" "int mpsafe"
.Ft int
.Fn callout_stop "struct callout *c"
+.Ft int
+.Fn callout_drain "struct callout *c"
.Ft void
.Fn callout_reset "struct callout *c" "int ticks" "timeout_t *func" "void *arg"
.Sh DESCRIPTION
@@ -162,7 +165,8 @@ Thus they are protected from re-entrancy.
.Pp
The functions
.Fn callout_init ,
-.Fn callout_stop
+.Fn callout_stop ,
+.Fn callout_drain
and
.Fn callout_reset
are low-level routines for clients who wish to allocate their own
@@ -171,7 +175,8 @@ callout structures.
The function
.Fn callout_init
initializes a callout so it can be passed to
-.Fn callout_stop
+.Fn callout_stop ,
+.Fn callout_drain
or
.Fn callout_reset
without any side effects.
@@ -194,6 +199,14 @@ If the callout has already been serviced or is currently being serviced,
then zero will be returned.
.Pp
The function
+.Fn callout_drain
+is identical to
+.Fn callout_stop
+except that it will wait for the callout to be completed if it is
+already in progress. This MUST NOT be called while holding any
+locks on which the callout might block, or deadlock will result.
+.Pp
+The function
.Fn callout_reset
first calls
.Fn callout_stop
@@ -209,16 +222,10 @@ that can be passed to
.Fn untimeout .
The
.Fn callout_stop
-function returns non-zero if the callout was still pending when it was
+and
+.Fn callout_drain
+functions return non-zero if the callout was still pending when it was
called or zero otherwise.
-.Sh BUGS
-This API has no way to cancel a callout and ensure that if it was
-canceled too late that the callout has actually finished.
-.Fn callout_stop
-only guarantees that the callout has started when it returns 0.
-It does not guarantee that the callout has finished.
-This can create a race when one wishes to ensure that no threads are
-executing before returning from a driver detach routine.
.Sh HISTORY
The current timeout and untimeout routines are based on the work of
.An Adam M. Costello
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index feb702d..8cd3467 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
+#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
@@ -71,6 +72,32 @@ struct mtx dont_sleep_in_callout;
#endif
static struct callout *nextsoftcheck; /* Next callout to be checked. */
+/*
+ * Locked by callout_lock:
+ * curr_callout - If a callout is in progress, it is curr_callout.
+ * If curr_callout is non-NULL, threads waiting on
+ * callout_wait will be woken up as soon as the
+ * relevant callout completes.
+ * wakeup_needed - If a thread is waiting on callout_wait, then
+ * wakeup_needed is nonzero. Increased only when
+ * curr_callout is non-NULL.
+ * wakeup_ctr - Incremented every time a thread wants to wait
+ * for a callout to complete. Modified only when
+ * curr_callout is non-NULL.
+ */
+static struct callout *curr_callout;
+static int wakeup_needed;
+static int wakeup_ctr;
+/*
+ * Locked by callout_wait_lock:
+ * callout_wait - If wakeup_needed is set, callout_wait will be
+ * triggered after the current callout finishes.
+ * wakeup_done_ctr - Set to the current value of wakeup_ctr after
+ * callout_wait is triggered.
+ */
+static struct mtx callout_wait_lock;
+static struct cv callout_wait;
+static int wakeup_done_ctr;
/*
* kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
@@ -122,6 +149,12 @@ kern_timeout_callwheel_init(void)
#ifdef DIAGNOSTIC
mtx_init(&dont_sleep_in_callout, "dont_sleep_in_callout", NULL, MTX_DEF);
#endif
+ mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
+ cv_init(&callout_wait, "callout_wait");
+ curr_callout = NULL;
+ wakeup_needed = 0;
+ wakeup_ctr = 0;
+ wakeup_done_ctr = 0;
}
/*
@@ -150,6 +183,7 @@ softclock(void *dummy)
int depth;
int mpcalls;
int gcalls;
+ int wakeup_cookie;
#ifdef DIAGNOSTIC
struct bintime bt1, bt2;
struct timespec ts2;
@@ -208,6 +242,7 @@ softclock(void *dummy)
c->c_flags =
(c->c_flags & ~CALLOUT_PENDING);
}
+ curr_callout = c;
mtx_unlock_spin(&callout_lock);
if (!(c_flags & CALLOUT_MPSAFE)) {
mtx_lock(&Giant);
@@ -241,6 +276,21 @@ softclock(void *dummy)
if (!(c_flags & CALLOUT_MPSAFE))
mtx_unlock(&Giant);
mtx_lock_spin(&callout_lock);
+ curr_callout = NULL;
+ if (wakeup_needed) {
+ /*
+ * There might be someone waiting
+ * for the callout to complete.
+ */
+ wakeup_cookie = wakeup_ctr;
+ mtx_unlock_spin(&callout_lock);
+ mtx_lock(&callout_wait_lock);
+ cv_broadcast(&callout_wait);
+ wakeup_done_ctr = wakeup_cookie;
+ mtx_unlock(&callout_wait_lock);
+ mtx_lock_spin(&callout_lock);
+ wakeup_needed = 0;
+ };
steps = 0;
c = nextsoftcheck;
}
@@ -344,6 +394,17 @@ callout_reset(c, to_ticks, ftn, arg)
{
mtx_lock_spin(&callout_lock);
+
+ if (c == curr_callout && wakeup_needed) {
+ /*
+ * We're being asked to reschedule a callout which is
+ * currently in progress, and someone has called
+ * callout_drain to kill that callout. Don't reschedule.
+ */
+ mtx_unlock_spin(&callout_lock);
+ return;
+ };
+
if (c->c_flags & CALLOUT_PENDING)
callout_stop(c);
@@ -364,18 +425,46 @@ callout_reset(c, to_ticks, ftn, arg)
mtx_unlock_spin(&callout_lock);
}
+/* For binary compatibility */
+#undef callout_stop
int
callout_stop(c)
struct callout *c;
{
+ return(_callout_stop_safe(c, 0));
+}
+
+int
+_callout_stop_safe(c, safe)
+ struct callout *c;
+ int safe;
+{
+ int wakeup_cookie;
+
mtx_lock_spin(&callout_lock);
/*
* Don't attempt to delete a callout that's not on the queue.
*/
if (!(c->c_flags & CALLOUT_PENDING)) {
c->c_flags &= ~CALLOUT_ACTIVE;
- mtx_unlock_spin(&callout_lock);
+ if (c == curr_callout && safe) {
+ /* We need to wait until the callout is finished */
+ wakeup_needed = 1;
+ wakeup_cookie = wakeup_ctr++;
+ mtx_unlock_spin(&callout_lock);
+ mtx_lock(&callout_wait_lock);
+ /*
+ * Check to make sure that softclock() didn't
+ * do the wakeup in between our dropping
+ * callout_lock and picking up callout_wait_lock
+ */
+ if (wakeup_cookie - wakeup_done_ctr > 0)
+ cv_wait(&callout_wait, &callout_wait_lock);
+
+ mtx_unlock(&callout_wait_lock);
+ } else
+ mtx_unlock_spin(&callout_lock);
return (0);
}
c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
diff --git a/sys/sys/callout.h b/sys/sys/callout.h
index cab6692..d4dcbe8 100644
--- a/sys/sys/callout.h
+++ b/sys/sys/callout.h
@@ -81,6 +81,9 @@ void callout_init(struct callout *, int);
#define callout_pending(c) ((c)->c_flags & CALLOUT_PENDING)
void callout_reset(struct callout *, int, void (*)(void *), void *);
int callout_stop(struct callout *);
+#define callout_stop(c) _callout_stop_safe(c, 0)
+#define callout_drain(c) _callout_stop_safe(c, 1)
+int _callout_stop_safe(struct callout *, int);
#endif
OpenPOWER on IntegriCloud