author     attilio <attilio@FreeBSD.org>  2011-01-08 18:51:15 +0000
committer  attilio <attilio@FreeBSD.org>  2011-01-08 18:51:15 +0000
commit     7a8b4abb9736bc89cda97907a13d0fa958f16edf (patch)
tree       d133ffd7afd8f0fc446c762c45ba91141b801fc6  /sys/kern/kern_timeout.c
parent     25001a6e2e10451d55d9e088f5a7604dfafb6e74 (diff)
Revert r216805.

That revision introduces a bug which is more visible than the problems it is
trying to fix.  As my time is very limited in this period, I am going to
commit this patch back once it is fully fixed.

Reported by:	dim, Nicholas Esborn
Diffstat (limited to 'sys/kern/kern_timeout.c')
-rw-r--r--  sys/kern/kern_timeout.c | 142
1 file changed, 23 insertions(+), 119 deletions(-)
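For context: the hunks below remove the deferred-migration machinery added in
r216805 (the cc_migration_* fields, the CPUBLOCK marker and callout_cc_add())
and restore the older retry loop in callout_reset_on(), where moving a callout
to another CPU means dropping the old per-CPU lock, updating c_cpu and starting
over.  The following is a minimal userspace sketch of that retry pattern only;
the names fake_callout_lock(), fake_callout_reset_on() and NCPU are
illustrative, and plain pthread mutexes stand in for the kernel's per-CPU spin
mutexes.

/*
 * Sketch of the "drop the old lock, retag c_cpu, retry" pattern restored by
 * this revert.  Not the kernel code.
 */
#include <pthread.h>
#include <stdio.h>

#define NCPU 4				/* illustrative CPU count */

struct callout_cpu {
	pthread_mutex_t cc_lock;	/* stands in for the per-CPU spin mutex */
};

struct callout {
	int c_cpu;			/* CPU whose wheel currently owns the callout */
};

static struct callout_cpu cc_cpu[NCPU];

/*
 * Lock the per-CPU state the callout currently belongs to, re-checking c_cpu
 * after the lock is taken (the same idea as callout_lock() in the diff).
 */
static struct callout_cpu *
fake_callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
		cc = &cc_cpu[cpu];
		pthread_mutex_lock(&cc->cc_lock);
		if (cpu == c->c_cpu)
			return (cc);
		/* The callout moved while we were blocked; try again. */
		pthread_mutex_unlock(&cc->cc_lock);
	}
}

/* Re-arm the callout on a (possibly different) CPU, retry-style. */
static void
fake_callout_reset_on(struct callout *c, int cpu)
{
	struct callout_cpu *cc;

retry:
	cc = fake_callout_lock(c);
	if (c->c_cpu != cpu) {
		/* Can't hold both per-CPU locks at once: move and recheck. */
		c->c_cpu = cpu;
		pthread_mutex_unlock(&cc->cc_lock);
		goto retry;
	}
	/* ... the real code inserts c into cc's callwheel here ... */
	pthread_mutex_unlock(&cc->cc_lock);
}

int
main(void)
{
	struct callout c = { .c_cpu = 0 };
	int i;

	for (i = 0; i < NCPU; i++)
		pthread_mutex_init(&cc_cpu[i].cc_lock, NULL);
	fake_callout_reset_on(&c, 2);
	printf("callout now on cpu %d\n", c.c_cpu);
	return (0);
}

On a POSIX system this should build with "cc -pthread".  The design point is
that c_cpu may only be trusted while the matching per-CPU lock is held, which
is why both the lock helper and the reset path re-check it after every
acquisition instead of pinning the callout with CPUBLOCK as r216805 did.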
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 1984d44..fa04087 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -56,10 +56,6 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/smp.h>
-#ifdef SMP
-#include <machine/cpu.h>
-#endif
-
SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
@@ -111,15 +107,11 @@ struct callout_cpu {
struct callout *cc_next;
struct callout *cc_curr;
void *cc_cookie;
- void (*cc_migration_func)(void *);
- void *cc_migration_arg;
int cc_ticks;
int cc_softticks;
int cc_cancel;
int cc_waiting;
int cc_firsttick;
- int cc_migration_cpu;
- int cc_migration_ticks;
};
#ifdef SMP
@@ -131,10 +123,8 @@ struct callout_cpu cc_cpu;
#define CC_CPU(cpu) &cc_cpu
#define CC_SELF() &cc_cpu
#endif
-#define CPUBLOCK MAXCPU
#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
-#define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
static int timeout_cpu;
void (*callout_new_inserted)(int cpu, int ticks) = NULL;
@@ -198,7 +188,6 @@ callout_cpu_init(struct callout_cpu *cc)
for (i = 0; i < callwheelsize; i++) {
TAILQ_INIT(&cc->cc_callwheel[i]);
}
- cc->cc_migration_cpu = CPUBLOCK;
if (cc->cc_callout == NULL)
return;
for (i = 0; i < ncallout; i++) {
@@ -322,13 +311,6 @@ callout_lock(struct callout *c)
for (;;) {
cpu = c->c_cpu;
-#ifdef SMP
- if (cpu == CPUBLOCK) {
- while (c->c_cpu == CPUBLOCK)
- cpu_spinwait();
- continue;
- }
-#endif
cc = CC_CPU(cpu);
CC_LOCK(cc);
if (cpu == c->c_cpu)
@@ -338,29 +320,6 @@ callout_lock(struct callout *c)
return (cc);
}
-static void
-callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
- void (*func)(void *), void *arg, int cpu)
-{
-
- CC_LOCK_ASSERT(cc);
-
- if (to_ticks <= 0)
- to_ticks = 1;
- c->c_arg = arg;
- c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
- c->c_func = func;
- c->c_time = ticks + to_ticks;
- TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
- c, c_links.tqe);
- if ((c->c_time - cc->cc_firsttick) < 0 &&
- callout_new_inserted != NULL) {
- cc->cc_firsttick = c->c_time;
- (*callout_new_inserted)(cpu,
- to_ticks + (ticks - cc->cc_ticks));
- }
-}
-
/*
* The callout mechanism is based on the work of Adam M. Costello and
* George Varghese, published in a technical report entitled "Redesigning
@@ -431,14 +390,11 @@ softclock(void *arg)
steps = 0;
}
} else {
- struct callout_cpu *new_cc;
void (*c_func)(void *);
- void (*new_func)(void *);
- void *c_arg, *new_arg;
+ void *c_arg;
struct lock_class *class;
struct lock_object *c_lock;
int c_flags, sharedlock;
- int new_cpu, new_ticks;
cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
TAILQ_REMOVE(bucket, c, c_links.tqe);
@@ -540,40 +496,7 @@ softclock(void *arg)
c_links.sle);
}
cc->cc_curr = NULL;
-
- /*
- * If the callout was scheduled for
- * migration just perform it now.
- */
- if (cc->cc_migration_cpu != CPUBLOCK) {
-
- /*
- * There must not be any waiting
- * thread now because the callout
- * has a blocked CPU.
- * Also, the callout must not be
- * freed, but that is not easy to
- * assert.
- */
- MPASS(cc->cc_waiting == 0);
- new_cpu = cc->cc_migration_cpu;
- new_ticks = cc->cc_migration_ticks;
- new_func = cc->cc_migration_func;
- new_arg = cc->cc_migration_arg;
- cc->cc_migration_cpu = CPUBLOCK;
- cc->cc_migration_ticks = 0;
- cc->cc_migration_func = NULL;
- cc->cc_migration_arg = NULL;
- CC_UNLOCK(cc);
- new_cc = CC_CPU(new_cpu);
- CC_LOCK(new_cc);
- MPASS(c->c_cpu == CPUBLOCK);
- c->c_cpu = new_cpu;
- callout_cc_add(c, new_cc, new_ticks,
- new_func, new_arg, new_cpu);
- CC_UNLOCK(new_cc);
- CC_LOCK(cc);
- } else if (cc->cc_waiting) {
+ if (cc->cc_waiting) {
/*
* There is someone waiting
* for the callout to complete.
@@ -694,6 +617,7 @@ callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
*/
if (c->c_flags & CALLOUT_LOCAL_ALLOC)
cpu = c->c_cpu;
+retry:
cc = callout_lock(c);
if (cc->cc_curr == c) {
/*
@@ -725,34 +649,31 @@ callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
cancelled = 1;
c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
}
-#ifdef SMP
/*
- * If the lock must migrate we have to block the callout locking
- * until migration is completed.
- * If the callout is currently running, just defer the migration
- * to a more appropriate moment.
+ * If the lock must migrate we have to check the state again as
+ * we can't hold both the new and old locks simultaneously.
*/
if (c->c_cpu != cpu) {
- c->c_cpu = CPUBLOCK;
- if (cc->cc_curr == c) {
- cc->cc_migration_cpu = cpu;
- cc->cc_migration_ticks = to_ticks;
- cc->cc_migration_func = ftn;
- cc->cc_migration_arg = arg;
- CTR5(KTR_CALLOUT,
- "migration of %p func %p arg %p in %d to %u deferred",
- c, c->c_func, c->c_arg, to_ticks, cpu);
- CC_UNLOCK(cc);
- return (cancelled);
- }
- CC_UNLOCK(cc);
- cc = CC_CPU(cpu);
- CC_LOCK(cc);
c->c_cpu = cpu;
+ CC_UNLOCK(cc);
+ goto retry;
}
-#endif
- callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
+ if (to_ticks <= 0)
+ to_ticks = 1;
+
+ c->c_arg = arg;
+ c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
+ c->c_func = ftn;
+ c->c_time = ticks + to_ticks;
+ TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
+ c, c_links.tqe);
+ if ((c->c_time - cc->cc_firsttick) < 0 &&
+ callout_new_inserted != NULL) {
+ cc->cc_firsttick = c->c_time;
+ (*callout_new_inserted)(cpu,
+ to_ticks + (ticks - cc->cc_ticks));
+ }
CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
CC_UNLOCK(cc);
@@ -780,7 +701,7 @@ _callout_stop_safe(c, safe)
struct callout *c;
int safe;
{
- struct callout_cpu *cc, *old_cc;
+ struct callout_cpu *cc;
struct lock_class *class;
int use_lock, sq_locked;
@@ -800,23 +721,8 @@ _callout_stop_safe(c, safe)
use_lock = 0;
sq_locked = 0;
- old_cc = NULL;
again:
cc = callout_lock(c);
-
- /*
- * If the callout was migrating while the callout cpu lock was
- * dropped, just drop the sleepqueue lock and check the states
- * again.
- */
- if (sq_locked != 0 && cc != old_cc) {
- CC_UNLOCK(cc);
- sleepq_release(&old_cc->cc_waiting);
- sq_locked = 0;
- old_cc = NULL;
- goto again;
- }
-
/*
* If the callout isn't pending, it's not on the queue, so
* don't attempt to remove it from the queue. We can try to
@@ -868,7 +774,6 @@ again:
CC_UNLOCK(cc);
sleepq_lock(&cc->cc_waiting);
sq_locked = 1;
- old_cc = cc;
goto again;
}
cc->cc_waiting = 1;
@@ -879,7 +784,6 @@ again:
SLEEPQ_SLEEP, 0);
sleepq_wait(&cc->cc_waiting, 0);
sq_locked = 0;
- old_cc = NULL;
/* Reacquire locks previously released. */
PICKUP_GIANT();
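The revert also moves the callwheel insertion back inline into
callout_reset_on() (the restored block that appends c to
cc_callwheel[c->c_time & callwheelmask]).  A tiny standalone illustration of
that bucket indexing follows; CALLWHEELSIZE and CALLWHEELMASK are assumed
stand-ins for the kernel's callwheelsize/callwheelmask variables, and the tick
values are made up.

/*
 * Illustration of hashing an absolute expiry tick into a callwheel bucket.
 * Not the kernel code.
 */
#include <stdio.h>

#define CALLWHEELSIZE	256			/* must be a power of two */
#define CALLWHEELMASK	(CALLWHEELSIZE - 1)

int
main(void)
{
	int ticks = 1000000;			/* pretend current tick counter */
	int to_ticks[] = { 0, 1, 5, 255, 256, 1000 };
	int i, t, c_time;

	for (i = 0; i < (int)(sizeof(to_ticks) / sizeof(to_ticks[0])); i++) {
		t = to_ticks[i];
		if (t <= 0)			/* same clamp the restored code applies */
			t = 1;
		c_time = ticks + t;		/* absolute expiry tick */
		printf("to_ticks %4d -> c_time %d -> bucket %d\n",
		    t, c_time, c_time & CALLWHEELMASK);
	}
	return (0);
}

Because the wheel size is a power of two, the mask keeps only the low-order
bits of the absolute expiry tick, so entries whose expiry times differ by a
multiple of the wheel size share a bucket and softclock() has to compare each
entry's c_time against the current tick before running it.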