path: root/sys/kern/kern_timeout.c
author		kib <kib@FreeBSD.org>	2012-12-05 19:02:22 +0000
committer	kib <kib@FreeBSD.org>	2012-12-05 19:02:22 +0000
commit		efc06bd8013ad630fe9083f423265d89b9d717ff (patch)
tree		8f3d09a6d5dadbd1bc9f6e8fc4a41fc802629004 /sys/kern/kern_timeout.c
parent		97d8ae3890bf8dc9a8e2b6a9aab818d9a7778b07 (diff)
The softclock_call_cc() now executes with the callout already removed
from the callwheel.  Calculate cc->cc_next before removing the callout;
otherwise the code would follow invalid tailq links.

After this, make softclock_call_cc() return void, since it always
returned cc->cc_next, which is immediately available to softclock()
anyway.  This also allows a label under #ifdef SMP to be eliminated.

Remove the assignment of cc->cc_next from callout_cc_del(), since the
function is called with the callout already removed from the callwheel.

When cancelling a migration, also clear the CALLOUT_DFRMIGRATION flag.

Postpone freeing timeout(9)-allocated callouts until after the
migration checks are done.

Add some stricter asserts about the state of the callout in
softclock_call_cc().

Reviewed by:	attilio
Reported and tested by:	pho (previous version)
MFC after:	2 weeks
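
[Editor's note] The list corruption fixed here is the classic unsafe-removal
pattern on intrusive queues: once TAILQ_REMOVE() unlinks an element, its
tailq pointers are stale, so the successor must be captured first.  Below is
a minimal, compilable userland sketch of that pattern; struct item, its
fields, and the driver in main() are hypothetical illustrations, not the
kernel code.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int			val;
	TAILQ_ENTRY(item)	link;
};

static TAILQ_HEAD(, item) head = TAILQ_HEAD_INITIALIZER(head);

int
main(void)
{
	struct item *it, *next;
	int i;

	for (i = 0; i < 5; i++) {
		it = malloc(sizeof(*it));
		it->val = i;
		TAILQ_INSERT_TAIL(&head, it, link);
	}

	it = TAILQ_FIRST(&head);
	while (it != NULL) {
		/*
		 * Capture the successor before unlinking; after
		 * TAILQ_REMOVE() the element's tailq links are stale.
		 */
		next = TAILQ_NEXT(it, link);
		if (it->val % 2 == 0) {
			TAILQ_REMOVE(&head, it, link);
			free(it);
		}
		it = next;
	}
	TAILQ_FOREACH(it, &head, link)
		printf("%d\n", it->val);
	return (0);
}

Note that softclock() cannot simply keep the successor in a local variable:
softclock_call_cc() drops the CC lock around the handler, so the cached
successor lives in the shared cc->cc_next, which callout_stop() (last hunk
below) advances if it unlinks that very callout in the meantime.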
Diffstat (limited to 'sys/kern/kern_timeout.c')
 sys/kern/kern_timeout.c | 61 ++++++++++++++++++++++++++++-----------------------------
 1 file changed, 32 insertions(+), 29 deletions(-)
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index a7aeb34..c95c884 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -439,15 +439,13 @@ static void
 callout_cc_del(struct callout *c, struct callout_cpu *cc)
 {
 
-	if (cc->cc_next == c)
-		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
-	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
-		c->c_func = NULL;
-		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
-	}
+	if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
+		return;
+	c->c_func = NULL;
+	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
 }
 
-static struct callout *
+static void
 softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
     int *lockcalls, int *gcalls)
 {
@@ -469,7 +467,9 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
 	static timeout_t *lastfunc;
 #endif
 
-	cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
+	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
+	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
+	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
 	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
 	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
 	c_lock = c->c_lock;
@@ -537,20 +537,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
 		class->lc_unlock(c_lock);
 skip:
 	CC_LOCK(cc);
-	/*
-	 * If the current callout is locally allocated (from
-	 * timeout(9)) then put it on the freelist.
-	 *
-	 * Note: we need to check the cached copy of c_flags because
-	 * if it was not local, then it's not safe to deref the
-	 * callout pointer.
-	 */
-	if (c_flags & CALLOUT_LOCAL_ALLOC) {
-		KASSERT(c->c_flags == CALLOUT_LOCAL_ALLOC,
-		    ("corrupted callout"));
-		c->c_func = NULL;
-		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
-	}
+	KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
 	cc->cc_curr = NULL;
 	if (cc->cc_waiting) {
 		/*
@@ -559,13 +546,17 @@ skip:
 		 * If the callout was scheduled for
 		 * migration just cancel it.
 		 */
-		if (cc_cme_migrating(cc))
+		if (cc_cme_migrating(cc)) {
 			cc_cme_cleanup(cc);
+			c->c_flags &= ~CALLOUT_DFRMIGRATION;
+		}
 		cc->cc_waiting = 0;
 		CC_UNLOCK(cc);
 		wakeup(&cc->cc_waiting);
 		CC_LOCK(cc);
 	} else if (cc_cme_migrating(cc)) {
+		KASSERT((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0,
+		    ("Migrating legacy callout %p", c));
 #ifdef SMP
 		/*
 		 * If the callout was scheduled for
@@ -585,7 +576,7 @@ skip:
 			    "deferred cancelled %p func %p arg %p",
 			    c, new_func, new_arg);
 			callout_cc_del(c, cc);
-			goto nextc;
+			return;
 		}
 		c->c_flags &= ~CALLOUT_DFRMIGRATION;
 
@@ -604,10 +595,18 @@ skip:
 		panic("migration should not happen");
 #endif
 	}
-#ifdef SMP
-nextc:
-#endif
-	return (cc->cc_next);
+	/*
+	 * If the current callout is locally allocated (from
+	 * timeout(9)) then put it on the freelist.
+	 *
+	 * Note: we need to check the cached copy of c_flags because
+	 * if it was not local, then it's not safe to deref the
+	 * callout pointer.
+	 */
+	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
+	    c->c_flags == CALLOUT_LOCAL_ALLOC,
+	    ("corrupted callout"));
+	callout_cc_del(c, cc);
 }
 
 /*
@@ -674,10 +673,12 @@ softclock(void *arg)
 					steps = 0;
 				}
 			} else {
+				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
 				TAILQ_REMOVE(bucket, c, c_links.tqe);
-				c = softclock_call_cc(c, cc, &mpcalls,
+				softclock_call_cc(c, cc, &mpcalls,
 				    &lockcalls, &gcalls);
 				steps = 0;
+				c = cc->cc_next;
 			}
 		}
 	}
@@ -1022,6 +1023,8 @@ again:
 		CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 		    c, c->c_func, c->c_arg);
 
+		if (cc->cc_next == c)
+			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
 		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
 		    c_links.tqe);
 		callout_cc_del(c, cc);
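
[Editor's note] For context, the simplified callout_cc_del() is the standard
intrusive freelist idiom behind timeout(9)'s locally allocated callouts:
clear the handler pointer so a parked entry cannot fire, then push the entry
onto a singly-linked freelist.  A userland sketch under assumed names
(struct entry, entry_alloc() and entry_free() are illustrative, not a kernel
API):

#include <sys/queue.h>
#include <stdlib.h>

struct entry {
	void	(*func)(void *);	/* NULL marks "on the freelist" */
	void	*arg;
	SLIST_ENTRY(entry) free_link;
};

static SLIST_HEAD(, entry) freelist = SLIST_HEAD_INITIALIZER(freelist);

/* Park an entry on the freelist, as callout_cc_del() does. */
void
entry_free(struct entry *e)
{
	e->func = NULL;
	SLIST_INSERT_HEAD(&freelist, e, free_link);
}

/* Reuse a parked entry when possible, else fall back to malloc(). */
struct entry *
entry_alloc(void)
{
	struct entry *e;

	e = SLIST_FIRST(&freelist);
	if (e != NULL)
		SLIST_REMOVE_HEAD(&freelist, free_link);
	else
		e = malloc(sizeof(*e));
	return (e);
}

Because the rewritten callout_cc_del() is a no-op for caller-owned callouts,
softclock_call_cc() can invoke it unconditionally after the migration
checks, which is exactly the postponement described in the log message.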