summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2008-07-19 05:18:29 +0000
committerjeff <jeff@FreeBSD.org>2008-07-19 05:18:29 +0000
commit7ff6e9903f13d9d6684f9c61e286c4c653181385 (patch)
treea9f1ede82a397fda5e5784018b1e2ef9521a2641
parentb2f69d1b1e36e6bc223909dcc50635e58a5b2389 (diff)
downloadFreeBSD-src-7ff6e9903f13d9d6684f9c61e286c4c653181385.zip
FreeBSD-src-7ff6e9903f13d9d6684f9c61e286c4c653181385.tar.gz
Fix a race which could result in some timeout buckets being skipped.
- When a tick occurs on a cpu, iterate from cc_softticks until ticks. The per-cpu tick processing happens asynchronously with the actual adjustment of the 'ticks' variable. Sometimes the results may be visible before the local call and sometimes after. Previously this could cause a one-tick window where we didn't evaluate the bucket.
- In softclock, fetch curticks before incrementing cc_softticks so we don't skip insertions which were made for the current time.

Sponsored by: Nokia
-rw-r--r--sys/kern/kern_timeout.c17
1 file changed, 11 insertions, 6 deletions
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index fc30bd8..ec540c5 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -222,19 +222,24 @@ SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL)
void
callout_tick(void)
{
- int need_softclock = 0;
struct callout_cpu *cc;
+ int need_softclock;
+ int bucket;
/*
* Process callouts at a very low cpu priority, so we don't keep the
* relatively high clock interrupt priority any longer than necessary.
*/
+ need_softclock = 0;
cc = CC_SELF();
mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
- if (!TAILQ_EMPTY(&cc->cc_callwheel[ticks & callwheelmask])) {
- need_softclock = 1;
- } else if (cc->cc_softticks + 1 == ticks)
- ++cc->cc_softticks;
+ for (; cc->cc_softticks < ticks; cc->cc_softticks++) {
+ bucket = cc->cc_softticks & callwheelmask;
+ if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
+ need_softclock = 1;
+ break;
+ }
+ }
mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
/*
* swi_sched acquires the thread lock, so we don't want to call it
@@ -308,12 +313,12 @@ softclock(void *arg)
cc = (struct callout_cpu *)arg;
CC_LOCK(cc);
while (cc->cc_softticks != ticks) {
- cc->cc_softticks++;
/*
* cc_softticks may be modified by hard clock, so cache
* it while we work on a given bucket.
*/
curticks = cc->cc_softticks;
+ cc->cc_softticks++;
bucket = &cc->cc_callwheel[curticks & callwheelmask];
c = TAILQ_FIRST(bucket);
while (c) {
OpenPOWER on IntegriCloud