author	attilio <attilio@FreeBSD.org>	2012-12-05 22:32:12 +0000
committer	attilio <attilio@FreeBSD.org>	2012-12-05 22:32:12 +0000
commit	3145eea56c476d2082b5433a9c1e7b76cf2b15af (patch)
tree	b928214909b12e15cdb44b7bc8d96d862ac946f5	/sys/kern/kern_timeout.c
parent	61d867003f55a3bea36175d84810b929ae876619 (diff)
Fixup r243901:
- As the comment reports, CALLOUT_LOCAL_ALLOC cannot be checked directly
  from the callout flags but must instead be checked via a cached value.
  Hence, do so before actually removing the callout, when needed, in
  softclock_call_cc().
- In softclock_call_cc(), also add a comment in the waiting and deferred
  migration cases explaining that the dereference should be safe because
  of the migration dereference invariants.

Additionally:
- In softclock_call_cc(), for the deferred migration case, move all the
  accesses to the callout structure after the comment stating that the
  callout must not be destroyed.
- For consistency with this last tweak, use the cached c_flags for the
  KASSERT() in the deferred migration case.  It is not strictly necessary,
  but this way all the callout accesses happen after the above-mentioned
  comment, improving consistency.

Pointy hat to:	me
Sponsored by:	Isilon Systems / EMC Corporation
Reviewed by:	kib
MFC after:	2 weeks
X-MFC:		243901
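The idea behind the fix is that once the callout handler has run, a
CALLOUT_LOCAL_ALLOC callout may already have been freed, so any
post-handler decision must use a copy of the flags taken beforehand
rather than dereference the callout again.  The following is a minimal
sketch of that cached-flags pattern, not the kernel code: struct
my_callout, MY_LOCAL_ALLOC, my_softclock_call() and the free() stand-in
for callout_cc_del() are hypothetical names used only for illustration.

    #include <stdlib.h>

    #define	MY_LOCAL_ALLOC	0x0001	/* storage owned by the callout code */

    struct my_callout {
    	int	c_flags;
    	void	(*c_func)(void *);
    	void	*c_arg;
    };

    static void
    my_softclock_call(struct my_callout *c)
    {
    	/*
    	 * Cache the flags before invoking the handler: for a locally
    	 * allocated callout the structure may be freed or reused as
    	 * soon as the handler returns, so reading c->c_flags afterwards
    	 * would be a use-after-free.
    	 */
    	int cached_flags = c->c_flags;

    	c->c_func(c->c_arg);

    	/* Decide on teardown from the cached copy only. */
    	if (cached_flags & MY_LOCAL_ALLOC)
    		free(c);	/* stands in for callout_cc_del() */
    }

    static void
    my_handler(void *arg)
    {
    	(void)arg;		/* handler body elided */
    }

    int
    main(void)
    {
    	struct my_callout *c = malloc(sizeof(*c));

    	if (c == NULL)
    		return (1);
    	c->c_flags = MY_LOCAL_ALLOC;
    	c->c_func = my_handler;
    	c->c_arg = NULL;
    	my_softclock_call(c);	/* frees c: MY_LOCAL_ALLOC is set */
    	return (0);
    }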
Diffstat (limited to 'sys/kern/kern_timeout.c')
-rw-r--r--	sys/kern/kern_timeout.c	21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index c95c884..80933fa 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -548,6 +548,11 @@ skip:
*/
if (cc_cme_migrating(cc)) {
cc_cme_cleanup(cc);
+
+ /*
+ * It should be assert here that the callout is not
+ * destroyed but that is not easy.
+ */
c->c_flags &= ~CALLOUT_DFRMIGRATION;
}
cc->cc_waiting = 0;
@@ -555,7 +560,7 @@ skip:
wakeup(&cc->cc_waiting);
CC_LOCK(cc);
} else if (cc_cme_migrating(cc)) {
- KASSERT((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0,
+ KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
("Migrating legacy callout %p", c));
#ifdef SMP
/*
@@ -569,7 +574,10 @@ skip:
cc_cme_cleanup(cc);
/*
- * Handle deferred callout stops
+	 * It should be assert here that the callout is not destroyed
+	 * but that is not easy.
+	 *
+	 * As first thing, handle deferred callout stops.
*/
if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
CTR3(KTR_CALLOUT,
@@ -578,14 +586,8 @@ skip:
callout_cc_del(c, cc);
return;
}
-
c->c_flags &= ~CALLOUT_DFRMIGRATION;
- /*
- * It should be assert here that the
- * callout is not destroyed but that
- * is not easy.
- */
new_cc = callout_cpu_switch(c, cc, new_cpu);
callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
new_cpu);
@@ -606,7 +608,8 @@ skip:
KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
c->c_flags == CALLOUT_LOCAL_ALLOC,
("corrupted callout"));
- callout_cc_del(c, cc);
+ if (c_flags & CALLOUT_LOCAL_ALLOC)
+ callout_cc_del(c, cc);
}
/*