author		mav <mav@FreeBSD.org>	2013-09-24 07:03:16 +0000
committer	mav <mav@FreeBSD.org>	2013-09-24 07:03:16 +0000
commit		a836e0c53672e740737d2dff33568f7a0bf2974e (patch)
tree		83ada6af34b29d4844c503e4ca7b8e7b2eecddf0
parent		9f82ff2278980f33d887a2bdcbf1ad255fec1596 (diff)
Make load average sampling asynchronous to hardclock ticks.

This improves measurement of load caused by time-related events that still
use hardclock.  For example, without this change dummynet, which schedules
events each hardclock tick, was always miscounted as a load of 1.  There is
still aliasing with events delayed by the new precision mechanism, but that
probably can't be avoided without moving this sampling from a callout to
some lower-level code, or handling it in some other special way.

Reviewed by:	davide
Approved by:	re (marius)
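To make the aliasing concrete, below is a toy user-space simulation (an
illustrative sketch only, not kernel code; the tick length, busy fraction,
sample spacing, and sample count are arbitrary assumptions, not values from
this commit).  A task that runs briefly at the start of every hardclock tick
looks permanently busy to a sampler that also fires exactly on tick
boundaries, but only occasionally busy to a sampler whose firing time has a
random offset within the tick.

/*
 * Toy simulation of the aliasing: a periodic task is "running" for the
 * first 50 us of every 1 ms tick.  A tick-aligned sampler always catches
 * it running (load 1); a jittered sampler catches it ~5% of the time.
 * All constants below are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

#define TICK_US	1000	/* 1 ms hardclock tick (assumes hz = 1000) */
#define BUSY_US	50	/* task runs for the first 50 us of each tick */
#define SAMPLES	100000

/* Is the periodic task running at absolute time t (in microseconds)? */
static int
busy(long t)
{
	return ((t % TICK_US) < BUSY_US);
}

int
main(void)
{
	long aligned = 0, jittered = 0;

	for (int i = 0; i < SAMPLES; i++) {
		long base = (long)i * 5 * TICK_US;	/* samples ~5 ms apart */

		/* Tick-aligned sampler: always lands on a tick boundary. */
		aligned += busy(base);

		/* Asynchronous sampler: random offset within the tick. */
		jittered += busy(base + random() % TICK_US);
	}
	printf("tick-aligned sampler: busy %.1f%% of samples\n",
	    100.0 * aligned / SAMPLES);
	printf("jittered sampler:     busy %.1f%% of samples\n",
	    100.0 * jittered / SAMPLES);
	return (0);
}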
-rw-r--r--	sys/kern/kern_synch.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 0a400e9..047fa46 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -570,8 +570,8 @@ loadav(void *arg)
 	 * run at regular intervals.
 	 */
 	callout_reset_sbt(&loadav_callout,
-	    tick_sbt * (hz * 4 + (int)(random() % (hz * 2 + 1))), 0,
-	    loadav, NULL, C_DIRECT_EXEC | C_HARDCLOCK);
+	    SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
+	    loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
 }
 
 /* ARGSUSED */
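The sketch below recomputes both delay expressions from the hunk above in
user space (it is not kernel code; the values of hz and tick_sbt and the
32.32 sbintime_t layout are assumptions mirroring common defaults, not part
of this commit).  Both expressions pick a delay in the 4 to 6 second range,
but the old one is quantized to whole hardclock ticks and fired aligned to
hardclock (C_HARDCLOCK), while the new one is expressed in microseconds and
is no longer tied to hz.

/*
 * User-space sketch of the old vs. new loadav() rescheduling interval.
 * hz, tick_sbt, and the sbintime_t definitions are assumptions here.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef int64_t sbintime_t;		/* 32.32 fixed-point seconds */

#define SBT_1S	((sbintime_t)1 << 32)
#define SBT_1US	(SBT_1S / 1000000)

static const int hz = 1000;				/* assumed hardclock rate */
static const sbintime_t tick_sbt = SBT_1S / 1000;	/* one tick at hz = 1000 */

int
main(void)
{
	long r = random();

	/* Old expression: 4..6 seconds, quantized to whole hardclock ticks. */
	sbintime_t old_delay = tick_sbt * (hz * 4 + (int)(r % (hz * 2 + 1)));

	/* New expression: 4..6 seconds in 1 us steps, independent of hz. */
	sbintime_t new_delay = SBT_1US * (4000000 + (int)(r % 2000001));

	printf("old delay: %jd hardclock ticks (~%jd us)\n",
	    (intmax_t)(old_delay / tick_sbt), (intmax_t)(old_delay / SBT_1US));
	printf("new delay: %jd us\n", (intmax_t)(new_delay / SBT_1US));
	return (0);
}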