author    jeff <jeff@FreeBSD.org>  2008-04-02 11:20:30 +0000
committer jeff <jeff@FreeBSD.org>  2008-04-02 11:20:30 +0000
commit    b0655179358fbd6917b144860a6e1b180e5bc26b
tree      e234ebf7577ff7cdc7c7c1d947af6e559d370941
parent    c951adca24b1bfebb3ef74a81858fd6ed52f1713
Implement per-cpu callout threads, wheels, and locks.
 - Move callout thread creation from kern_intr.c to kern_timeout.c.
 - Call callout_tick() on every processor via hardclock_cpu() rather than
   inspecting callout internal details in kern_clock.c.
 - Remove callout implementation details from callout.h.
 - Package up all of the global variables into a per-cpu callout structure.
 - Start one thread per-cpu.  Threads are not strictly bound.  They prefer
   to execute on the native cpu but may migrate temporarily if interrupts
   are starving callout processing.
 - Run all callouts by default in the thread for cpu0 to maintain current
   ordering and concurrency guarantees.  Many consumers may not properly
   handle concurrent execution.
 - The new callout_reset_on() api allows specifying a particular cpu to
   execute the callout on.  This may migrate a callout to a new cpu.
   callout_reset() schedules on the last assigned cpu while
   callout_reset_curcpu() schedules on the current cpu.  (See the usage
   sketch below.)

Reviewed by:	phk
Sponsored by:	Nokia
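For context, here is a hypothetical consumer sketch of the API described in the
last item above.  It is not part of this commit, and example_softc,
example_timeout, and example_attach are made-up names; only callout_reset_on(),
callout_reset_curcpu(), callout_init_mtx(), and callout_reset() come from the
callout(9) interface itself.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/kernel.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/callout.h>

	/* Hypothetical driver state; all names here are illustrative. */
	struct example_softc {
		struct callout	ex_timer;
		struct mtx	ex_lock;
	};

	static void
	example_timeout(void *arg)
	{
		struct example_softc *sc = arg;

		/* ... periodic work; sc->ex_lock is held because the
		 * callout was set up with callout_init_mtx(). ... */

		/* Re-arm on whichever cpu the handler is running on. */
		callout_reset_curcpu(&sc->ex_timer, hz, example_timeout, sc);
	}

	static void
	example_attach(struct example_softc *sc)
	{
		mtx_init(&sc->ex_lock, "example", NULL, MTX_DEF);
		callout_init_mtx(&sc->ex_timer, &sc->ex_lock, 0);

		/* First expiry one second out, pinned to cpu 1. */
		callout_reset_on(&sc->ex_timer, hz, example_timeout, sc, 1);
	}

A plain callout_reset() call would instead keep the callout on whatever cpu it
was last assigned to, preserving the pre-existing single-wheel behaviour for
consumers that never ask for a specific cpu.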
Diffstat (limited to 'sys/kern/kern_clock.c')
-rw-r--r--	sys/kern/kern_clock.c	25
1 file changed, 2 insertions(+), 23 deletions(-)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index ded86c8..1c8e7bd 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -292,6 +292,7 @@ hardclock_cpu(int usermode)
 	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
 		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
 #endif
+	callout_tick();
 }
 
 /*
@@ -300,10 +301,9 @@ hardclock_cpu(int usermode)
 void
 hardclock(int usermode, uintfptr_t pc)
 {
-	int need_softclock = 0;
 
+	atomic_add_int((volatile int *)&ticks, 1);
 	hardclock_cpu(usermode);
-
 	tc_ticktock();
 	/*
 	 * If no separate statistics clock is available, run it from here.
@@ -314,30 +314,9 @@ hardclock(int usermode, uintfptr_t pc)
 		profclock(usermode, pc);
 		statclock(usermode);
 	}
-
 #ifdef DEVICE_POLLING
 	hardclock_device_poll();	/* this is very short and quick */
 #endif /* DEVICE_POLLING */
-
-	/*
-	 * Process callouts at a very low cpu priority, so we don't keep the
-	 * relatively high clock interrupt priority any longer than necessary.
-	 */
-	mtx_lock_spin_flags(&callout_lock, MTX_QUIET);
-	ticks++;
-	if (!TAILQ_EMPTY(&callwheel[ticks & callwheelmask])) {
-		need_softclock = 1;
-	} else if (softticks + 1 == ticks)
-		++softticks;
-	mtx_unlock_spin_flags(&callout_lock, MTX_QUIET);
-
-	/*
-	 * swi_sched acquires the thread lock, so we don't want to call it
-	 * with callout_lock held; incorrect locking order.
-	 */
-	if (need_softclock)
-		swi_sched(softclock_ih, 0);
-
 #ifdef SW_WATCHDOG
 	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
 		watchdog_fire();
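The hunks above replace the old callout_lock-protected ticks++ with a single
lock-free atomic_add_int(), since the per-cpu wheels no longer share one spin
mutex.  A minimal userland analog of that pattern using C11 atomics, purely
for illustration (ticks_analog and tick() are made-up names, not kernel code):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Shared tick counter bumped with one atomic read-modify-write
	 * instead of a lock/increment/unlock sequence. */
	static _Atomic int ticks_analog;

	static void
	tick(void)
	{
		/* Equivalent in spirit to atomic_add_int(&ticks, 1). */
		atomic_fetch_add_explicit(&ticks_analog, 1,
		    memory_order_relaxed);
	}

	int
	main(void)
	{
		for (int i = 0; i < 5; i++)
			tick();
		printf("ticks = %d\n", atomic_load(&ticks_analog));
		return (0);
	}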