summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_clock.c
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2000-10-06 02:20:21 +0000
committerjhb <jhb@FreeBSD.org>2000-10-06 02:20:21 +0000
commitfd275a78bd168fffc26552c4b2debf6f105a43ed (patch)
tree4da054c14beb1594089f5d38bd0b743156b65897 /sys/kern/kern_clock.c
parente638f1a0983968d7993bba04b23cd203d45680ce (diff)
downloadFreeBSD-src-fd275a78bd168fffc26552c4b2debf6f105a43ed.zip
FreeBSD-src-fd275a78bd168fffc26552c4b2debf6f105a43ed.tar.gz
- Change fast interrupts on x86 to push a full interrupt frame and to
return through doreti to handle ast's. This is necessary for the clock interrupts to work properly. - Change the clock interrupts on the x86 to be fast instead of threaded. This is needed because both hardclock() and statclock() need to run in the context of the current process, not in a separate thread context. - Kill the prevproc hack as it is no longer needed. - We really need Giant when we call psignal(), but we don't want to block during the clock interrupt. Instead, use two p_flag's in the proc struct to mark the current process as having a pending SIGVTALRM or a SIGPROF and let them be delivered during ast() when hardclock() has finished running. - Remove CLKF_BASEPRI, which was #ifdef'd out on the x86 anyways. It was broken on the x86 if it was turned on since cpl is gone. Its only use was to bogusly run softclock() directly during hardclock() rather than scheduling an SWI. - Remove the COM_LOCK simplelock and replace it with a clock_lock spin mutex. Since the spin mutex already handles disabling/restoring interrupts appropriately, this also lets us axe all the *_intr() fu. - Back out the hacks in the APIC_IO x86 cpu_initclocks() code to use temporary fast interrupts for the APIC trial. - Add two new process flags P_ALRMPEND and P_PROFPEND to mark the pending signals in hardclock() that are to be delivered in ast(). Submitted by: jakeb (making statclock safe in a fast interrupt) Submitted by: cp (concept of delaying signals until ast())
Diffstat (limited to 'sys/kern/kern_clock.c')
-rw-r--r--sys/kern/kern_clock.c42
1 files changed, 24 insertions, 18 deletions
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index bc6bf06..6b3153f 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -58,7 +58,9 @@
#include <sys/sysctl.h>
#include <machine/cpu.h>
+#include <machine/ipl.h>
#include <machine/limits.h>
+#include <machine/mutex.h>
#include <machine/smp.h>
#ifdef GPROF
@@ -161,11 +163,15 @@ hardclock(frame)
pstats = p->p_stats;
if (CLKF_USERMODE(frame) &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
- psignal(p, SIGVTALRM);
+ itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
+ p->p_flag |= P_ALRMPEND;
+ aston();
+ }
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
- psignal(p, SIGPROF);
+ itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
+ p->p_flag |= P_PROFPEND;
+ aston();
+ }
}
#if defined(SMP) && defined(BETTER_CLOCK)
@@ -186,15 +192,7 @@ hardclock(frame)
* relatively high clock interrupt priority any longer than necessary.
*/
if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
- if (CLKF_BASEPRI(frame)) {
- /*
- * Save the overhead of a software interrupt;
- * it will happen as soon as we return, so do it now.
- */
- (void)splsoftclock();
- softclock();
- } else
- setsoftclock();
+ setsoftclock();
} else if (softticks + 1 == ticks)
++softticks;
}
@@ -321,20 +319,24 @@ statclock(frame)
struct rusage *ru;
struct vmspace *vm;
+ mtx_enter(&sched_lock, MTX_SPIN);
+
if (CLKF_USERMODE(frame)) {
/*
* Came from user mode; CPU was in user state.
* If this process is being profiled, record the tick.
*/
- p = prevproc;
+ p = curproc;
if (p->p_flag & P_PROFIL)
addupc_intr(p, CLKF_PC(frame), 1);
#if defined(SMP) && defined(BETTER_CLOCK)
if (stathz != 0)
forward_statclock(pscnt);
#endif
- if (--pscnt > 0)
+ if (--pscnt > 0) {
+ mtx_exit(&sched_lock, MTX_SPIN);
return;
+ }
/*
* Charge the time as appropriate.
*/
@@ -361,8 +363,10 @@ statclock(frame)
if (stathz != 0)
forward_statclock(pscnt);
#endif
- if (--pscnt > 0)
+ if (--pscnt > 0) {
+ mtx_exit(&sched_lock, MTX_SPIN);
return;
+ }
/*
* Came from kernel mode, so we were:
* - handling an interrupt,
@@ -375,8 +379,8 @@ statclock(frame)
* so that we know how much of its real time was spent
* in ``non-process'' (i.e., interrupt) work.
*/
- p = prevproc;
- if (p->p_ithd) {
+ p = curproc;
+ if ((p->p_ithd != NULL) || CLKF_INTR(frame)) {
p->p_iticks++;
cp_time[CP_INTR]++;
} else {
@@ -402,6 +406,8 @@ statclock(frame)
if (ru->ru_maxrss < rss)
ru->ru_maxrss = rss;
}
+
+ mtx_exit(&sched_lock, MTX_SPIN);
}
/*
OpenPOWER on IntegriCloud