path: root/sys/kern/kern_clock.c
author     jake <jake@FreeBSD.org>    2003-02-03 17:53:15 +0000
committer  jake <jake@FreeBSD.org>    2003-02-03 17:53:15 +0000
commit     6b3763a173b458d3138f8a5ae81d7574349cc99d (patch)
tree       4ac2b707510b8d738855f9fa4a6576e6ded49a77 /sys/kern/kern_clock.c
parent     9689f0580db3e8ffcfba46b99c0b3a370eb9524c (diff)
Split statclock into statclock and profclock, and made the method for driving
statclock based on profhz when profiling is enabled MD, since most platforms
don't use this anyway.  This removes the need for statclock_process, whose
only purpose was to subdivide profhz, and gets the profiling clock running
outside of sched_lock on platforms that implement suswintr.

Also changed the interface for starting and stopping the profiling clock to
do just that, instead of changing the rate of statclock, since they can now
be separate.

Reviewed by:	jhb, tmm
Tested on:	i386, sparc64
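
As a concrete picture of the new split, the sketch below shows how a
hypothetical MD statistics-clock interrupt might drive both clocks on a
platform that re-rates its stat timer to profhz while profiling.  This is an
assumption about how a port could use the exported profclock()/statclock(),
profprocs, and psratio, not code from this commit; md_statclock_intr and the
static divider are illustrative names.

/*
 * Hypothetical sketch (not part of this commit): MD statistics clock
 * interrupt for a platform whose stat timer runs at profhz while
 * profiling is active.  md_statclock_intr and pscnt are illustrative;
 * profclock(), statclock(), profprocs, and psratio are the interfaces
 * this change exposes.
 */
static int pscnt = 1;

void
md_statclock_intr(struct clockframe *frame)
{

	if (profprocs != 0) {
		/* Timer runs at profhz: take a profiling sample each tick. */
		profclock(frame);
		/* Subdivide profhz back down to stathz for statclock(). */
		if (--pscnt > 0)
			return;
		pscnt = psratio;
	}
	statclock(frame);
}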
Diffstat (limited to 'sys/kern/kern_clock.c')
-rw-r--r--	sys/kern/kern_clock.c	156
1 file changed, 66 insertions(+), 90 deletions(-)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index c5e1b4a..5c6756b 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -122,10 +122,9 @@ long tk_rawcc;
int stathz;
int profhz;
-static int profprocs;
+int profprocs;
int ticks;
-static int psdiv, pscnt; /* prof => stat divider */
-int psratio; /* ratio: prof / stat */
+int psratio;
/*
* Initialize clock frequencies and start both clocks running.
@@ -141,7 +140,6 @@ initclocks(dummy)
* Set divisors to 1 (normal case) and let the machine-specific
* code do its bit.
*/
- psdiv = pscnt = 1;
cpu_initclocks();
#ifdef DEVICE_POLLING
@@ -157,32 +155,27 @@ initclocks(dummy)
}
/*
- * Each time the real-time timer fires, this function is called on all CPUs
- * with each CPU passing in its curthread as the first argument. If possible
- * a nice optimization in the future would be to allow the CPU receiving the
- * actual real-time timer interrupt to call this function on behalf of the
- * other CPUs rather than sending an IPI to all other CPUs so that they
- * can call this function. Note that hardclock() calls hardclock_process()
- * for the CPU receiving the timer interrupt, so only the other CPUs in the
- * system need to call this function (or have it called on their behalf.
+ * Each time the real-time timer fires, this function is called on all CPUs.
+ * Note that hardclock() calls hardclock_process() for the boot CPU, so only
+ * the other CPUs in the system need to call this function.
*/
void
-hardclock_process(td, user)
- struct thread *td;
- int user;
+hardclock_process(frame)
+ register struct clockframe *frame;
{
struct pstats *pstats;
+ struct thread *td = curthread;
struct proc *p = td->td_proc;
/*
* Run current process's virtual and profile time, as needed.
*/
- mtx_assert(&sched_lock, MA_OWNED);
+ mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
if (p->p_flag & P_KSES) {
/* XXXKSE What to do? */
} else {
pstats = p->p_stats;
- if (user &&
+ if (CLKF_USERMODE(frame) &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
p->p_sflag |= PS_ALRMPEND;
@@ -194,6 +187,7 @@ hardclock_process(td, user)
td->td_kse->ke_flags |= KEF_ASTPENDING;
}
}
+ mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}
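
Since hardclock_process() now takes the clockframe and acquires sched_lock
itself, a secondary CPU's clock interrupt or IPI handler only needs to
forward the frame.  A minimal sketch, assuming a hypothetical MD entry point
name:

/*
 * Illustrative only: per-CPU hardclock work on a non-boot CPU after
 * this change.  md_hardclock_ipi is a hypothetical name; the boot CPU
 * is covered by hardclock() itself.
 */
void
md_hardclock_ipi(struct clockframe *frame)
{

	hardclock_process(frame);
}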
/*
@@ -206,9 +200,7 @@ hardclock(frame)
int need_softclock = 0;
CTR0(KTR_CLK, "hardclock fired");
- mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
- hardclock_process(curthread, CLKF_USERMODE(frame));
- mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
+ hardclock_process(frame);
tc_ticktock();
/*
@@ -216,8 +208,10 @@ hardclock(frame)
*
* XXX: this only works for UP
*/
- if (stathz == 0)
+ if (stathz == 0) {
+ profclock(frame);
statclock(frame);
+ }
#ifdef DEVICE_POLLING
hardclock_device_poll(); /* this is very short and quick */
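
For scale: with the common i386 RTC rates of stathz = 128 and profhz = 1024,
psratio is 1024 / 128 = 8, so an MD driver like the sketch above calls
statclock() once for every eight profclock() calls while profiling.  On a UP
platform with no separate statistics timer at all (stathz == 0), the fallback
above simply drives both clocks from hardclock() at hz.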
@@ -312,7 +306,6 @@ void
startprofclock(p)
register struct proc *p;
{
- int s;
/*
* XXX; Right now sched_lock protects statclock(), but perhaps
@@ -322,12 +315,8 @@ startprofclock(p)
mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_PROFIL) == 0) {
p->p_sflag |= PS_PROFIL;
- if (++profprocs == 1 && stathz != 0) {
- s = splstatclock();
- psdiv = pscnt = psratio;
- setstatclockrate(profhz);
- splx(s);
- }
+ if (++profprocs == 1)
+ cpu_startprofclock();
}
mtx_unlock_spin(&sched_lock);
}
@@ -339,57 +328,41 @@ void
stopprofclock(p)
register struct proc *p;
{
- int s;
mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_PROFIL) {
p->p_sflag &= ~PS_PROFIL;
- if (--profprocs == 0 && stathz != 0) {
- s = splstatclock();
- psdiv = pscnt = 1;
- setstatclockrate(stathz);
- splx(s);
- }
+ if (--profprocs == 0)
+ cpu_stopprofclock();
}
mtx_unlock_spin(&sched_lock);
}
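
On a platform without a dedicated profiling timer, the new MD hooks can be
implemented by re-rating the statistics clock, just as the MI code removed
above used to do.  The bodies below are a sketch under that assumption, not
part of this diff:

/*
 * Hypothetical MD implementations: re-rate the statistics clock, as
 * the old startprofclock()/stopprofclock() code did.  A platform with
 * a real per-CPU profiling timer would start/stop that instead.
 */
void
cpu_startprofclock(void)
{

	if (stathz != 0)
		setstatclockrate(profhz);
}

void
cpu_stopprofclock(void)
{

	if (stathz != 0)
		setstatclockrate(stathz);
}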
/*
- * Do process and kernel statistics. Most of the statistics are only
+ * Statistics clock. Grab profile sample, and if divider reaches 0,
+ * do process and kernel statistics. Most of the statistics are only
* used by user-level statistics programs. The main exceptions are
- * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu. This function
- * should be called by all CPUs in the system for each statistics clock
- * interrupt. See the description of hardclock_process for more detail on
- * this function's relationship to statclock.
+ * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu.
+ * This should be called by all active processors.
*/
void
-statclock_process(ke, pc, user)
- struct kse *ke;
- register_t pc;
- int user;
+statclock(frame)
+ register struct clockframe *frame;
{
-#ifdef GPROF
- struct gmonparam *g;
- int i;
-#endif
struct pstats *pstats;
- long rss;
struct rusage *ru;
struct vmspace *vm;
- struct proc *p = ke->ke_proc;
- struct thread *td = ke->ke_thread; /* current thread */
+ struct thread *td;
+ struct kse *ke;
+ struct proc *p;
+ long rss;
- KASSERT(ke == curthread->td_kse, ("statclock_process: td != curthread"));
- mtx_assert(&sched_lock, MA_OWNED);
- if (user) {
- /*
- * Came from user mode; CPU was in user state.
- * If this process is being profiled, record the tick.
- */
- if (p->p_sflag & PS_PROFIL)
- addupc_intr(ke, pc, 1);
- if (pscnt < psdiv)
- return;
+ td = curthread;
+ p = td->td_proc;
+
+ mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
+ ke = td->td_kse;
+ if (CLKF_USERMODE(frame)) {
/*
* Charge the time as appropriate.
*/
@@ -401,21 +374,6 @@ statclock_process(ke, pc, user)
else
cp_time[CP_USER]++;
} else {
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = pc - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt < psdiv)
- return;
/*
* Came from kernel mode, so we were:
* - handling an interrupt,
@@ -455,25 +413,43 @@ statclock_process(ke, pc, user)
if (ru->ru_maxrss < rss)
ru->ru_maxrss = rss;
}
+ mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}
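
The CLKF_USERMODE() and CLKF_PC() macros used by statclock() and profclock()
are MD accessors over the saved interrupt state.  Purely for orientation, a
hypothetical definition might look like the following; the real layouts live
in each platform's <machine/cpu.h> and differ in detail, and cf_status and
MD_PSL_USER are invented names:

/*
 * Illustrative clockframe accessors; the actual struct layout and the
 * user-mode status test are machine dependent.
 */
struct clockframe {
	register_t	cf_status;	/* saved processor status word */
	register_t	cf_pc;		/* interrupted program counter */
};
#define	CLKF_USERMODE(cf)	(((cf)->cf_status & MD_PSL_USER) != 0)
#define	CLKF_PC(cf)		((cf)->cf_pc)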
-/*
- * Statistics clock. Grab profile sample, and if divider reaches 0,
- * do process and kernel statistics. Most of the statistics are only
- * used by user-level statistics programs. The main exceptions are
- * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu.
- */
void
-statclock(frame)
+profclock(frame)
register struct clockframe *frame;
{
+ struct thread *td;
+#ifdef GPROF
+ struct gmonparam *g;
+ int i;
+#endif
- CTR0(KTR_CLK, "statclock fired");
- mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
- if (--pscnt == 0)
- pscnt = psdiv;
- statclock_process(curthread->td_kse, CLKF_PC(frame), CLKF_USERMODE(frame));
- mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
+ if (CLKF_USERMODE(frame)) {
+ /*
+ * Came from user mode; CPU was in user state.
+ * If this process is being profiled, record the tick.
+ */
+ td = curthread;
+ if (td->td_proc->p_sflag & PS_PROFIL)
+ addupc_intr(td->td_kse, CLKF_PC(frame), 1);
+ }
+#ifdef GPROF
+ else {
+ /*
+ * Kernel statistics are just like addupc_intr, only easier.
+ */
+ g = &_gmonparam;
+ if (g->state == GMON_PROF_ON) {
+ i = CLKF_PC(frame) - g->lowpc;
+ if (i < g->textsize) {
+ i /= HISTFRACTION * sizeof(*g->kcount);
+ g->kcount[i]++;
+ }
+ }
+ }
+#endif
}
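
To make the GPROF arithmetic concrete: the kernel text is divided into
buckets of HISTFRACTION * sizeof(*g->kcount) bytes, and each sample bumps the
counter for the bucket containing the interrupted PC.  With the common
HISTFRACTION of 2 and 16-bit counters, a PC 0x1234 bytes past g->lowpc
increments g->kcount[0x1234 / 4], i.e. bucket 0x48d.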
/*