author     jake <jake@FreeBSD.org>    2003-02-03 17:53:15 +0000
committer  jake <jake@FreeBSD.org>    2003-02-03 17:53:15 +0000
commit     6b3763a173b458d3138f8a5ae81d7574349cc99d (patch)
tree       4ac2b707510b8d738855f9fa4a6576e6ded49a77 /sys
parent     9689f0580db3e8ffcfba46b99c0b3a370eb9524c (diff)
Split statclock into statclock and profclock, and made the method for driving
statclock based on profhz when profiling is enabled MD, since most platforms
don't use this anyway.  This removes the need for statclock_process, whose
only purpose was to subdivide profhz, and gets the profiling clock running
outside of sched_lock on platforms that implement suswintr.

Also changed the interface for starting and stopping the profiling clock to
do just that, instead of changing the rate of statclock, since they can now
be separate.

Reviewed by:	jhb, tmm
Tested on:	i386, sparc64
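For orientation before the diff: the core of the change is that a platform's
clock interrupt now calls profclock() on every profhz tick while profiling is
active, and a pscnt/psdiv divider keeps statclock() running at stathz.  The
following is a small, standalone user-space sketch of that divider logic; it
is not part of the patch, the rates and tick counts are invented for
illustration, and only the loop body mirrors the patched rtcintr().

#include <stdio.h>

/*
 * Standalone illustration of the pscnt/psdiv divider introduced in the
 * patched rtcintr(): with profiling enabled the clock interrupt fires at
 * profhz, profclock() runs on every tick, and statclock() runs on every
 * psdiv-th tick, so it still effectively runs at stathz.  User-space
 * sketch only, not kernel code.
 */
static int profprocs = 1;	/* pretend one process is being profiled */
static int psdiv = 8;		/* e.g. profhz / stathz = 1024 / 128 */
static int pscnt = 8;

static void
profclock(int tick)
{
	printf("tick %2d: profclock\n", tick);
}

static void
statclock(int tick)
{
	printf("tick %2d: statclock\n", tick);
}

int
main(void)
{
	int tick;

	for (tick = 0; tick < 24; tick++) {
		/* Same structure as the loop body in the patched rtcintr(). */
		if (profprocs != 0) {
			if (--pscnt == 0)
				pscnt = psdiv;
			profclock(tick);
		}
		if (pscnt == psdiv)
			statclock(tick);
	}
	return (0);
}

With profprocs == 0 and psdiv == pscnt == 1 (as cpu_stopprofclock() leaves
them), the divider is a no-op and statclock() runs on every interrupt, which
is the non-profiling case.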
Diffstat (limited to 'sys')
-rw-r--r--  sys/alpha/alpha/clock.c       |  17
-rw-r--r--  sys/alpha/alpha/interrupt.c   |  17
-rw-r--r--  sys/amd64/amd64/apic_vector.S |   8
-rw-r--r--  sys/amd64/amd64/critical.c    |  17
-rw-r--r--  sys/amd64/amd64/mp_machdep.c  |  19
-rw-r--r--  sys/amd64/amd64/mptable.c     |  19
-rw-r--r--  sys/amd64/amd64/tsc.c         |  28
-rw-r--r--  sys/amd64/include/clock.h     |   2
-rw-r--r--  sys/amd64/include/mptable.h   |  19
-rw-r--r--  sys/amd64/include/smp.h       |   4
-rw-r--r--  sys/amd64/isa/clock.c         |  28
-rw-r--r--  sys/i386/i386/apic_vector.s   |   8
-rw-r--r--  sys/i386/i386/critical.c      |  17
-rw-r--r--  sys/i386/i386/mp_machdep.c    |  19
-rw-r--r--  sys/i386/i386/mptable.c       |  19
-rw-r--r--  sys/i386/i386/tsc.c           |  28
-rw-r--r--  sys/i386/include/clock.h      |   2
-rw-r--r--  sys/i386/include/mptable.h    |  19
-rw-r--r--  sys/i386/include/smp.h        |   4
-rw-r--r--  sys/i386/isa/apic_vector.s    |   8
-rw-r--r--  sys/i386/isa/clock.c          |  28
-rw-r--r--  sys/ia64/ia64/clock.c         |  13
-rw-r--r--  sys/ia64/ia64/interrupt.c     |  17
-rw-r--r--  sys/isa/atrtc.c               |  28
-rw-r--r--  sys/kern/kern_clock.c         | 156
-rw-r--r--  sys/pc98/cbus/clock.c         |  17
-rw-r--r--  sys/pc98/cbus/pcrtc.c         |  17
-rw-r--r--  sys/pc98/pc98/clock.c         |  17
-rw-r--r--  sys/powerpc/aim/clock.c       |   7
-rw-r--r--  sys/powerpc/powerpc/clock.c   |   7
-rw-r--r--  sys/sparc64/sparc64/clock.c   |   8
-rw-r--r--  sys/sparc64/sparc64/tick.c    |  18
-rw-r--r--  sys/sys/kernel.h              |   1
-rw-r--r--  sys/sys/systm.h               |   7
34 files changed, 389 insertions, 254 deletions
diff --git a/sys/alpha/alpha/clock.c b/sys/alpha/alpha/clock.c
index 2d34a99..3b113d7 100644
--- a/sys/alpha/alpha/clock.c
+++ b/sys/alpha/alpha/clock.c
@@ -446,17 +446,18 @@ handleclock(void *arg)
hardclock(arg);
}
-/*
- * We assume newhz is either stathz or profhz, and that neither will
- * change after being set up above. Could recalculate intervals here
- * but that would be a drag.
- */
void
-setstatclockrate(newhz)
- int newhz;
+cpu_startprofclock(void)
+{
+
+ /* nothing to do */
+}
+
+void
+cpu_stopprofclock(void)
{
- /* nothing we can do */
+ /* nothing to do */
}
/*
diff --git a/sys/alpha/alpha/interrupt.c b/sys/alpha/alpha/interrupt.c
index 19cd144..88eba10 100644
--- a/sys/alpha/alpha/interrupt.c
+++ b/sys/alpha/alpha/interrupt.c
@@ -472,16 +472,19 @@ alpha_clock_interrupt(struct trapframe *framep)
#endif
(*platform.clockintr)(framep);
/* divide hz (1024) by 8 to get stathz (128) */
- if ((++schedclk2 & 0x7) == 0)
+ if ((++schedclk2 & 0x7) == 0) {
+ if (profprocs != 0)
+ profclock((struct clockframe *)framep);
statclock((struct clockframe *)framep);
+ }
#ifdef SMP
} else {
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, TRAPF_USERMODE(framep));
- if ((schedclk2 & 0x7) == 0)
- statclock_process(curkse, TRAPF_PC(framep),
- TRAPF_USERMODE(framep));
- mtx_unlock_spin(&sched_lock);
+ hardclock_process((struct clockframe *)framep);
+ if ((schedclk2 & 0x7) == 0) {
+ if (profprocs != 0)
+ profclock((struct clockframe *)framep);
+ statclock((struct clockframe *)framep);
+ }
}
#endif
critical_exit();
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index f8e0321..e4b6ea5 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -365,7 +365,7 @@ Xinvlrng:
iret
/*
- * Forward hardclock to another CPU. Pushes a trapframe and calls
+ * Forward hardclock to another CPU. Pushes a clockframe and calls
* forwarded_hardclock().
*/
.text
@@ -389,14 +389,16 @@ Xhardclock:
jmp 10f
1:
incl TD_INTR_NESTING_LEVEL(%ebx)
+ pushl $0 /* XXX convert trapframe to clockframe */
call forwarded_hardclock
+ addl $4, %esp /* XXX convert clockframe to trapframe */
decl TD_INTR_NESTING_LEVEL(%ebx)
10:
MEXITCOUNT
jmp doreti
/*
- * Forward statclock to another CPU. Pushes a trapframe and calls
+ * Forward statclock to another CPU. Pushes a clockframe and calls
* forwarded_statclock().
*/
.text
@@ -422,7 +424,9 @@ Xstatclock:
jmp 10f
1:
incl TD_INTR_NESTING_LEVEL(%ebx)
+ pushl $0 /* XXX convert trapframe to clockframe */
call forwarded_statclock
+ addl $4, %esp /* XXX convert clockframe to trapframe */
decl TD_INTR_NESTING_LEVEL(%ebx)
10:
MEXITCOUNT
diff --git a/sys/amd64/amd64/critical.c b/sys/amd64/amd64/critical.c
index de91426..7a505e1 100644
--- a/sys/amd64/amd64/critical.c
+++ b/sys/amd64/amd64/critical.c
@@ -15,6 +15,7 @@
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
+#include <machine/clock.h>
#include <machine/critical.h>
#ifdef SMP
@@ -89,6 +90,10 @@ cpu_thread_link(struct thread *td)
void
i386_unpend(void)
{
+ struct clockframe frame;
+
+ frame.cf_cs = SEL_KPL;
+ frame.cf_eip = (register_t)i386_unpend;
KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
curthread->td_critnest = 1;
@@ -131,15 +136,13 @@ i386_unpend(void)
PCPU_SET(spending, mask & ~(1 << irq));
switch(irq) {
case 0: /* bit 0 - hardclock */
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, 0);
- mtx_unlock_spin(&sched_lock);
+ hardclock_process(&frame);
break;
case 1: /* bit 1 - statclock */
- mtx_lock_spin(&sched_lock);
- statclock_process(curthread->td_kse,
- (register_t)i386_unpend, 0);
- mtx_unlock_spin(&sched_lock);
+ if (profprocs != 0)
+ profclock(&frame);
+ if (pscnt == psdiv)
+ statclock(&frame);
break;
}
KASSERT((read_eflags() & PSL_I) == 0,
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 39d10d6..7960c51 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -63,6 +63,7 @@
#include <machine/apic.h>
#include <machine/atomic.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/mpapic.h>
@@ -2603,17 +2604,17 @@ ap_init(void)
* For statclock, we send an IPI to all CPU's to have them call this
* function.
*
- * WARNING! unpend() will call statclock_process() directly and skip this
+ * WARNING! unpend() will call statclock() directly and skip this
* routine.
*/
void
-forwarded_statclock(struct trapframe frame)
+forwarded_statclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- statclock_process(curthread->td_kse, TRAPF_PC(&frame),
- TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ if (profprocs != 0)
+ profclock(&frame);
+ if (pscnt == psdiv)
+ statclock(&frame);
}
void
@@ -2642,12 +2643,10 @@ forward_statclock(void)
* routine.
*/
void
-forwarded_hardclock(struct trapframe frame)
+forwarded_hardclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ hardclock_process(&frame);
}
void
diff --git a/sys/amd64/amd64/mptable.c b/sys/amd64/amd64/mptable.c
index 39d10d6..7960c51 100644
--- a/sys/amd64/amd64/mptable.c
+++ b/sys/amd64/amd64/mptable.c
@@ -63,6 +63,7 @@
#include <machine/apic.h>
#include <machine/atomic.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/mpapic.h>
@@ -2603,17 +2604,17 @@ ap_init(void)
* For statclock, we send an IPI to all CPU's to have them call this
* function.
*
- * WARNING! unpend() will call statclock_process() directly and skip this
+ * WARNING! unpend() will call statclock() directly and skip this
* routine.
*/
void
-forwarded_statclock(struct trapframe frame)
+forwarded_statclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- statclock_process(curthread->td_kse, TRAPF_PC(&frame),
- TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ if (profprocs != 0)
+ profclock(&frame);
+ if (pscnt == psdiv)
+ statclock(&frame);
}
void
@@ -2642,12 +2643,10 @@ forward_statclock(void)
* routine.
*/
void
-forwarded_hardclock(struct trapframe frame)
+forwarded_hardclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ hardclock_process(&frame);
}
void
diff --git a/sys/amd64/amd64/tsc.c b/sys/amd64/amd64/tsc.c
index 98ef6f78..8e812a1 100644
--- a/sys/amd64/amd64/tsc.c
+++ b/sys/amd64/amd64/tsc.c
@@ -130,6 +130,8 @@ static void setup_8254_mixed_mode(void);
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
+int pscnt = 1;
+int psdiv = 1;
int statclock_disable;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
@@ -380,7 +382,13 @@ static void
rtcintr(struct clockframe frame)
{
while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
- statclock(&frame);
+ if (profprocs != 0) {
+ if (--pscnt == 0)
+ pscnt = psdiv;
+ profclock(&frame);
+ }
+ if (pscnt == psdiv)
+ statclock(&frame);
#ifdef SMP
forward_statclock();
#endif
@@ -1169,13 +1177,21 @@ setup_8254_mixed_mode()
#endif
void
-setstatclockrate(int newhz)
+cpu_startprofclock(void)
{
- if (newhz == RTC_PROFRATE)
- rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
- else
- rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
+ writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = psratio;
+}
+
+void
+cpu_stopprofclock(void)
+{
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = 1;
}
static int
diff --git a/sys/amd64/include/clock.h b/sys/amd64/include/clock.h
index f6ed717..140a345 100644
--- a/sys/amd64/include/clock.h
+++ b/sys/amd64/include/clock.h
@@ -16,6 +16,8 @@
*/
extern int adjkerntz;
extern int disable_rtc_set;
+extern int pscnt;
+extern int psdiv;
extern int statclock_disable;
extern u_int timer_freq;
extern int timer0_max_count;
diff --git a/sys/amd64/include/mptable.h b/sys/amd64/include/mptable.h
index 39d10d6..7960c51 100644
--- a/sys/amd64/include/mptable.h
+++ b/sys/amd64/include/mptable.h
@@ -63,6 +63,7 @@
#include <machine/apic.h>
#include <machine/atomic.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/mpapic.h>
@@ -2603,17 +2604,17 @@ ap_init(void)
* For statclock, we send an IPI to all CPU's to have them call this
* function.
*
- * WARNING! unpend() will call statclock_process() directly and skip this
+ * WARNING! unpend() will call statclock() directly and skip this
* routine.
*/
void
-forwarded_statclock(struct trapframe frame)
+forwarded_statclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- statclock_process(curthread->td_kse, TRAPF_PC(&frame),
- TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ if (profprocs != 0)
+ profclock(&frame);
+ if (pscnt == psdiv)
+ statclock(&frame);
}
void
@@ -2642,12 +2643,10 @@ forward_statclock(void)
* routine.
*/
void
-forwarded_hardclock(struct trapframe frame)
+forwarded_hardclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ hardclock_process(&frame);
}
void
diff --git a/sys/amd64/include/smp.h b/sys/amd64/include/smp.h
index f2250ff..2f576a2 100644
--- a/sys/amd64/include/smp.h
+++ b/sys/amd64/include/smp.h
@@ -113,9 +113,9 @@ void revoke_apic_irq(int irq);
void bsp_apic_configure(void);
void init_secondary(void);
void forward_statclock(void);
-void forwarded_statclock(struct trapframe frame);
+void forwarded_statclock(struct clockframe frame);
void forward_hardclock(void);
-void forwarded_hardclock(struct trapframe frame);
+void forwarded_hardclock(struct clockframe frame);
void ipi_selected(u_int cpus, u_int ipi);
void ipi_all(u_int ipi);
void ipi_all_but_self(u_int ipi);
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
index 98ef6f78..8e812a1 100644
--- a/sys/amd64/isa/clock.c
+++ b/sys/amd64/isa/clock.c
@@ -130,6 +130,8 @@ static void setup_8254_mixed_mode(void);
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
+int pscnt = 1;
+int psdiv = 1;
int statclock_disable;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
@@ -380,7 +382,13 @@ static void
rtcintr(struct clockframe frame)
{
while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
- statclock(&frame);
+ if (profprocs != 0) {
+ if (--pscnt == 0)
+ pscnt = psdiv;
+ profclock(&frame);
+ }
+ if (pscnt == psdiv)
+ statclock(&frame);
#ifdef SMP
forward_statclock();
#endif
@@ -1169,13 +1177,21 @@ setup_8254_mixed_mode()
#endif
void
-setstatclockrate(int newhz)
+cpu_startprofclock(void)
{
- if (newhz == RTC_PROFRATE)
- rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
- else
- rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
+ writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = psratio;
+}
+
+void
+cpu_stopprofclock(void)
+{
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = 1;
}
static int
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index f8e0321..e4b6ea5 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -365,7 +365,7 @@ Xinvlrng:
iret
/*
- * Forward hardclock to another CPU. Pushes a trapframe and calls
+ * Forward hardclock to another CPU. Pushes a clockframe and calls
* forwarded_hardclock().
*/
.text
@@ -389,14 +389,16 @@ Xhardclock:
jmp 10f
1:
incl TD_INTR_NESTING_LEVEL(%ebx)
+ pushl $0 /* XXX convert trapframe to clockframe */
call forwarded_hardclock
+ addl $4, %esp /* XXX convert clockframe to trapframe */
decl TD_INTR_NESTING_LEVEL(%ebx)
10:
MEXITCOUNT
jmp doreti
/*
- * Forward statclock to another CPU. Pushes a trapframe and calls
+ * Forward statclock to another CPU. Pushes a clockframe and calls
* forwarded_statclock().
*/
.text
@@ -422,7 +424,9 @@ Xstatclock:
jmp 10f
1:
incl TD_INTR_NESTING_LEVEL(%ebx)
+ pushl $0 /* XXX convert trapframe to clockframe */
call forwarded_statclock
+ addl $4, %esp /* XXX convert clockframe to trapframe */
decl TD_INTR_NESTING_LEVEL(%ebx)
10:
MEXITCOUNT
diff --git a/sys/i386/i386/critical.c b/sys/i386/i386/critical.c
index de91426..7a505e1 100644
--- a/sys/i386/i386/critical.c
+++ b/sys/i386/i386/critical.c
@@ -15,6 +15,7 @@
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
+#include <machine/clock.h>
#include <machine/critical.h>
#ifdef SMP
@@ -89,6 +90,10 @@ cpu_thread_link(struct thread *td)
void
i386_unpend(void)
{
+ struct clockframe frame;
+
+ frame.cf_cs = SEL_KPL;
+ frame.cf_eip = (register_t)i386_unpend;
KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
curthread->td_critnest = 1;
@@ -131,15 +136,13 @@ i386_unpend(void)
PCPU_SET(spending, mask & ~(1 << irq));
switch(irq) {
case 0: /* bit 0 - hardclock */
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, 0);
- mtx_unlock_spin(&sched_lock);
+ hardclock_process(&frame);
break;
case 1: /* bit 1 - statclock */
- mtx_lock_spin(&sched_lock);
- statclock_process(curthread->td_kse,
- (register_t)i386_unpend, 0);
- mtx_unlock_spin(&sched_lock);
+ if (profprocs != 0)
+ profclock(&frame);
+ if (pscnt == psdiv)
+ statclock(&frame);
break;
}
KASSERT((read_eflags() & PSL_I) == 0,
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 39d10d6..7960c51 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -63,6 +63,7 @@
#include <machine/apic.h>
#include <machine/atomic.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/mpapic.h>
@@ -2603,17 +2604,17 @@ ap_init(void)
* For statclock, we send an IPI to all CPU's to have them call this
* function.
*
- * WARNING! unpend() will call statclock_process() directly and skip this
+ * WARNING! unpend() will call statclock() directly and skip this
* routine.
*/
void
-forwarded_statclock(struct trapframe frame)
+forwarded_statclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- statclock_process(curthread->td_kse, TRAPF_PC(&frame),
- TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ if (profprocs != 0)
+ profclock(&frame);
+ if (pscnt == psdiv)
+ statclock(&frame);
}
void
@@ -2642,12 +2643,10 @@ forward_statclock(void)
* routine.
*/
void
-forwarded_hardclock(struct trapframe frame)
+forwarded_hardclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ hardclock_process(&frame);
}
void
diff --git a/sys/i386/i386/mptable.c b/sys/i386/i386/mptable.c
index 39d10d6..7960c51 100644
--- a/sys/i386/i386/mptable.c
+++ b/sys/i386/i386/mptable.c
@@ -63,6 +63,7 @@
#include <machine/apic.h>
#include <machine/atomic.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/mpapic.h>
@@ -2603,17 +2604,17 @@ ap_init(void)
* For statclock, we send an IPI to all CPU's to have them call this
* function.
*
- * WARNING! unpend() will call statclock_process() directly and skip this
+ * WARNING! unpend() will call statclock() directly and skip this
* routine.
*/
void
-forwarded_statclock(struct trapframe frame)
+forwarded_statclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- statclock_process(curthread->td_kse, TRAPF_PC(&frame),
- TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ if (profprocs != 0)
+ profclock(&frame);
+ if (pscnt == psdiv)
+ statclock(&frame);
}
void
@@ -2642,12 +2643,10 @@ forward_statclock(void)
* routine.
*/
void
-forwarded_hardclock(struct trapframe frame)
+forwarded_hardclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ hardclock_process(&frame);
}
void
diff --git a/sys/i386/i386/tsc.c b/sys/i386/i386/tsc.c
index 98ef6f78..8e812a1 100644
--- a/sys/i386/i386/tsc.c
+++ b/sys/i386/i386/tsc.c
@@ -130,6 +130,8 @@ static void setup_8254_mixed_mode(void);
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
+int pscnt = 1;
+int psdiv = 1;
int statclock_disable;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
@@ -380,7 +382,13 @@ static void
rtcintr(struct clockframe frame)
{
while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
- statclock(&frame);
+ if (profprocs != 0) {
+ if (--pscnt == 0)
+ pscnt = psdiv;
+ profclock(&frame);
+ }
+ if (pscnt == psdiv)
+ statclock(&frame);
#ifdef SMP
forward_statclock();
#endif
@@ -1169,13 +1177,21 @@ setup_8254_mixed_mode()
#endif
void
-setstatclockrate(int newhz)
+cpu_startprofclock(void)
{
- if (newhz == RTC_PROFRATE)
- rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
- else
- rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
+ writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = psratio;
+}
+
+void
+cpu_stopprofclock(void)
+{
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = 1;
}
static int
diff --git a/sys/i386/include/clock.h b/sys/i386/include/clock.h
index f6ed717..140a345 100644
--- a/sys/i386/include/clock.h
+++ b/sys/i386/include/clock.h
@@ -16,6 +16,8 @@
*/
extern int adjkerntz;
extern int disable_rtc_set;
+extern int pscnt;
+extern int psdiv;
extern int statclock_disable;
extern u_int timer_freq;
extern int timer0_max_count;
diff --git a/sys/i386/include/mptable.h b/sys/i386/include/mptable.h
index 39d10d6..7960c51 100644
--- a/sys/i386/include/mptable.h
+++ b/sys/i386/include/mptable.h
@@ -63,6 +63,7 @@
#include <machine/apic.h>
#include <machine/atomic.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/mpapic.h>
@@ -2603,17 +2604,17 @@ ap_init(void)
* For statclock, we send an IPI to all CPU's to have them call this
* function.
*
- * WARNING! unpend() will call statclock_process() directly and skip this
+ * WARNING! unpend() will call statclock() directly and skip this
* routine.
*/
void
-forwarded_statclock(struct trapframe frame)
+forwarded_statclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- statclock_process(curthread->td_kse, TRAPF_PC(&frame),
- TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ if (profprocs != 0)
+ profclock(&frame);
+ if (pscnt == psdiv)
+ statclock(&frame);
}
void
@@ -2642,12 +2643,10 @@ forward_statclock(void)
* routine.
*/
void
-forwarded_hardclock(struct trapframe frame)
+forwarded_hardclock(struct clockframe frame)
{
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, TRAPF_USERMODE(&frame));
- mtx_unlock_spin(&sched_lock);
+ hardclock_process(&frame);
}
void
diff --git a/sys/i386/include/smp.h b/sys/i386/include/smp.h
index f2250ff..2f576a2 100644
--- a/sys/i386/include/smp.h
+++ b/sys/i386/include/smp.h
@@ -113,9 +113,9 @@ void revoke_apic_irq(int irq);
void bsp_apic_configure(void);
void init_secondary(void);
void forward_statclock(void);
-void forwarded_statclock(struct trapframe frame);
+void forwarded_statclock(struct clockframe frame);
void forward_hardclock(void);
-void forwarded_hardclock(struct trapframe frame);
+void forwarded_hardclock(struct clockframe frame);
void ipi_selected(u_int cpus, u_int ipi);
void ipi_all(u_int ipi);
void ipi_all_but_self(u_int ipi);
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index f8e0321..e4b6ea5 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -365,7 +365,7 @@ Xinvlrng:
iret
/*
- * Forward hardclock to another CPU. Pushes a trapframe and calls
+ * Forward hardclock to another CPU. Pushes a clockframe and calls
* forwarded_hardclock().
*/
.text
@@ -389,14 +389,16 @@ Xhardclock:
jmp 10f
1:
incl TD_INTR_NESTING_LEVEL(%ebx)
+ pushl $0 /* XXX convert trapframe to clockframe */
call forwarded_hardclock
+ addl $4, %esp /* XXX convert clockframe to trapframe */
decl TD_INTR_NESTING_LEVEL(%ebx)
10:
MEXITCOUNT
jmp doreti
/*
- * Forward statclock to another CPU. Pushes a trapframe and calls
+ * Forward statclock to another CPU. Pushes a clockframe and calls
* forwarded_statclock().
*/
.text
@@ -422,7 +424,9 @@ Xstatclock:
jmp 10f
1:
incl TD_INTR_NESTING_LEVEL(%ebx)
+ pushl $0 /* XXX convert trapframe to clockframe */
call forwarded_statclock
+ addl $4, %esp /* XXX convert clockframe to trapframe */
decl TD_INTR_NESTING_LEVEL(%ebx)
10:
MEXITCOUNT
diff --git a/sys/i386/isa/clock.c b/sys/i386/isa/clock.c
index 98ef6f78..8e812a1 100644
--- a/sys/i386/isa/clock.c
+++ b/sys/i386/isa/clock.c
@@ -130,6 +130,8 @@ static void setup_8254_mixed_mode(void);
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
+int pscnt = 1;
+int psdiv = 1;
int statclock_disable;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
@@ -380,7 +382,13 @@ static void
rtcintr(struct clockframe frame)
{
while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
- statclock(&frame);
+ if (profprocs != 0) {
+ if (--pscnt == 0)
+ pscnt = psdiv;
+ profclock(&frame);
+ }
+ if (pscnt == psdiv)
+ statclock(&frame);
#ifdef SMP
forward_statclock();
#endif
@@ -1169,13 +1177,21 @@ setup_8254_mixed_mode()
#endif
void
-setstatclockrate(int newhz)
+cpu_startprofclock(void)
{
- if (newhz == RTC_PROFRATE)
- rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
- else
- rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
+ writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = psratio;
+}
+
+void
+cpu_stopprofclock(void)
+{
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = 1;
}
static int
diff --git a/sys/ia64/ia64/clock.c b/sys/ia64/ia64/clock.c
index 41d8dc4..277c14b 100644
--- a/sys/ia64/ia64/clock.c
+++ b/sys/ia64/ia64/clock.c
@@ -259,12 +259,19 @@ fail:
* change after being set up above. Could recalculate intervals here
* but that would be a drag.
*/
+
+void
+cpu_startprofclock(void)
+{
+
+ /* nothing to do */
+}
+
void
-setstatclockrate(newhz)
- int newhz;
+cpu_stopprofclock(void)
{
- /* nothing we can do */
+ /* nothing to do */
}
/*
diff --git a/sys/ia64/ia64/interrupt.c b/sys/ia64/ia64/interrupt.c
index 720dbee..bc2d324 100644
--- a/sys/ia64/ia64/interrupt.c
+++ b/sys/ia64/ia64/interrupt.c
@@ -132,16 +132,19 @@ interrupt(u_int64_t vector, struct trapframe *framep)
#endif
hardclock((struct clockframe *)framep);
/* divide hz (1024) by 8 to get stathz (128) */
- if ((++schedclk2 & 0x7) == 0)
+ if ((++schedclk2 & 0x7) == 0) {
+ if (profprocs != 0)
+ profclock((struct clockframe *)framep);
statclock((struct clockframe *)framep);
+ }
#ifdef SMP
} else {
- mtx_lock_spin(&sched_lock);
- hardclock_process(curthread, TRAPF_USERMODE(framep));
- if ((schedclk2 & 0x7) == 0)
- statclock_process(curkse, TRAPF_PC(framep),
- TRAPF_USERMODE(framep));
- mtx_unlock_spin(&sched_lock);
+ hardclock_process((struct clockframe *)framep);
+ if ((schedclk2 & 0x7) == 0) {
+ if (profprocs != 0)
+ profclock((struct clockframe *)framep);
+ statclock((struct clockframe *)framep);
+ }
}
#endif
critical_exit();
diff --git a/sys/isa/atrtc.c b/sys/isa/atrtc.c
index 98ef6f78..8e812a1 100644
--- a/sys/isa/atrtc.c
+++ b/sys/isa/atrtc.c
@@ -130,6 +130,8 @@ static void setup_8254_mixed_mode(void);
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
+int pscnt = 1;
+int psdiv = 1;
int statclock_disable;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
@@ -380,7 +382,13 @@ static void
rtcintr(struct clockframe frame)
{
while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
- statclock(&frame);
+ if (profprocs != 0) {
+ if (--pscnt == 0)
+ pscnt = psdiv;
+ profclock(&frame);
+ }
+ if (pscnt == psdiv)
+ statclock(&frame);
#ifdef SMP
forward_statclock();
#endif
@@ -1169,13 +1177,21 @@ setup_8254_mixed_mode()
#endif
void
-setstatclockrate(int newhz)
+cpu_startprofclock(void)
{
- if (newhz == RTC_PROFRATE)
- rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
- else
- rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
+ writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = psratio;
+}
+
+void
+cpu_stopprofclock(void)
+{
+
+ rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
writertc(RTC_STATUSA, rtc_statusa);
+ psdiv = pscnt = 1;
}
static int
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index c5e1b4a..5c6756b 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -122,10 +122,9 @@ long tk_rawcc;
int stathz;
int profhz;
-static int profprocs;
+int profprocs;
int ticks;
-static int psdiv, pscnt; /* prof => stat divider */
-int psratio; /* ratio: prof / stat */
+int psratio;
/*
* Initialize clock frequencies and start both clocks running.
@@ -141,7 +140,6 @@ initclocks(dummy)
* Set divisors to 1 (normal case) and let the machine-specific
* code do its bit.
*/
- psdiv = pscnt = 1;
cpu_initclocks();
#ifdef DEVICE_POLLING
@@ -157,32 +155,27 @@ initclocks(dummy)
}
/*
- * Each time the real-time timer fires, this function is called on all CPUs
- * with each CPU passing in its curthread as the first argument. If possible
- * a nice optimization in the future would be to allow the CPU receiving the
- * actual real-time timer interrupt to call this function on behalf of the
- * other CPUs rather than sending an IPI to all other CPUs so that they
- * can call this function. Note that hardclock() calls hardclock_process()
- * for the CPU receiving the timer interrupt, so only the other CPUs in the
- * system need to call this function (or have it called on their behalf.
+ * Each time the real-time timer fires, this function is called on all CPUs.
+ * Note that hardclock() calls hardclock_process() for the boot CPU, so only
+ * the other CPUs in the system need to call this function.
*/
void
-hardclock_process(td, user)
- struct thread *td;
- int user;
+hardclock_process(frame)
+ register struct clockframe *frame;
{
struct pstats *pstats;
+ struct thread *td = curthread;
struct proc *p = td->td_proc;
/*
* Run current process's virtual and profile time, as needed.
*/
- mtx_assert(&sched_lock, MA_OWNED);
+ mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
if (p->p_flag & P_KSES) {
/* XXXKSE What to do? */
} else {
pstats = p->p_stats;
- if (user &&
+ if (CLKF_USERMODE(frame) &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
p->p_sflag |= PS_ALRMPEND;
@@ -194,6 +187,7 @@ hardclock_process(td, user)
td->td_kse->ke_flags |= KEF_ASTPENDING;
}
}
+ mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}
/*
@@ -206,9 +200,7 @@ hardclock(frame)
int need_softclock = 0;
CTR0(KTR_CLK, "hardclock fired");
- mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
- hardclock_process(curthread, CLKF_USERMODE(frame));
- mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
+ hardclock_process(frame);
tc_ticktock();
/*
@@ -216,8 +208,10 @@ hardclock(frame)
*
* XXX: this only works for UP
*/
- if (stathz == 0)
+ if (stathz == 0) {
+ profclock(frame);
statclock(frame);
+ }
#ifdef DEVICE_POLLING
hardclock_device_poll(); /* this is very short and quick */
@@ -312,7 +306,6 @@ void
startprofclock(p)
register struct proc *p;
{
- int s;
/*
* XXX; Right now sched_lock protects statclock(), but perhaps
@@ -322,12 +315,8 @@ startprofclock(p)
mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_PROFIL) == 0) {
p->p_sflag |= PS_PROFIL;
- if (++profprocs == 1 && stathz != 0) {
- s = splstatclock();
- psdiv = pscnt = psratio;
- setstatclockrate(profhz);
- splx(s);
- }
+ if (++profprocs == 1)
+ cpu_startprofclock();
}
mtx_unlock_spin(&sched_lock);
}
@@ -339,57 +328,41 @@ void
stopprofclock(p)
register struct proc *p;
{
- int s;
mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_PROFIL) {
p->p_sflag &= ~PS_PROFIL;
- if (--profprocs == 0 && stathz != 0) {
- s = splstatclock();
- psdiv = pscnt = 1;
- setstatclockrate(stathz);
- splx(s);
- }
+ if (--profprocs == 0)
+ cpu_stopprofclock();
}
mtx_unlock_spin(&sched_lock);
}
/*
- * Do process and kernel statistics. Most of the statistics are only
+ * Statistics clock. Grab profile sample, and if divider reaches 0,
+ * do process and kernel statistics. Most of the statistics are only
* used by user-level statistics programs. The main exceptions are
- * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu. This function
- * should be called by all CPUs in the system for each statistics clock
- * interrupt. See the description of hardclock_process for more detail on
- * this function's relationship to statclock.
+ * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu.
+ * This should be called by all active processors.
*/
void
-statclock_process(ke, pc, user)
- struct kse *ke;
- register_t pc;
- int user;
+statclock(frame)
+ register struct clockframe *frame;
{
-#ifdef GPROF
- struct gmonparam *g;
- int i;
-#endif
struct pstats *pstats;
- long rss;
struct rusage *ru;
struct vmspace *vm;
- struct proc *p = ke->ke_proc;
- struct thread *td = ke->ke_thread; /* current thread */
+ struct thread *td;
+ struct kse *ke;
+ struct proc *p;
+ long rss;
- KASSERT(ke == curthread->td_kse, ("statclock_process: td != curthread"));
- mtx_assert(&sched_lock, MA_OWNED);
- if (user) {
- /*
- * Came from user mode; CPU was in user state.
- * If this process is being profiled, record the tick.
- */
- if (p->p_sflag & PS_PROFIL)
- addupc_intr(ke, pc, 1);
- if (pscnt < psdiv)
- return;
+ td = curthread;
+ p = td->td_proc;
+
+ mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
+ ke = td->td_kse;
+ if (CLKF_USERMODE(frame)) {
/*
* Charge the time as appropriate.
*/
@@ -401,21 +374,6 @@ statclock_process(ke, pc, user)
else
cp_time[CP_USER]++;
} else {
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = pc - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt < psdiv)
- return;
/*
* Came from kernel mode, so we were:
* - handling an interrupt,
@@ -455,25 +413,43 @@ statclock_process(ke, pc, user)
if (ru->ru_maxrss < rss)
ru->ru_maxrss = rss;
}
+ mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}
-/*
- * Statistics clock. Grab profile sample, and if divider reaches 0,
- * do process and kernel statistics. Most of the statistics are only
- * used by user-level statistics programs. The main exceptions are
- * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu.
- */
void
-statclock(frame)
+profclock(frame)
register struct clockframe *frame;
{
+ struct thread *td;
+#ifdef GPROF
+ struct gmonparam *g;
+ int i;
+#endif
- CTR0(KTR_CLK, "statclock fired");
- mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
- if (--pscnt == 0)
- pscnt = psdiv;
- statclock_process(curthread->td_kse, CLKF_PC(frame), CLKF_USERMODE(frame));
- mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
+ if (CLKF_USERMODE(frame)) {
+ /*
+ * Came from user mode; CPU was in user state.
+ * If this process is being profiled, record the tick.
+ */
+ td = curthread;
+ if (td->td_proc->p_sflag & PS_PROFIL)
+ addupc_intr(td->td_kse, CLKF_PC(frame), 1);
+ }
+#ifdef GPROF
+ else {
+ /*
+ * Kernel statistics are just like addupc_intr, only easier.
+ */
+ g = &_gmonparam;
+ if (g->state == GMON_PROF_ON) {
+ i = CLKF_PC(frame) - g->lowpc;
+ if (i < g->textsize) {
+ i /= HISTFRACTION * sizeof(*g->kcount);
+ g->kcount[i]++;
+ }
+ }
+ }
+#endif
}
/*
diff --git a/sys/pc98/cbus/clock.c b/sys/pc98/cbus/clock.c
index 557afe8..8e2dc97 100644
--- a/sys/pc98/cbus/clock.c
+++ b/sys/pc98/cbus/clock.c
@@ -140,6 +140,8 @@ static void setup_8254_mixed_mode(void);
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
+int pscnt = 1;
+int psdiv = 1;
int statclock_disable;
#ifndef TIMER_FREQ
#ifdef PC98
@@ -441,7 +443,13 @@ static void
rtcintr(struct clockframe frame)
{
while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
- statclock(&frame);
+ if (profprocs != 0) {
+ if (--pscnt == 0)
+ pscnt = psdiv;
+ profclock(&frame);
+ }
+ if (pscnt == psdiv)
+ statclock(&frame);
#ifdef SMP
forward_statclock();
#endif
@@ -1518,7 +1526,7 @@ setup_8254_mixed_mode()
#endif
void
-setstatclockrate(int newhz)
+cpu_startprofclock(void)
{
#ifndef PC98
if (newhz == RTC_PROFRATE)
@@ -1529,6 +1537,11 @@ setstatclockrate(int newhz)
#endif
}
+void
+cpu_stopprofclock(void)
+{
+}
+
static int
sysctl_machdep_i8254_freq(SYSCTL_HANDLER_ARGS)
{
diff --git a/sys/pc98/cbus/pcrtc.c b/sys/pc98/cbus/pcrtc.c
index 557afe8..8e2dc97 100644
--- a/sys/pc98/cbus/pcrtc.c
+++ b/sys/pc98/cbus/pcrtc.c
@@ -140,6 +140,8 @@ static void setup_8254_mixed_mode(void);
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
+int pscnt = 1;
+int psdiv = 1;
int statclock_disable;
#ifndef TIMER_FREQ
#ifdef PC98
@@ -441,7 +443,13 @@ static void
rtcintr(struct clockframe frame)
{
while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
- statclock(&frame);
+ if (profprocs != 0) {
+ if (--pscnt == 0)
+ pscnt = psdiv;
+ profclock(&frame);
+ }
+ if (pscnt == psdiv)
+ statclock(&frame);
#ifdef SMP
forward_statclock();
#endif
@@ -1518,7 +1526,7 @@ setup_8254_mixed_mode()
#endif
void
-setstatclockrate(int newhz)
+cpu_startprofclock(void)
{
#ifndef PC98
if (newhz == RTC_PROFRATE)
@@ -1529,6 +1537,11 @@ setstatclockrate(int newhz)
#endif
}
+void
+cpu_stopprofclock(void)
+{
+}
+
static int
sysctl_machdep_i8254_freq(SYSCTL_HANDLER_ARGS)
{
diff --git a/sys/pc98/pc98/clock.c b/sys/pc98/pc98/clock.c
index 557afe8..8e2dc97 100644
--- a/sys/pc98/pc98/clock.c
+++ b/sys/pc98/pc98/clock.c
@@ -140,6 +140,8 @@ static void setup_8254_mixed_mode(void);
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
+int pscnt = 1;
+int psdiv = 1;
int statclock_disable;
#ifndef TIMER_FREQ
#ifdef PC98
@@ -441,7 +443,13 @@ static void
rtcintr(struct clockframe frame)
{
while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
- statclock(&frame);
+ if (profprocs != 0) {
+ if (--pscnt == 0)
+ pscnt = psdiv;
+ profclock(&frame);
+ }
+ if (pscnt == psdiv)
+ statclock(&frame);
#ifdef SMP
forward_statclock();
#endif
@@ -1518,7 +1526,7 @@ setup_8254_mixed_mode()
#endif
void
-setstatclockrate(int newhz)
+cpu_startprofclock(void)
{
#ifndef PC98
if (newhz == RTC_PROFRATE)
@@ -1529,6 +1537,11 @@ setstatclockrate(int newhz)
#endif
}
+void
+cpu_stopprofclock(void)
+{
+}
+
static int
sysctl_machdep_i8254_freq(SYSCTL_HANDLER_ARGS)
{
diff --git a/sys/powerpc/aim/clock.c b/sys/powerpc/aim/clock.c
index d1f13e2..a28d376 100644
--- a/sys/powerpc/aim/clock.c
+++ b/sys/powerpc/aim/clock.c
@@ -319,8 +319,13 @@ delay(int n)
* Nothing to do.
*/
void
-setstatclockrate(int arg)
+cpu_startprofclock(void)
{
/* Do nothing */
}
+
+void
+cpu_stopprofclock(void)
+{
+}
diff --git a/sys/powerpc/powerpc/clock.c b/sys/powerpc/powerpc/clock.c
index d1f13e2..a28d376 100644
--- a/sys/powerpc/powerpc/clock.c
+++ b/sys/powerpc/powerpc/clock.c
@@ -319,8 +319,13 @@ delay(int n)
* Nothing to do.
*/
void
-setstatclockrate(int arg)
+cpu_startprofclock(void)
{
/* Do nothing */
}
+
+void
+cpu_stopprofclock(void)
+{
+}
diff --git a/sys/sparc64/sparc64/clock.c b/sys/sparc64/sparc64/clock.c
index 1444270..37341fd 100644
--- a/sys/sparc64/sparc64/clock.c
+++ b/sys/sparc64/sparc64/clock.c
@@ -48,9 +48,13 @@ DELAY(int n)
}
void
-setstatclockrate(int newhz)
+cpu_startprofclock(void)
+{
+}
+
+void
+cpu_stopprofclock(void)
{
- /* TODO; */
}
int
diff --git a/sys/sparc64/sparc64/tick.c b/sys/sparc64/sparc64/tick.c
index a10ba01..f92fbb8 100644
--- a/sys/sparc64/sparc64/tick.c
+++ b/sys/sparc64/sparc64/tick.c
@@ -57,6 +57,7 @@ int tick_missed; /* statistics */
void
cpu_initclocks(void)
{
+ stathz = hz;
tick_start(tick_hardclock);
}
@@ -64,20 +65,13 @@ static __inline void
tick_process(struct clockframe *cf)
{
-#ifdef SMP
if (PCPU_GET(cpuid) == 0)
hardclock(cf);
- else {
- CTR1(KTR_CLK, "tick_process: AP, cpuid=%d", PCPU_GET(cpuid));
- mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
- hardclock_process(curthread, CLKF_USERMODE(cf));
- statclock_process(curthread->td_kse, CLKF_PC(cf),
- CLKF_USERMODE(cf));
- mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
- }
-#else
- hardclock(cf);
-#endif
+ else
+ hardclock_process(cf);
+ if (profprocs != 0)
+ profclock(cf);
+ statclock(cf);
}
void
diff --git a/sys/sys/kernel.h b/sys/sys/kernel.h
index 4089d59..ba486a9 100644
--- a/sys/sys/kernel.h
+++ b/sys/sys/kernel.h
@@ -75,6 +75,7 @@ extern int hz; /* system clock's frequency */
extern int psratio; /* ratio: prof / stat */
extern int stathz; /* statistics clock's frequency */
extern int profhz; /* profiling clock's frequency */
+extern int profprocs; /* number of process's profiling */
extern int ticks;
extern int lbolt; /* once a second sleep address */
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index a3fdcf5..363f713 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -199,14 +199,15 @@ int suword64(void *base, int64_t word);
void realitexpire(void *);
void hardclock(struct clockframe *frame);
-void hardclock_process(struct thread *td, int user);
+void hardclock_process(struct clockframe *frame);
void softclock(void *);
void statclock(struct clockframe *frame);
-void statclock_process(struct kse *ke, register_t pc, int user);
+void profclock(struct clockframe *frame);
void startprofclock(struct proc *);
void stopprofclock(struct proc *);
-void setstatclockrate(int hzrate);
+void cpu_startprofclock(void);
+void cpu_stopprofclock(void);
/* flags for suser() and suser_cred() */
#define PRISON_ROOT 1