summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_clock.c
diff options
context:
space:
mode:
authorpeter <peter@FreeBSD.org>2007-11-29 06:34:30 +0000
committerpeter <peter@FreeBSD.org>2007-11-29 06:34:30 +0000
commit8e9baed5537acc943e5fd7641e69438a2466c1ad (patch)
treeebdab9e1e3b077956286c642401a485f2a47e88b /sys/kern/kern_clock.c
parent723157380283ac617fbbbb23fb4ae6120293007b (diff)
downloadFreeBSD-src-8e9baed5537acc943e5fd7641e69438a2466c1ad.zip
FreeBSD-src-8e9baed5537acc943e5fd7641e69438a2466c1ad.tar.gz
Move the shared cp_time array (counts %sys, %user, %idle etc) to the
per-cpu area. cp_time[] goes away and a new function creates a merged cp_time-like array for things like linprocfs, sysctl etc. The atomic ops for updating cp_time[] in statclock go away, and the scope of the thread lock is reduced. sysctl kern.cp_time returns a backwards compatible cp_time[] array. A new kern.cp_times sysctl returns the individual per-cpu stats. I have pending changes to make top and vmstat optionally show per-cpu stats. I'm very aware that there are something like 5 or 6 other versions "out there" for doing this - but none were handy when I needed them. I did merge my changes with John Baldwin's, and ended up replacing a few chunks of my stuff with his, and stealing some other code. Reviewed by: jhb Partly obtained from: jhb
Diffstat (limited to 'sys/kern/kern_clock.c')
-rw-r--r--sys/kern/kern_clock.c81
1 file changed, 72 insertions, 9 deletions
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 506b5a2..54b7b8f 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -81,9 +81,6 @@ extern void hardclock_device_poll(void);
static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
-/* Some of these don't belong here, but it's easiest to concentrate them. */
-long cp_time[CPUSTATES];
-
/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;
@@ -91,10 +88,14 @@ static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
int error;
+ long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
int i;
unsigned int cp_time32[CPUSTATES];
+#endif
+ read_cpu_time(cp_time);
+#ifdef SCTL_MASK32
if (req->flags & SCTL_MASK32) {
if (!req->oldptr)
return SYSCTL_OUT(req, 0, sizeof(cp_time32));
@@ -114,6 +115,66 @@ sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD,
0,0, sysctl_kern_cp_time, "LU", "CPU time statistics");
+/*
+ * Zero-filled per-state tick array, substituted for the pcpu stats of
+ * CPU ids that are absent (holes in the 0..mp_maxid id space) so the
+ * sysctl output stays dense: slot index == cpu id.
+ */
+static long empty[CPUSTATES];
+
+/*
+ * Handler for the new kern.cp_times sysctl: emits one CPUSTATES-sized
+ * array of tick counters for every possible CPU id from 0 to mp_maxid,
+ * inclusive.  Under SCTL_MASK32 (32-bit compat request) each long
+ * counter is narrowed to an unsigned int before being copied out.
+ */
+static int
+sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
+{
+ struct pcpu *pcpu;
+ int error;
+ int i, c;
+ long *cp_time;
+#ifdef SCTL_MASK32
+ unsigned int cp_time32[CPUSTATES];
+#endif
+
+ /* Size-probe request: report required buffer size, copy no data. */
+ if (!req->oldptr) {
+#ifdef SCTL_MASK32
+ if (req->flags & SCTL_MASK32)
+ return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
+ else
+#endif
+ return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
+ }
+ /* Walk every possible CPU id; stop early if a copyout fails. */
+ for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
+ if (!CPU_ABSENT(c)) {
+ pcpu = pcpu_find(c);
+ cp_time = pcpu->pc_cp_time;
+ } else {
+ /* Absent CPU: emit zeros to keep slot c aligned with cpu id c. */
+ cp_time = empty;
+ }
+#ifdef SCTL_MASK32
+ if (req->flags & SCTL_MASK32) {
+ for (i = 0; i < CPUSTATES; i++)
+ cp_time32[i] = (unsigned int)cp_time[i];
+ error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
+ } else
+#endif
+ error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
+ }
+ return error;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD,
+ 0,0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");
+
+/*
+ * Build a merged, backward-compatible cp_time[] view by summing the
+ * per-cpu pc_cp_time counters of all present CPUs.  The caller must
+ * supply storage for CPUSTATES longs; absent CPU ids are skipped.
+ * Replaces the old global cp_time[] array for consumers such as the
+ * kern.cp_time sysctl and linprocfs.
+ */
+void
+read_cpu_time(long *cp_time)
+{
+ struct pcpu *pc;
+ int i, j;
+
+ /* Sum up global cp_time[]. */
+ bzero(cp_time, sizeof(long) * CPUSTATES);
+ for (i = 0; i <= mp_maxid; i++) {
+ if (CPU_ABSENT(i))
+ continue;
+ pc = pcpu_find(i);
+ for (j = 0; j < CPUSTATES; j++)
+ cp_time[j] += pc->pc_cp_time[j];
+ }
+}
+
#ifdef SW_WATCHDOG
#include <sys/watchdog.h>
@@ -405,11 +466,12 @@ statclock(int usermode)
struct thread *td;
struct proc *p;
long rss;
+ long *cp_time;
td = curthread;
p = td->td_proc;
- thread_lock_flags(td, MTX_QUIET);
+ cp_time = (long *)PCPU_PTR(cp_time);
if (usermode) {
/*
* Charge the time as appropriate.
@@ -420,9 +482,9 @@ statclock(int usermode)
#endif
td->td_uticks++;
if (p->p_nice > NZERO)
- atomic_add_long(&cp_time[CP_NICE], 1);
+ cp_time[CP_NICE]++;
else
- atomic_add_long(&cp_time[CP_USER], 1);
+ cp_time[CP_USER]++;
} else {
/*
* Came from kernel mode, so we were:
@@ -439,7 +501,7 @@ statclock(int usermode)
if ((td->td_pflags & TDP_ITHREAD) ||
td->td_intr_nesting_level >= 2) {
td->td_iticks++;
- atomic_add_long(&cp_time[CP_INTR], 1);
+ cp_time[CP_INTR]++;
} else {
#ifdef KSE
if (p->p_flag & P_SA)
@@ -448,9 +510,9 @@ statclock(int usermode)
td->td_pticks++;
td->td_sticks++;
if (!TD_IS_IDLETHREAD(td))
- atomic_add_long(&cp_time[CP_SYS], 1);
+ cp_time[CP_SYS]++;
else
- atomic_add_long(&cp_time[CP_IDLE], 1);
+ cp_time[CP_IDLE]++;
}
}
@@ -466,6 +528,7 @@ statclock(int usermode)
ru->ru_maxrss = rss;
CTR4(KTR_SCHED, "statclock: %p(%s) prio %d stathz %d",
td, td->td_name, td->td_priority, (stathz)?stathz:hz);
+ thread_lock_flags(td, MTX_QUIET);
sched_clock(td);
thread_unlock(td);
}
OpenPOWER on IntegriCloud