path: root/sys/kern/kern_clock.c
author     mav <mav@FreeBSD.org>  2012-03-10 14:57:21 +0000
committer  mav <mav@FreeBSD.org>  2012-03-10 14:57:21 +0000
commit     1324baa4ebfff886fe68bbe354f9d6b82bfd7d82 (patch)
tree       8b2b5ba74f47f4a3fabe3b9c70eb58891290db16 /sys/kern/kern_clock.c
parent     a0d48d6f11620836250fbc0f703a3e2a00ddbe4c (diff)
Idle ticks optimization:
- Pass the number of events to statclock() and profclock(), the same as was
  done for hardclock() earlier, so that they are not called many times in a
  loop (see the caller-side sketch below).
- Rename them to statclock_cnt() and profclock_cnt().
- Turn statclock() and profclock() into compatibility wrappers, still needed
  for arm.
- Rename hardclock_anycpu() to hardclock_cnt() for unification.

MFC after:	1 week
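For context, a minimal caller-side sketch of the batching this enables. This
is hypothetical code, not part of the commit: example_handleevents() and its
parameters are invented for illustration, and the kernel declarations of
statclock_cnt()/profclock_cnt() are assumed to be in scope. An event-timer
handler that wakes after the CPU slept through several statclock/profclock
periods can charge them all at once instead of looping.

/*
 * Hypothetical sketch only: shows how a caller that accumulated several
 * missed ticks could use the new *_cnt() interfaces.  The function name
 * and parameters are illustrative; the real callers live elsewhere in
 * the kernel.
 */
static void
example_handleevents(int usermode, uintfptr_t pc, int stat_ticks,
    int prof_ticks)
{

	/* One call charges all accumulated statclock periods. */
	if (stat_ticks > 0)
		statclock_cnt(stat_ticks, usermode);

	/*
	 * Likewise for profiling ticks, instead of calling profclock()
	 * once per missed period in a loop.
	 */
	if (prof_ticks > 0)
		profclock_cnt(prof_ticks, usermode, pc);
}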
Diffstat (limited to 'sys/kern/kern_clock.c')
-rw-r--r--  sys/kern/kern_clock.c | 47
1 file changed, 31 insertions, 16 deletions
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index cf9d6a8..fdc1302 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -483,7 +483,7 @@ hardclock(int usermode, uintfptr_t pc)
}
void
-hardclock_anycpu(int cnt, int usermode)
+hardclock_cnt(int cnt, int usermode)
{
struct pstats *pstats;
struct thread *td = curthread;
@@ -688,6 +688,13 @@ stopprofclock(p)
void
statclock(int usermode)
{
+
+ statclock_cnt(1, usermode);
+}
+
+void
+statclock_cnt(int cnt, int usermode)
+{
struct rusage *ru;
struct vmspace *vm;
struct thread *td;
@@ -703,11 +710,11 @@ statclock(int usermode)
/*
* Charge the time as appropriate.
*/
- td->td_uticks++;
+ td->td_uticks += cnt;
if (p->p_nice > NZERO)
- cp_time[CP_NICE]++;
+ cp_time[CP_NICE] += cnt;
else
- cp_time[CP_USER]++;
+ cp_time[CP_USER] += cnt;
} else {
/*
* Came from kernel mode, so we were:
@@ -723,15 +730,15 @@ statclock(int usermode)
*/
if ((td->td_pflags & TDP_ITHREAD) ||
td->td_intr_nesting_level >= 2) {
- td->td_iticks++;
- cp_time[CP_INTR]++;
+ td->td_iticks += cnt;
+ cp_time[CP_INTR] += cnt;
} else {
- td->td_pticks++;
- td->td_sticks++;
+ td->td_pticks += cnt;
+ td->td_sticks += cnt;
if (!TD_IS_IDLETHREAD(td))
- cp_time[CP_SYS]++;
+ cp_time[CP_SYS] += cnt;
else
- cp_time[CP_IDLE]++;
+ cp_time[CP_IDLE] += cnt;
}
}
@@ -739,22 +746,30 @@ statclock(int usermode)
MPASS(p->p_vmspace != NULL);
vm = p->p_vmspace;
ru = &td->td_ru;
- ru->ru_ixrss += pgtok(vm->vm_tsize);
- ru->ru_idrss += pgtok(vm->vm_dsize);
- ru->ru_isrss += pgtok(vm->vm_ssize);
+ ru->ru_ixrss += pgtok(vm->vm_tsize) * cnt;
+ ru->ru_idrss += pgtok(vm->vm_dsize) * cnt;
+ ru->ru_isrss += pgtok(vm->vm_ssize) * cnt;
rss = pgtok(vmspace_resident_count(vm));
if (ru->ru_maxrss < rss)
ru->ru_maxrss = rss;
KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
"prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
thread_lock_flags(td, MTX_QUIET);
- sched_clock(td);
+ for ( ; cnt > 0; cnt--)
+ sched_clock(td);
thread_unlock(td);
}
void
profclock(int usermode, uintfptr_t pc)
{
+
+ profclock_cnt(1, usermode, pc);
+}
+
+void
+profclock_cnt(int cnt, int usermode, uintfptr_t pc)
+{
struct thread *td;
#ifdef GPROF
struct gmonparam *g;
@@ -770,7 +785,7 @@ profclock(int usermode, uintfptr_t pc)
* bother trying to count it.
*/
if (td->td_proc->p_flag & P_PROFIL)
- addupc_intr(td, pc, 1);
+ addupc_intr(td, pc, cnt);
}
#ifdef GPROF
else {
@@ -781,7 +796,7 @@ profclock(int usermode, uintfptr_t pc)
if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
i = PC_TO_I(g, pc);
if (i < g->textsize) {
- KCOUNT(g, i)++;
+ KCOUNT(g, i) += cnt;
}
}
}