summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_synch.c
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2007-06-01 01:12:45 +0000
committerjeff <jeff@FreeBSD.org>2007-06-01 01:12:45 +0000
commita7a8bac81f171b93c4770f51c694b15c84a2f9f8 (patch)
tree9c09dcc76185c3dc30b048a5f2eb972f3bb8a849 /sys/kern/kern_synch.c
parent062ed7352f59fb6db93a3f51ca6e64ec0cefca22 (diff)
downloadFreeBSD-src-a7a8bac81f171b93c4770f51c694b15c84a2f9f8.zip
FreeBSD-src-a7a8bac81f171b93c4770f51c694b15c84a2f9f8.tar.gz
- Move rusage from being per-process in struct pstats to per-thread in
td_ru. This removes the requirement for per-process synchronization in
statclock() and mi_switch(). This was previously supported by sched_lock
which is going away. All modifications to rusage are now done in the
context of the owning thread. Reads proceed without locks.
- Aggregate exiting threads' rusage in thread_exit() such that the exiting
thread's rusage is not lost.
- Provide a new routine, rufetch(), to fetch an aggregate of all rusage
structures from all threads in a process. This routine must be used in any
place requiring a rusage from a process prior to its exit. The exited
process's rusage is still available via p_ru.
- Aggregate tick statistics only on demand via rufetch() or when a thread
exits. Tick statistics are kept in the thread and protected by sched_lock
until it exits.
Initial patch by: attilio
Reviewed by: attilio, bde (some objections), arch (mostly silent)
Diffstat (limited to 'sys/kern/kern_synch.c')
-rw-r--r--sys/kern/kern_synch.c32
1 file changed, 5 insertions, 27 deletions
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index d61dddf..b75dcf2 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -401,40 +401,18 @@ mi_switch(int flags, struct thread *newtd)
}
if (flags & SW_VOL)
- p->p_stats->p_ru.ru_nvcsw++;
+ td->td_ru.ru_nvcsw++;
else
- p->p_stats->p_ru.ru_nivcsw++;
-
+ td->td_ru.ru_nivcsw++;
/*
* Compute the amount of time during which the current
- * process was running, and add that to its total so far.
+ * thread was running, and add that to its total so far.
*/
new_switchtime = cpu_ticks();
- p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
- p->p_rux.rux_uticks += td->td_uticks;
- td->td_uticks = 0;
- p->p_rux.rux_iticks += td->td_iticks;
- td->td_iticks = 0;
- p->p_rux.rux_sticks += td->td_sticks;
- td->td_sticks = 0;
-
+ td->td_runtime += new_switchtime - PCPU_GET(switchtime);
+ PCPU_SET(switchtime, new_switchtime);
td->td_generation++; /* bump preempt-detect counter */
-
- /*
- * Check if the process exceeds its cpu resource allocation. If
- * it reaches the max, arrange to kill the process in ast().
- */
- if (p->p_cpulimit != RLIM_INFINITY &&
- p->p_rux.rux_runtime >= p->p_cpulimit * cpu_tickrate()) {
- p->p_sflag |= PS_XCPU;
- td->td_flags |= TDF_ASTPENDING;
- }
-
- /*
- * Finish up stats for outgoing thread.
- */
cnt.v_swtch++;
- PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)",
td->td_tid, td->td_sched, p->p_pid, p->p_comm);
OpenPOWER on IntegriCloud