Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/init_main.c      |  4
-rw-r--r--  sys/kern/kern_proc.c      | 20
-rw-r--r--  sys/kern/kern_resource.c  |  6
-rw-r--r--  sys/kern/kern_synch.c     |  6
4 files changed, 24 insertions, 12 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 41b85e2..299acbe 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -514,6 +514,7 @@ proc0_post(void *dummy __unused)
struct timespec ts;
struct proc *p;
struct rusage ru;
+ struct thread *td;
/*
* Now we can look at the time, having had a chance to verify the
@@ -529,6 +530,9 @@ proc0_post(void *dummy __unused)
p->p_rux.rux_uticks = 0;
p->p_rux.rux_sticks = 0;
p->p_rux.rux_iticks = 0;
+ FOREACH_THREAD_IN_PROC(p, td) {
+ td->td_runtime = 0;
+ }
}
sx_sunlock(&allproc_lock);
PCPU_SET(switchtime, cpu_ticks());
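With td_runtime no longer drained by ruxagg() (see the kern_resource.c hunk below), proc0_post() must clear it explicitly at boot alongside the rux tick counters, so time accumulated before the timecounters are trusted is not charged to the early threads. A standalone sketch of that reset loop, using userland sys/queue.h; struct td_sketch and reset_thread_runtimes() are illustrative stand-ins, not the kernel's proc/thread API:

/* Mirrors the FOREACH_THREAD_IN_PROC(p, td) loop added in proc0_post(). */
#include <sys/queue.h>
#include <stdint.h>

struct td_sketch {
	uint64_t td_runtime;		/* cumulative CPU ticks */
	TAILQ_ENTRY(td_sketch) td_link;
};
TAILQ_HEAD(td_list, td_sketch);

static void
reset_thread_runtimes(struct td_list *threads)
{
	struct td_sketch *td;

	TAILQ_FOREACH(td, threads, td_link)
		td->td_runtime = 0;
}

int
main(void)
{
	struct td_list threads = TAILQ_HEAD_INITIALIZER(threads);
	struct td_sketch td0;

	td0.td_runtime = 12345;
	TAILQ_INSERT_HEAD(&threads, &td0, td_link);
	reset_thread_runtimes(&threads);
	return (td0.td_runtime != 0);	/* exits 0 after the reset */
}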
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index be70eed..c398a8d 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -84,7 +84,8 @@ MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
-static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp);
+static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp,
+ int preferthread);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static int proc_ctor(void *mem, int size, void *arg, int flags);
@@ -765,11 +766,12 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
}
/*
- * Fill in information that is thread specific.
- * Must be called with p_slock locked.
+ * Fill in information that is thread specific. Must be called with p_slock
+ * locked. If 'preferthread' is set, overwrite certain process-related
+ * fields that are maintained for both threads and processes.
*/
static void
-fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
+fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
{
struct proc *p;
@@ -829,6 +831,9 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_pri.pri_class = td->td_pri_class;
kp->ki_pri.pri_user = td->td_user_pri;
+ if (preferthread)
+ kp->ki_runtime = cputick2usec(td->td_runtime);
+
/* We can't get this anymore but ps etc never used it anyway. */
kp->ki_rqindex = 0;
@@ -848,7 +853,7 @@ fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
fill_kinfo_proc_only(p, kp);
PROC_SLOCK(p);
if (FIRST_THREAD_IN_PROC(p) != NULL)
- fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp);
+ fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
PROC_SUNLOCK(p);
}
@@ -918,7 +923,8 @@ sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
if (flags & KERN_PROC_NOTHREADS) {
PROC_SLOCK(p);
if (FIRST_THREAD_IN_PROC(p) != NULL)
- fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), &kinfo_proc);
+ fill_kinfo_thread(FIRST_THREAD_IN_PROC(p),
+ &kinfo_proc, 0);
PROC_SUNLOCK(p);
error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
sizeof(kinfo_proc));
@@ -926,7 +932,7 @@ sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
PROC_SLOCK(p);
if (FIRST_THREAD_IN_PROC(p) != NULL)
FOREACH_THREAD_IN_PROC(p, td) {
- fill_kinfo_thread(td, &kinfo_proc);
+ fill_kinfo_thread(td, &kinfo_proc, 1);
error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
sizeof(kinfo_proc));
if (error)
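The net effect in kern_proc.c: when sysctl_out_proc() emits one kinfo_proc per thread, fill_kinfo_thread() is called with preferthread set and overwrites ki_runtime with the thread's own accumulated time (converted to microseconds via cputick2usec()), while the single-record KERN_PROC_NOTHREADS path keeps the process totals. A hedged userland sketch of consuming these records; it assumes the kern.proc sysctl with the KERN_PROC_INC_THREAD modifier (present in contemporary FreeBSD, though not part of this diff) and does only minimal error handling:

/* Print per-thread runtime (ki_runtime, in microseconds) for a pid. */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	struct kinfo_proc *kp;
	size_t len, i;
	int mib[4];

	mib[0] = CTL_KERN;
	mib[1] = KERN_PROC;
	mib[2] = KERN_PROC_PID | KERN_PROC_INC_THREAD;
	mib[3] = argc > 1 ? atoi(argv[1]) : getpid();

	if (sysctl(mib, 4, NULL, &len, NULL, 0) == -1)
		return (1);
	if ((kp = malloc(len)) == NULL ||
	    sysctl(mib, 4, kp, &len, NULL, 0) == -1)
		return (1);
	for (i = 0; i < len / sizeof(*kp); i++)
		printf("tid %d: %ju us\n", (int)kp[i].ki_tid,
		    (uintmax_t)kp[i].ki_runtime);
	free(kp);
	return (0);
}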
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 4e22054..5f8ea8f 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -849,7 +849,7 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp)
}
/* Make sure the per-thread stats are current. */
FOREACH_THREAD_IN_PROC(p, td) {
- if (td->td_runtime == 0)
+ if (td->td_incruntime == 0)
continue;
thread_lock(td);
ruxagg(&p->p_rux, td);
@@ -1021,11 +1021,11 @@ ruxagg(struct rusage_ext *rux, struct thread *td)
THREAD_LOCK_ASSERT(td, MA_OWNED);
PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
- rux->rux_runtime += td->td_runtime;
+ rux->rux_runtime += td->td_incruntime;
rux->rux_uticks += td->td_uticks;
rux->rux_sticks += td->td_sticks;
rux->rux_iticks += td->td_iticks;
- td->td_runtime = 0;
+ td->td_incruntime = 0;
td->td_uticks = 0;
td->td_iticks = 0;
td->td_sticks = 0;
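After this change, ruxagg() drains only td_incruntime into the process-wide rux_runtime and leaves td_runtime as a monotonic per-thread total. At any instant, the aggregated process time plus the pending per-thread increment equals the per-thread lifetime total. A standalone sketch of that invariant, with illustrative types in place of the kernel's struct thread and struct rusage_ext:

#include <assert.h>
#include <stdint.h>

struct rux_sketch { uint64_t rux_runtime; };
struct td_sketch  { uint64_t td_runtime, td_incruntime; };

static void
ruxagg_sketch(struct rux_sketch *rux, struct td_sketch *td)
{
	rux->rux_runtime += td->td_incruntime;	/* fold into process */
	td->td_incruntime = 0;			/* drained */
	/* td->td_runtime is deliberately left alone. */
}

int
main(void)
{
	struct rux_sketch rux = { 0 };
	struct td_sketch td = { 0, 0 };

	td.td_runtime += 100; td.td_incruntime += 100;	/* mi_switch() */
	ruxagg_sketch(&rux, &td);			/* calcru() path */
	td.td_runtime += 50; td.td_incruntime += 50;	/* more CPU time */

	/* Invariant: aggregated + pending == per-thread total. */
	assert(rux.rux_runtime + td.td_incruntime == td.td_runtime);
	return (0);
}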
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 9d02497..ad13c2b 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -371,7 +371,7 @@ wakeup_one(ident)
void
mi_switch(int flags, struct thread *newtd)
{
- uint64_t new_switchtime;
+ uint64_t runtime, new_switchtime;
struct thread *td;
struct proc *p;
@@ -409,7 +409,9 @@ mi_switch(int flags, struct thread *newtd)
* thread was running, and add that to its total so far.
*/
new_switchtime = cpu_ticks();
- td->td_runtime += new_switchtime - PCPU_GET(switchtime);
+ runtime = new_switchtime - PCPU_GET(switchtime);
+ td->td_runtime += runtime;
+ td->td_incruntime += runtime;
PCPU_SET(switchtime, new_switchtime);
td->td_generation++; /* bump preempt-detect counter */
PCPU_INC(cnt.v_swtch);
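mi_switch() is the single point where CPU time is charged: the delta since the per-CPU switchtime is now added to both counters, so td_runtime grows for the thread's lifetime (feeding ki_runtime above) while td_incruntime waits to be drained by the next ruxagg(). A minimal sketch of that accounting step, with hypothetical stand-ins for the PCPU switchtime and cpu_ticks():

#include <stdint.h>

static uint64_t switchtime;	/* stands in for PCPU switchtime */

static uint64_t
cpu_ticks(void)
{
	static uint64_t t;

	return (t += 1000);	/* stub: any monotonic tick source */
}

struct td_sketch { uint64_t td_runtime, td_incruntime; };

static void
account_switch(struct td_sketch *td)
{
	uint64_t new_switchtime, runtime;

	new_switchtime = cpu_ticks();
	runtime = new_switchtime - switchtime;
	td->td_runtime += runtime;	/* lifetime total for the thread */
	td->td_incruntime += runtime;	/* pending, drained by ruxagg() */
	switchtime = new_switchtime;
}

int
main(void)
{
	struct td_sketch td = { 0, 0 };

	switchtime = cpu_ticks();	/* like PCPU_SET at boot */
	account_switch(&td);
	/* Both counters move in lockstep until ruxagg() runs. */
	return (td.td_runtime != td.td_incruntime);
}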