-rw-r--r--  sys/kern/kern_thread.c  25 +++++++++++++++----------
-rw-r--r--  sys/kern/sched_4bsd.c    4 ++--
-rw-r--r--  sys/kern/sched_ule.c     4 ++--
3 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index d9df5c3..d4c5c4c 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -381,7 +381,7 @@ thread_free(struct thread *td)
void
thread_exit(void)
{
- uint64_t new_switchtime;
+ uint64_t runtime, new_switchtime;
struct thread *td;
struct thread *td2;
struct proc *p;
@@ -410,15 +410,6 @@ thread_exit(void)
*/
cpu_thread_exit(td); /* XXXSMP */

- /* Do the same timestamp bookkeeping that mi_switch() would do. */
- new_switchtime = cpu_ticks();
- p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
- PCPU_SET(switchtime, new_switchtime);
- PCPU_SET(switchticks, ticks);
- PCPU_INC(cnt.v_swtch);
- /* Save our resource usage in our process. */
- td->td_ru.ru_nvcsw++;
- rucollect(&p->p_ru, &td->td_ru);
/*
* The last thread is left attached to the process
* So that the whole bundle gets recycled. Skip
@@ -467,7 +458,21 @@ thread_exit(void)
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
PROC_UNLOCK(p);
+
+ /* Do the same timestamp bookkeeping that mi_switch() would do. */
+ new_switchtime = cpu_ticks();
+ runtime = new_switchtime - PCPU_GET(switchtime);
+ td->td_runtime += runtime;
+ td->td_incruntime += runtime;
+ PCPU_SET(switchtime, new_switchtime);
+ PCPU_SET(switchticks, ticks);
+ PCPU_INC(cnt.v_swtch);
+
+ /* Save our resource usage in our process. */
+ td->td_ru.ru_nvcsw++;
ruxagg(p, td);
+ rucollect(&p->p_ru, &td->td_ru);
+
thread_lock(td);
PROC_SUNLOCK(p);
td->td_state = TDS_INACTIVE;
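
Taken together, the kern_thread.c hunks move the mi_switch()-style timestamp bookkeeping in thread_exit() to after PROC_UNLOCK(), and additionally credit the final timeslice to td_runtime and td_incruntime so that ruxagg() folds it into the process totals before rucollect() merges the thread's rusage. Below is a minimal userland sketch of that ordering; the structs and helpers are simplified stand-ins for the kernel's, not the real API.

/*
 * Minimal userland sketch of the accounting order the kern_thread.c
 * hunks establish.  The structs and helpers are simplified stand-ins
 * for the kernel's, not the real API.
 */
#include <stdint.h>
#include <stdio.h>

struct thread {
	uint64_t td_runtime;    /* total cpu ticks this thread consumed */
	uint64_t td_incruntime; /* ticks not yet folded into the process */
	uint64_t ru_nvcsw;      /* voluntary context switches */
};

struct proc {
	uint64_t p_runtime;     /* aggregated from its threads */
	uint64_t p_nvcsw;
};

static uint64_t pcpu_switchtime; /* per-cpu last-switch timestamp */

static uint64_t cpu_ticks(void)  /* stand-in for the kernel's cpu_ticks() */
{
	static uint64_t t = 1000;
	return (t += 250);
}

/* stand-in for ruxagg(): fold unaccounted ticks into the process */
static void ruxagg(struct proc *p, struct thread *td)
{
	p->p_runtime += td->td_incruntime;
	td->td_incruntime = 0;
}

/* stand-in for rucollect(): merge the thread's rusage into the process */
static void rucollect(struct proc *p, struct thread *td)
{
	p->p_nvcsw += td->ru_nvcsw;
}

static void thread_exit_accounting(struct proc *p, struct thread *td)
{
	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	uint64_t new_switchtime = cpu_ticks();
	uint64_t runtime = new_switchtime - pcpu_switchtime;

	td->td_runtime += runtime;    /* credit the final timeslice... */
	td->td_incruntime += runtime; /* ...so ruxagg() can see it below */
	pcpu_switchtime = new_switchtime;

	/* Save our resource usage in our process. */
	td->ru_nvcsw++;
	ruxagg(p, td);    /* must run after the slice was credited above */
	rucollect(p, td); /* finally merge the thread's rusage */
}

int main(void)
{
	struct proc p = { 0 };
	struct thread td = { 0 };

	pcpu_switchtime = cpu_ticks(); /* thread "switched in" here */
	thread_exit_accounting(&p, &td);
	printf("process got %llu ticks, %llu voluntary switches\n",
	    (unsigned long long)p.p_runtime, (unsigned long long)p.p_nvcsw);
	return (0);
}

The visible effect of the reordering is that the dying thread's last slice now reaches the process total through ruxagg() rather than being added to the process clock directly, which is what the removed rux_runtime line used to do.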
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 0fd6481..16cc033 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1572,14 +1572,14 @@ sched_throw(struct thread *td)
if (td == NULL) {
mtx_lock_spin(&sched_lock);
spinlock_exit();
+ PCPU_SET(switchtime, cpu_ticks());
+ PCPU_SET(switchticks, ticks);
} else {
lock_profile_release_lock(&sched_lock.lock_object);
MPASS(td->td_lock == &sched_lock);
}
mtx_assert(&sched_lock, MA_OWNED);
KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
- PCPU_SET(switchtime, cpu_ticks());
- PCPU_SET(switchticks, ticks);
cpu_throw(td, choosethread()); /* doesn't return */
}
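
In sched_4bsd.c, sched_throw() now initializes switchtime and switchticks only when a CPU enters the scheduler for the first time (td == NULL), instead of unconditionally re-stamping them just before cpu_throw(). A small sketch of that control flow follows; every name is a simplified stand-in for the kernel's per-CPU machinery, and the same shape applies to the sched_ule.c hunks below.

/*
 * Sketch of the sched_throw() control flow after this change.  All
 * names are simplified stand-ins, not the kernel's real API.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct thread { int td_dummy; };

static uint64_t pcpu_switchtime; /* per-cpu: start of the current slice */
static int pcpu_switchticks;
static int ticks;                /* stand-in for the kernel's ticks counter */

static uint64_t cpu_ticks(void) { static uint64_t t; return (++t); }

static void sched_throw_sketch(struct thread *td)
{
	if (td == NULL) {
		/*
		 * A CPU entering the scheduler for the first time: nothing
		 * has stamped switchtime yet, so initialize it here.
		 */
		pcpu_switchtime = cpu_ticks();
		pcpu_switchticks = ticks;
	}
	/*
	 * For an exiting thread (td != NULL), thread_exit() has already
	 * charged its final timeslice and advanced switchtime, so stamping
	 * again here would lose that interval.
	 */
	printf("switchtime is now %llu\n", (unsigned long long)pcpu_switchtime);
	/* cpu_throw(td, choosethread());  -- would not return */
}

int main(void)
{
	struct thread td = { 0 };

	sched_throw_sketch(NULL); /* first entry on this CPU: stamps */
	sched_throw_sketch(&td);  /* exiting thread: stamp left alone */
	return (0);
}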
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 87e3655..2065b9f 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -2587,6 +2587,8 @@ sched_throw(struct thread *td)
/* Correct spinlock nesting and acquire the correct lock. */
TDQ_LOCK(tdq);
spinlock_exit();
+ PCPU_SET(switchtime, cpu_ticks());
+ PCPU_SET(switchticks, ticks);
} else {
MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
tdq_load_rem(tdq, td);
@@ -2595,8 +2597,6 @@ sched_throw(struct thread *td)
KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
newtd = choosethread();
TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
- PCPU_SET(switchtime, cpu_ticks());
- PCPU_SET(switchticks, ticks);
cpu_throw(td, newtd); /* doesn't return */
}
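
The reason the td != NULL path must not re-stamp is that thread_exit() has already charged the dying thread up to its own cpu_ticks() reading; re-stamping in sched_throw() would leave the ticks between the two readings charged to nobody. A toy timeline, with made-up tick values and no kernel API, makes the arithmetic concrete:

/*
 * Toy timeline showing what unconditional re-stamping in sched_throw()
 * did to accounting.  Tick values are made up; no kernel API involved.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t switchtime = 100; /* stamp from when the thread switched in */
	uint64_t t_exit = 160;     /* cpu_ticks() read inside thread_exit() */
	uint64_t t_throw = 165;    /* cpu_ticks() read inside sched_throw() */

	/* thread_exit() charges the final slice and advances the stamp. */
	uint64_t charged = t_exit - switchtime; /* 60 ticks to the dying thread */
	switchtime = t_exit;

	/*
	 * Old behavior: sched_throw() re-stamps, so the next thread's slice
	 * begins at t_throw and the 160..165 interval is charged to nobody.
	 */
	uint64_t old_next_start = t_throw;

	/*
	 * New behavior: the stamp from thread_exit() is kept, so the next
	 * slice begins at 160 and every tick lands in some process total.
	 */
	uint64_t new_next_start = switchtime;

	printf("charged to exiting thread: %llu ticks\n",
	    (unsigned long long)charged);
	printf("ticks dropped by the old re-stamp: %llu\n",
	    (unsigned long long)(old_next_start - new_next_start));
	return (0);
}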