-rw-r--r--  sys/compat/svr4/svr4_misc.c  |  9
-rw-r--r--  sys/kern/init_main.c         |  5
-rw-r--r--  sys/kern/kern_exit.c         | 39
-rw-r--r--  sys/kern/kern_resource.c     |  9
-rw-r--r--  sys/kern/kern_thread.c       | 10
-rw-r--r--  sys/sys/proc.h               |  2

6 files changed, 26 insertions, 48 deletions
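
In brief (read from the diff itself, as no commit message is shown): struct proc's p_ru field changes from a malloc'd pointer to an embedded struct rusage. Exit-time resource usage no longer needs a separate M_ZOMBIE allocation; each exiting thread folds its usage into p_ru directly, exit1() adds the accumulated child usage, and the wait*() paths copy the result by value.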
diff --git a/sys/compat/svr4/svr4_misc.c b/sys/compat/svr4/svr4_misc.c
index 71d994d..7277622 100644
--- a/sys/compat/svr4/svr4_misc.c
+++ b/sys/compat/svr4/svr4_misc.c
@@ -1226,19 +1226,21 @@ loop:
nfound++;
+ PROC_SLOCK(p);
/*
* See if we have a zombie. If so, WNOWAIT should be set,
* as otherwise we should have called kern_wait() up above.
*/
if ((p->p_state == PRS_ZOMBIE) &&
((uap->options & (SVR4_WEXITED|SVR4_WTRAPPED)))) {
+ PROC_SUNLOCK(p);
KASSERT(uap->options & SVR4_WNOWAIT,
("WNOWAIT is clear"));
/* Found a zombie, so cache info in local variables. */
pid = p->p_pid;
status = p->p_xstat;
- ru = *p->p_ru;
+ ru = p->p_ru;
calcru(p, &ru.ru_utime, &ru.ru_stime);
PROC_UNLOCK(p);
sx_sunlock(&proctree_lock);
@@ -1253,7 +1255,6 @@ loop:
* See if we have a stopped or continued process.
* XXX: This duplicates the same code in kern_wait().
*/
- PROC_SLOCK(p);
if ((p->p_flag & P_STOPPED_SIG) &&
(p->p_suspcount == p->p_numthreads) &&
(p->p_flag & P_WAITED) == 0 &&
@@ -1264,7 +1265,7 @@ loop:
sx_sunlock(&proctree_lock);
pid = p->p_pid;
status = W_STOPCODE(p->p_xstat);
- ru = *p->p_ru;
+ ru = p->p_ru;
calcru(p, &ru.ru_utime, &ru.ru_stime);
PROC_UNLOCK(p);
@@ -1285,7 +1286,7 @@ loop:
if (((uap->options & SVR4_WNOWAIT)) == 0)
p->p_flag &= ~P_CONTINUED;
pid = p->p_pid;
- ru = *p->p_ru;
+ ru = p->p_ru;
status = SIGCONT;
calcru(p, &ru.ru_utime, &ru.ru_stime);
PROC_UNLOCK(p);
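
With p_ru embedded, the svr4 wait emulation copies it by value (ru = p->p_ru) instead of dereferencing a pointer, and PROC_SLOCK(p) is hoisted above the zombie test so it runs under the same spin lock as the stop/continue checks below it. A minimal sketch of the resulting read pattern, assuming the caller already holds the proc lock as the code above does (the helper name fetch_exit_rusage is hypothetical):

	/*
	 * Sketch, not part of the diff: copy the embedded exit rusage
	 * and let calcru() fill in the tick-derived times.
	 */
	static void
	fetch_exit_rusage(struct proc *p, struct rusage *rup)
	{
		PROC_LOCK_ASSERT(p, MA_OWNED);
		*rup = p->p_ru;		/* struct copy; was *p->p_ru */
		calcru(p, &rup->ru_utime, &rup->ru_stime);
	}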
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index ac00de0..54d78e6 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -500,6 +500,7 @@ proc0_post(void *dummy __unused)
{
struct timespec ts;
struct proc *p;
+ struct rusage ru;
/*
* Now we can look at the time, having had a chance to verify the
@@ -508,7 +509,11 @@ proc0_post(void *dummy __unused)
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
microuptime(&p->p_stats->p_start);
+ rufetch(p, &ru); /* Clears thread stats */
p->p_rux.rux_runtime = 0;
+ p->p_rux.rux_uticks = 0;
+ p->p_rux.rux_sticks = 0;
+ p->p_rux.rux_iticks = 0;
}
sx_sunlock(&allproc_lock);
PCPU_SET(switchtime, cpu_ticks());
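
The rufetch() call added here matters for ordering: its "Clears thread stats" side effect comes from ruxagg(), which drains each thread's pending tick counts into p_rux before the loop zeroes the proc-level counters, so no stale per-thread ticks can be re-aggregated after the reset. A hedged sketch of that aggregation step, with the td_incruntime/td_*ticks field names recalled from this era of the tree rather than taken from the diff:

	void
	ruxagg(struct rusage_ext *rux, struct thread *td)
	{
		/* Move the thread's pending ticks into the proc... */
		rux->rux_runtime += td->td_incruntime;
		rux->rux_uticks += td->td_uticks;
		rux->rux_sticks += td->td_sticks;
		rux->rux_iticks += td->td_iticks;
		/* ...and zero them so they are only counted once. */
		td->td_incruntime = 0;
		td->td_uticks = 0;
		td->td_sticks = 0;
		td->td_iticks = 0;
	}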
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index eefe6b0..58abd2e 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -116,7 +116,6 @@ exit1(struct thread *td, int rv)
struct ucred *tracecred;
#endif
struct plimit *plim;
- struct rusage *ru;
int locked;
/*
@@ -233,8 +232,6 @@ retry:
*/
EVENTHANDLER_INVOKE(process_exit, p);
- MALLOC(ru, struct rusage *, sizeof(struct rusage),
- M_ZOMBIE, M_WAITOK);
/*
* If parent is waiting for us to exit or exec,
* P_PPWAIT is set; we will wakeup the parent below.
@@ -447,16 +444,6 @@ retry:
p->p_xstat = rv;
p->p_xthread = td;
/*
- * All statistics have been aggregated into the final td_ru by
- * thread_exit(). Copy these into the proc here where wait*()
- * can find them.
- * XXX We will miss any statistics gathered between here and
- * thread_exit() except for those related to clock ticks.
- */
- *ru = td->td_ru;
- ru->ru_nvcsw++;
- p->p_ru = ru;
- /*
* Notify interested parties of our demise.
*/
KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);
@@ -537,6 +524,11 @@ retry:
knlist_destroy(&p->p_klist);
/*
+ * Save our children's rusage information in our exit rusage.
+ */
+ ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
+
+ /*
* Make sure the scheduler takes this thread out of its tables etc.
* This will also release this thread's reference to the ucred.
* Other thread parts to release include pcb bits and such.
@@ -711,27 +703,15 @@ loop:
}
nfound++;
+ PROC_SLOCK(p);
if (p->p_state == PRS_ZOMBIE) {
-
- /*
- * It is possible that the last thread of this
- * process is still running on another CPU
- * in thread_exit() after having dropped the process
- * lock via PROC_UNLOCK() but before it has completed
- * cpu_throw(). In that case, the other thread must
- * still hold the proc slock, so simply by acquiring
- * proc slock once we will wait long enough for the
- * thread to exit in that case.
- * XXX This is questionable.
- */
- PROC_SLOCK(p);
PROC_SUNLOCK(p);
td->td_retval[0] = p->p_pid;
if (status)
*status = p->p_xstat; /* convert to int */
if (rusage) {
- *rusage = *p->p_ru;
+ *rusage = p->p_ru;
calcru(p, &rusage->ru_utime, &rusage->ru_stime);
}
@@ -776,11 +756,9 @@ loop:
p->p_xstat = 0; /* XXX: why? */
PROC_UNLOCK(p);
PROC_LOCK(q);
- ruadd(&q->p_stats->p_cru, &q->p_crux, p->p_ru,
+ ruadd(&q->p_stats->p_cru, &q->p_crux, &p->p_ru,
&p->p_rux);
PROC_UNLOCK(q);
- FREE(p->p_ru, M_ZOMBIE);
- p->p_ru = NULL;
/*
* Decrement the count of procs running with this uid.
@@ -819,7 +797,6 @@ loop:
sx_xunlock(&allproc_lock);
return (0);
}
- PROC_SLOCK(p);
if ((p->p_flag & P_STOPPED_SIG) &&
(p->p_suspcount == p->p_numthreads) &&
(p->p_flag & P_WAITED) == 0 &&
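
Taken together, the kern_exit.c hunks split the accounting into three stages: each exiting thread folds its td_ru into p_ru (see the kern_thread.c diff below), exit1() merges the accumulated child usage (p_cru/p_crux) into p_ru with ruadd(), and kern_wait() copies p_ru by value before adding the dead child's totals to the parent's own p_cru. For reference, ruadd() combines both the plain rusage and the rusage_ext halves; a sketch recalled from kern_resource.c of the same era, not part of this diff:

	void
	ruadd(struct rusage *ru, struct rusage_ext *rux,
	    struct rusage *ru2, struct rusage_ext *rux2)
	{
		/* Sum the runtime/tick side... */
		rux->rux_runtime += rux2->rux_runtime;
		rux->rux_uticks += rux2->rux_uticks;
		rux->rux_sticks += rux2->rux_sticks;
		rux->rux_iticks += rux2->rux_iticks;
		rux->rux_uu += rux2->rux_uu;
		rux->rux_su += rux2->rux_su;
		rux->rux_tu += rux2->rux_tu;
		/* ...then the ordinary rusage fields. */
		rucollect(ru, ru2);
	}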
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index a0e39b9..5982066 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -1039,19 +1039,16 @@ rufetch(struct proc *p, struct rusage *ru)
{
struct thread *td;
- memset(ru, 0, sizeof(*ru));
PROC_SLOCK(p);
- if (p->p_ru == NULL) {
- KASSERT(p->p_numthreads > 0,
- ("rufetch: No threads or ru in proc %p", p));
+ *ru = p->p_ru;
+ if (p->p_numthreads > 0) {
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
ruxagg(&p->p_rux, td);
thread_unlock(td);
rucollect(ru, &td->td_ru);
}
- } else
- *ru = *p->p_ru;
+ }
PROC_SUNLOCK(p);
}
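
The rewritten rufetch() drops the NULL check entirely: p_ru always exists now, holding whatever usage already-exited threads have deposited (plus, after exit1(), the children's totals), while any still-live threads contribute their td_ru on top. The summing itself is done by rucollect(); a sketch recalled from the same file, where the ru_first/ru_last macros from <sys/resource.h> let the long fields of struct rusage be walked as an array:

	static void
	rucollect(struct rusage *ru, struct rusage *ru2)
	{
		long *ip, *ip2;
		int i;

		/* maxrss is a high-water mark, not a sum. */
		if (ru->ru_maxrss < ru2->ru_maxrss)
			ru->ru_maxrss = ru2->ru_maxrss;
		/* Add every remaining long field pairwise. */
		ip = &ru->ru_first;
		ip2 = &ru2->ru_first;
		for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
			*ip++ += *ip2++;
	}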
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 56de5c2..9c1c02d 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -391,9 +391,9 @@ thread_exit(void)
PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
PCPU_INC(cnt.v_swtch);
- /* Add the child usage to our own when the final thread exits. */
- if (p->p_numthreads == 1)
- ruadd(p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
+ /* Save our resource usage in our process. */
+ td->td_ru.ru_nvcsw++;
+ rucollect(&p->p_ru, &td->td_ru);
/*
* The last thread is left attached to the process
* So that the whole bundle gets recycled. Skip
@@ -411,9 +411,7 @@ thread_exit(void)
thread_unlink(td);
#endif
thread_unlock(td);
- /* Impart our resource usage on another thread */
td2 = FIRST_THREAD_IN_PROC(p);
- rucollect(&td2->td_ru, &td->td_ru);
sched_exit_thread(td2, td);
/*
@@ -462,7 +460,7 @@ thread_exit(void)
}
PROC_UNLOCK(p);
thread_lock(td);
- /* Aggregate our tick statistics into our parents rux. */
+ /* Save our tick information with both the thread and proc locked */
ruxagg(&p->p_rux, td);
PROC_SUNLOCK(p);
td->td_state = TDS_INACTIVE;
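
In thread_exit(), a dying thread now deposits its usage straight into the embedded p_ru instead of imparting it to a surviving thread, and it counts the final switch away from itself (ru_nvcsw++). A condensed sketch of just those accounting steps, with locking and scheduler calls elided; the helper name thread_exit_accounting is hypothetical:

	/*
	 * Sketch, not a drop-in function.  Callers hold the proc lock;
	 * the tick counters are drained separately by ruxagg() with the
	 * thread lock and PROC_SLOCK held, as the last hunk above shows.
	 */
	static void
	thread_exit_accounting(struct thread *td, struct proc *p)
	{
		/* Count the final switch away from this thread. */
		td->td_ru.ru_nvcsw++;
		/* Fold this thread's usage into the process total. */
		rucollect(&p->p_ru, &td->td_ru);
	}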
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 4f12015..b11f909 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -571,7 +571,7 @@ struct proc {
struct mdproc p_md; /* Any machine-dependent fields. */
struct callout p_itcallout; /* (h + c) Interval timer callout. */
u_short p_acflag; /* (c) Accounting flags. */
- struct rusage *p_ru; /* (a) Exit information. */
+ struct rusage p_ru; /* (a) Exit information. */
struct proc *p_peers; /* (r) */
struct proc *p_leader; /* (b) */
void *p_emuldata; /* (c) Emulator state data. */
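
This one-line proc.h change is the heart of the commit: with p_ru embedded in struct proc, its lifetime matches the zombie's, the MALLOC/FREE pair on M_ZOMBIE in kern_exit.c disappears, and readers can never see a NULL pointer. The access pattern changes from a dereference to a plain struct copy:

	struct rusage ru;

	ru = *p->p_ru;	/* before: dereference a malloc'd pointer */
	ru = p->p_ru;	/* after: copy the embedded struct by value */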