Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_clock.c | 22
-rw-r--r--  sys/kern/kern_exit.c  |  2
-rw-r--r--  sys/kern/subr_prof.c  | 67
3 files changed, 66 insertions(+), 25 deletions(-)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 5c6756b..25e558a 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -313,6 +313,10 @@ startprofclock(p)
* cover psdiv, etc. as well.
*/
mtx_lock_spin(&sched_lock);
+ if (p->p_sflag & PS_STOPPROF) {
+ mtx_unlock_spin(&sched_lock);
+ return;
+ }
if ((p->p_sflag & PS_PROFIL) == 0) {
p->p_sflag |= PS_PROFIL;
if (++profprocs == 1)
@@ -329,9 +333,18 @@ stopprofclock(p)
register struct proc *p;
{
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+retry:
mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_PROFIL) {
- p->p_sflag &= ~PS_PROFIL;
+ if (p->p_profthreads) {
+ p->p_sflag |= PS_STOPPROF;
+ mtx_unlock_spin(&sched_lock);
+ msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
+ "stopprof", NULL);
+ goto retry;
+ }
+ p->p_sflag &= ~(PS_PROFIL|PS_STOPPROF);
if (--profprocs == 0)
cpu_stopprofclock();
}
@@ -400,7 +413,7 @@ statclock(frame)
}
}
- sched_clock(ke->ke_thread);
+ sched_clock(td);
/* Update resource usage integrals and maximums. */
if ((pstats = p->p_stats) != NULL &&
@@ -430,9 +443,12 @@ profclock(frame)
/*
* Came from user mode; CPU was in user state.
* If this process is being profiled, record the tick.
+ * if there is no related user location yet, don't
+ * bother trying to count it.
*/
td = curthread;
- if (td->td_proc->p_sflag & PS_PROFIL)
+ if ((td->td_proc->p_sflag & PS_PROFIL) &&
+ !(td->td_flags & TDF_UPCALLING))
addupc_intr(td->td_kse, CLKF_PC(frame), 1);
}
#ifdef GPROF
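
The kern_clock.c hunks above introduce a small handshake: profiling updaters hold a reference (p_profthreads, incremented in subr_prof.c below) while PS_PROFIL is set, and stopprofclock() now sleeps until those references drain before clearing the flag. Below is a minimal userland analogue of that handshake, assuming pthreads; the names prof_enter, prof_exit, and prof_stop are illustrative only and are not kernel APIs.

/*
 * Userland sketch of the PS_PROFIL/PS_STOPPROF drain protocol:
 * updaters take a reference while the profiling flag is set, and
 * the stopper waits until the last updater finishes before
 * clearing the flag.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t proflock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  profdrain = PTHREAD_COND_INITIALIZER;
static bool profiling = true;   /* stands in for PS_PROFIL */
static bool stop_requested;     /* stands in for PS_STOPPROF */
static int  profthreads;        /* in-flight updaters, as p_profthreads */

/* Called by an updater before touching the profile buffer. */
static bool
prof_enter(void)
{
	pthread_mutex_lock(&proflock);
	if (!profiling || stop_requested) {
		pthread_mutex_unlock(&proflock);
		return (false);
	}
	profthreads++;
	pthread_mutex_unlock(&proflock);
	return (true);
}

/* Called by an updater when done; wakes a pending stopper. */
static void
prof_exit(void)
{
	pthread_mutex_lock(&proflock);
	if (--profthreads == 0 && stop_requested)
		pthread_cond_broadcast(&profdrain);
	pthread_mutex_unlock(&proflock);
}

/* Called by the stopper; returns only when no updaters remain. */
static void
prof_stop(void)
{
	pthread_mutex_lock(&proflock);
	while (profthreads > 0) {
		stop_requested = true;
		pthread_cond_wait(&profdrain, &proflock);
	}
	profiling = false;
	stop_requested = false;
	pthread_mutex_unlock(&proflock);
}

int
main(void)
{
	if (prof_enter())	/* updater records a sample ... */
		prof_exit();	/* ... and drops its reference */
	prof_stop();		/* stopper drains and clears the flag */
	printf("profiling=%d profthreads=%d\n", profiling, profthreads);
	return (0);
}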
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 0b2c2e8..5721444 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -238,7 +238,6 @@ exit1(td, rv)
TAILQ_FOREACH(ep, &exit_list, next)
(*ep->function)(p);
- stopprofclock(p);
MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
M_ZOMBIE, 0);
@@ -247,6 +246,7 @@ exit1(td, rv)
* P_PPWAIT is set; we will wakeup the parent below.
*/
PROC_LOCK(p);
+ stopprofclock(p);
p->p_flag &= ~(P_TRACED | P_PPWAIT);
SIGEMPTYSET(p->p_siglist);
PROC_UNLOCK(p);
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 2c22a92..b8cd61e 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -358,7 +358,9 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
return (0);
if (state == GMON_PROF_OFF) {
gp->state = state;
+ PROC_LOCK(&proc0);
stopprofclock(&proc0);
+ PROC_UNLOCK(&proc0);
stopguprof(gp);
} else if (state == GMON_PROF_ON) {
gp->state = GMON_PROF_OFF;
@@ -369,7 +371,9 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
#ifdef GUPROF
} else if (state == GMON_PROF_HIRES) {
gp->state = GMON_PROF_OFF;
+ PROC_LOCK(&proc0);
stopprofclock(&proc0);
+ PROC_UNLOCK(&proc0);
startguprof(gp);
gp->state = state;
#endif
@@ -419,7 +423,7 @@ profil(td, uap)
struct thread *td;
register struct profil_args *uap;
{
- register struct uprof *upp;
+ struct uprof *upp;
int s;
int error = 0;
@@ -430,7 +434,9 @@ profil(td, uap)
goto done2;
}
if (uap->scale == 0) {
+ PROC_LOCK(td->td_proc);
stopprofclock(td->td_proc);
+ PROC_UNLOCK(td->td_proc);
goto done2;
}
upp = &td->td_proc->p_stats->p_prof;
@@ -472,15 +478,12 @@ done2:
* inaccurate.
*/
void
-addupc_intr(ke, pc, ticks)
- register struct kse *ke;
- register uintptr_t pc;
- u_int ticks;
+addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks)
{
- register struct uprof *prof;
- register caddr_t addr;
- register u_int i;
- register int v;
+ struct uprof *prof;
+ caddr_t addr;
+ u_int i;
+ int v;
if (ticks == 0)
return;
@@ -502,34 +505,56 @@ addupc_intr(ke, pc, ticks)
/*
* Much like before, but we can afford to take faults here. If the
* update fails, we simply turn off profiling.
+ * XXXKSE, don't use kse unless we got sched lock.
*/
void
-addupc_task(ke, pc, ticks)
- register struct kse *ke;
- register uintptr_t pc;
- u_int ticks;
+addupc_task(struct kse *ke, uintptr_t pc, u_int ticks)
{
- struct proc *p = ke->ke_proc;
- register struct uprof *prof;
- register caddr_t addr;
- register u_int i;
+ struct proc *p = ke->ke_proc;
+ struct uprof *prof;
+ caddr_t addr;
+ u_int i;
u_short v;
+ int stop = 0;
if (ticks == 0)
return;
+ PROC_LOCK(p);
+ mtx_lock_spin(&sched_lock);
+ if (!(p->p_sflag & PS_PROFIL)) {
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ return;
+ }
+ p->p_profthreads++;
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
prof = &p->p_stats->p_prof;
if (pc < prof->pr_off ||
- (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
- return;
+ (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
+ goto out;
+ }
addr = prof->pr_base + i;
if (copyin(addr, &v, sizeof(v)) == 0) {
v += ticks;
if (copyout(&v, addr, sizeof(v)) == 0)
- return;
+ goto out;
+ }
+ stop = 1;
+
+out:
+ PROC_LOCK(p);
+ if (--p->p_profthreads == 0) {
+ if (p->p_sflag & PS_STOPPROF) {
+ wakeup(&p->p_profthreads);
+ stop = 0;
+ }
}
- stopprofclock(p);
+ if (stop)
+ stopprofclock(p);
+ PROC_UNLOCK(p);
}
#if defined(__i386__) && __GNUC__ >= 2
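
For context on the addupc_intr()/addupc_task() hunks above: both map a sampled program counter into a 16-bit counter in the user-supplied profil(2) buffer using a 16.16 fixed-point scale. The sketch below is a standalone illustration of that bucket arithmetic only, not the kernel's PC_TO_INDEX macro itself; struct and function names here are invented for the example.

/*
 * Sketch of profil(2)-style bucketing: byte offset into the counter
 * buffer is (pc - off) * scale / 65536, kept 2-byte aligned because
 * each counter is a 16-bit cell.
 */
#include <stdint.h>
#include <stdio.h>

struct uprof_sketch {
	uint16_t *base;		/* counter buffer (as pr_base) */
	size_t    size;		/* buffer size in bytes (as pr_size) */
	uintptr_t off;		/* lowest profiled address (as pr_off) */
	uint32_t  scale;	/* 16.16 fixed point (as pr_scale) */
};

/* Map a pc to a byte offset in the buffer; & ~1 keeps it aligned. */
static size_t
pc_to_index(uintptr_t pc, const struct uprof_sketch *p)
{
	return ((size_t)(((uint64_t)(pc - p->off) * p->scale) >> 16) &
	    ~(size_t)1);
}

/* Credit `ticks` to the counter covering `pc`, if it is in range. */
static void
addupc_sketch(uintptr_t pc, struct uprof_sketch *p, unsigned ticks)
{
	size_t i;

	if (pc < p->off)
		return;
	i = pc_to_index(pc, p);
	if (i >= p->size)
		return;
	p->base[i / 2] += ticks;
}

int
main(void)
{
	static uint16_t buf[128];
	struct uprof_sketch p = {
		.base = buf, .size = sizeof(buf),
		.off = 0x1000, .scale = 0x10000 / 4	/* one counter per 8 bytes */
	};

	addupc_sketch(0x1008, &p, 1);
	printf("bucket 1 = %u\n", (unsigned)buf[1]);
	return (0);
}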