summaryrefslogtreecommitdiffstats
path: root/sys/kern/subr_prof.c
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2004-07-02 03:50:48 +0000
committerjhb <jhb@FreeBSD.org>2004-07-02 03:50:48 +0000
commitca6f6cfd39b42dad774ba17c48666bb06fd0740a (patch)
tree42012da010219d1aec73a52579d11b3eaafb6e3e /sys/kern/subr_prof.c
parent2c858bc6df28ae0a10387695a8c2001bd49d9b28 (diff)
downloadFreeBSD-src-ca6f6cfd39b42dad774ba17c48666bb06fd0740a.zip
FreeBSD-src-ca6f6cfd39b42dad774ba17c48666bb06fd0740a.tar.gz
Tidy up uprof locking. Mostly the fields are protected by both the proc
lock and sched_lock so they can be read with either lock held. Document the locking as well. The one remaining bogosity is that pr_addr and pr_ticks should be per-thread but profiling of multithreaded apps is currently undefined.
Diffstat (limited to 'sys/kern/subr_prof.c')
-rw-r--r--sys/kern/subr_prof.c28
1 file changed, 18 insertions, 10 deletions
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index d2cfe65..442ca6d 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -419,17 +419,19 @@ profil(td, uap)
p = td->td_proc;
if (uap->scale == 0) {
- PROC_LOCK(td->td_proc);
- stopprofclock(td->td_proc);
- PROC_UNLOCK(td->td_proc);
+ PROC_LOCK(p);
+ stopprofclock(p);
+ PROC_UNLOCK(p);
return (0);
}
+ PROC_LOCK(p);
upp = &td->td_proc->p_stats->p_prof;
+ mtx_lock_spin(&sched_lock);
upp->pr_off = uap->offset;
upp->pr_scale = uap->scale;
upp->pr_base = uap->samples;
upp->pr_size = uap->size;
- PROC_LOCK(p);
+ mtx_unlock_spin(&sched_lock);
startprofclock(p);
PROC_UNLOCK(p);
@@ -469,16 +471,20 @@ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
if (ticks == 0)
return;
prof = &td->td_proc->p_stats->p_prof;
+ mtx_lock_spin(&sched_lock);
if (pc < prof->pr_off ||
- (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
+ (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
+ mtx_unlock_spin(&sched_lock);
return; /* out of range; ignore */
+ }
addr = prof->pr_base + i;
+ mtx_unlock_spin(&sched_lock);
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
- mtx_lock_spin(&sched_lock);
prof->pr_addr = pc;
prof->pr_ticks = ticks;
- td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING ;
+ mtx_lock_spin(&sched_lock);
+ td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING;
mtx_unlock_spin(&sched_lock);
}
}
@@ -506,7 +512,6 @@ addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
return;
}
p->p_profthreads++;
- PROC_UNLOCK(p);
prof = &p->p_stats->p_prof;
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
@@ -514,15 +519,18 @@ addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
}
addr = prof->pr_base + i;
+ PROC_UNLOCK(p);
if (copyin(addr, &v, sizeof(v)) == 0) {
v += ticks;
- if (copyout(&v, addr, sizeof(v)) == 0)
+ if (copyout(&v, addr, sizeof(v)) == 0) {
+ PROC_LOCK(p);
goto out;
+ }
}
stop = 1;
+ PROC_LOCK(p);
out:
- PROC_LOCK(p);
if (--p->p_profthreads == 0) {
if (p->p_flag & P_STOPPROF) {
wakeup(&p->p_profthreads);
OpenPOWER on IntegriCloud