diff options
author | jeff <jeff@FreeBSD.org> | 2007-05-20 22:11:50 +0000 |
---|---|---|
committer | jeff <jeff@FreeBSD.org> | 2007-05-20 22:11:50 +0000 |
commit | 027ae03b495b7cd9633acb01b6fddea042645da3 (patch) | |
tree | 42c8e52aead43f7431897ada079682da98afc241 /sys/kern/subr_prof.c | |
parent | bcfa98d0197abe20a89b307cefcf22f4e04ac8ff (diff) | |
download | FreeBSD-src-027ae03b495b7cd9633acb01b6fddea042645da3.zip FreeBSD-src-027ae03b495b7cd9633acb01b6fddea042645da3.tar.gz |
- Move clock synchronization into a separate clock lock so the global
scheduler lock is not involved. sched_lock still protects the sched_clock
call. Another patch will remedy this.
Contributed by: Attilio Rao <attilio@FreeBSD.org>
Tested by: kris, jeff
Diffstat (limited to 'sys/kern/subr_prof.c')
-rw-r--r-- | sys/kern/subr_prof.c | 10 |
1 files changed, 5 insertions, 5 deletions
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c index 3cec375..498d9d2 100644 --- a/sys/kern/subr_prof.c +++ b/sys/kern/subr_prof.c @@ -423,12 +423,12 @@ profil(td, uap) } PROC_LOCK(p); upp = &td->td_proc->p_stats->p_prof; - mtx_lock_spin(&sched_lock); + mtx_lock_spin(&time_lock); upp->pr_off = uap->offset; upp->pr_scale = uap->scale; upp->pr_base = uap->samples; upp->pr_size = uap->size; - mtx_unlock_spin(&sched_lock); + mtx_unlock_spin(&time_lock); startprofclock(p); PROC_UNLOCK(p); @@ -468,15 +468,15 @@ addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks) if (ticks == 0) return; prof = &td->td_proc->p_stats->p_prof; - mtx_lock_spin(&sched_lock); + mtx_lock_spin(&time_lock); if (pc < prof->pr_off || (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) { - mtx_unlock_spin(&sched_lock); + mtx_unlock_spin(&time_lock); return; /* out of range; ignore */ } addr = prof->pr_base + i; - mtx_unlock_spin(&sched_lock); + mtx_unlock_spin(&time_lock); if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) { td->td_profil_addr = pc; td->td_profil_ticks = ticks; |