author:    jeff <jeff@FreeBSD.org>  2007-12-15 23:13:31 +0000
committer: jeff <jeff@FreeBSD.org>  2007-12-15 23:13:31 +0000
commit:    12adc443d67286deeee69e764d979c963403497d (patch)
tree:      5aa1ecb0fadd118191701a2b1c611fcee7216753  /sys/kern/sched_4bsd.c
parent:    96bf4f52953dddc12e3490919ba932dcfb5bc76d (diff)
- Re-implement lock profiling in such a way that it no longer breaks
  the ABI when enabled. There is no longer an embedded lock_profile_object
  in each lock. Instead a list of lock_profile_objects is kept per-thread
  for each lock it may own. The cnt_hold statistic is now always 0 to
  facilitate this.
- Support shared locking by tracking individual lock instances and
  statistics in the per-thread per-instance lock_profile_object.
- Make the lock profiling hash table a per-cpu singly linked list with a
  per-cpu static lock_prof allocator. This removes the need for an array
  of spinlocks and reduces cache contention between cores.
- Use a separate hash for spinlocks and other locks so that only a
  critical_enter() is required and not a spinlock_enter() to modify the
  per-cpu tables.
- Count time spent spinning in the lock statistics.
- Remove the LOCK_PROFILE_SHARED option as it is always supported now.
- Specifically drop and release the scheduler locks in both schedulers
  since we track owners now.

In collaboration with: Kip Macy
Sponsored by: Nokia
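The per-cpu hash table is the heart of the scalability change: with one
table per cpu, a thread only has to prevent migration and preemption
(critical_enter()) to update its statistics, rather than contend on a
shared array of spinlocks. A minimal sketch of the idea follows;
lock_prof, critical_enter()/critical_exit(), and PCPU_GET() are real
kernel names, but the struct layout, hash function, and lp_* identifiers
are illustrative assumptions, not the committed implementation.

/*
 * Illustrative sketch of a per-cpu, singly linked lock profiling
 * hash table.  The lp_* layout and hash are hypothetical; only the
 * critical_enter()/critical_exit() discipline mirrors the commit.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/pcpu.h>

#define	LP_HASH_SIZE	512

struct lock_prof {
	SLIST_ENTRY(lock_prof)	lp_link;	/* per-cpu hash chain */
	const char		*lp_file;	/* acquisition site */
	int			lp_line;
	uint64_t		lp_holdtime;	/* accumulated hold time */
	uint64_t		lp_spintime;	/* time spent spinning */
};

SLIST_HEAD(lp_head, lock_prof);

/* One table per cpu: no array of spinlocks is needed. */
static struct lp_head	lp_hash[MAXCPU][LP_HASH_SIZE];

static void
lp_record(const char *file, int line, uint64_t hold, uint64_t spin)
{
	struct lp_head *head;
	struct lock_prof *lp;

	/*
	 * critical_enter() pins the thread to this cpu and blocks
	 * preemption, so no other thread can mutate the per-cpu
	 * chain underneath us.  Spinlocks get a separate hash so
	 * that spinlock_enter() is never required here.
	 */
	critical_enter();
	head = &lp_hash[PCPU_GET(cpuid)]
	    [((uintptr_t)file / sizeof(void *) + line) % LP_HASH_SIZE];
	SLIST_FOREACH(lp, head, lp_link)
		if (lp->lp_file == file && lp->lp_line == line)
			break;
	if (lp != NULL) {
		lp->lp_holdtime += hold;
		lp->lp_spintime += spin;
	}
	critical_exit();
}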
Diffstat (limited to 'sys/kern/sched_4bsd.c')
 sys/kern/sched_4bsd.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index f6e702e..e1e5c91 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -878,9 +878,11 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
-
/* I feel sleepy */
+ lock_profile_release_lock(&sched_lock.lock_object);
cpu_switch(td, newtd, td->td_lock);
+ lock_profile_obtain_lock_success(&sched_lock.lock_object,
+ 0, 0, __FILE__, __LINE__);
/*
* Where am I? What year is it?
* We are in the same thread that went to sleep above,
@@ -1375,6 +1377,7 @@ sched_throw(struct thread *td)
mtx_lock_spin(&sched_lock);
spinlock_exit();
} else {
+ lock_profile_release_lock(&sched_lock.lock_object);
MPASS(td->td_lock == &sched_lock);
}
mtx_assert(&sched_lock, MA_OWNED);
@@ -1394,6 +1397,8 @@ sched_fork_exit(struct thread *td)
*/
td->td_oncpu = PCPU_GET(cpuid);
sched_lock.mtx_lock = (uintptr_t)td;
+ lock_profile_obtain_lock_success(&sched_lock.lock_object,
+ 0, 0, __FILE__, __LINE__);
THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}
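The sched_4bsd hunks apply the last item of the commit message: because
profiling now tracks lock owners per-thread, the switch path must close
out the outgoing thread's record for sched_lock before cpu_switch() and
log a fresh acquisition once the thread is rescheduled. A condensed
sketch of that handoff, reusing the lock_profile_* calls exactly as they
appear in the diff (the surrounding scheduler bookkeeping is elided):

/*
 * Condensed sketch of the switch-path handoff shown above; real
 * scheduler logic is elided.  sched_lock and cpu_switch() are
 * declared by the scheduler and machine-dependent code.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/lock_profile.h>

extern struct mtx sched_lock;

static void
switch_sketch(struct thread *td, struct thread *newtd)
{
	/*
	 * Stop charging sched_lock hold time to this thread;
	 * other threads will own the lock while we are
	 * switched out.
	 */
	lock_profile_release_lock(&sched_lock.lock_object);
	cpu_switch(td, newtd, td->td_lock);
	/*
	 * Same thread, arbitrarily later: we hold the scheduler
	 * lock again, so record a fresh acquisition under our
	 * own identity.
	 */
	lock_profile_obtain_lock_success(&sched_lock.lock_object,
	    0, 0, __FILE__, __LINE__);
}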