author:    jeff <jeff@FreeBSD.org>  2007-12-15 23:13:31 +0000
committer: jeff <jeff@FreeBSD.org>  2007-12-15 23:13:31 +0000
commit:    12adc443d67286deeee69e764d979c963403497d
tree:      5aa1ecb0fadd118191701a2b1c611fcee7216753 /sys/kern/sched_ule.c
parent:    96bf4f52953dddc12e3490919ba932dcfb5bc76d
- Re-implement lock profiling in such a way that it no longer breaks
  the ABI when enabled. There is no longer an embedded
  lock_profile_object in each lock. Instead, a list of
  lock_profile_objects is kept per-thread for each lock it may own.
  The cnt_hold statistic is now always 0 to facilitate this.
- Support shared locking by tracking individual lock instances and
  statistics in the per-thread per-instance lock_profile_object.
- Make the lock profiling hash table a per-cpu singly linked list with
  a per-cpu static lock_prof allocator. This removes the need for an
  array of spinlocks and reduces cache contention between cores.
- Use a separate hash for spinlocks and other locks so that only a
  critical_enter() is required, not a spinlock_enter(), to modify the
  per-cpu tables.
- Count time spent spinning in the lock statistics.
- Remove the LOCK_PROFILE_SHARED option as it is always supported now.
- Specifically drop and release the scheduler locks in both schedulers
  since we track owners now.

In collaboration with: Kip Macy
Sponsored by:          Nokia
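As a rough illustration of the layout the message describes, below is a
minimal user-space sketch of a per-cpu, singly linked lock_prof hash
backed by a per-cpu static allocator. Every name in it (NCPU,
LP_HASH_SIZE, lp_alloc, lp_lookup, the field names) is a hypothetical
stand-in, not the kernel's actual subr_lock.c API, and the
critical_enter()/critical_exit() protection the commit relies on is
only noted in comments.

#include <stdint.h>
#include <stdio.h>

#define NCPU		4	/* assumed CPU count for the sketch */
#define LP_HASH_SIZE	64	/* assumed hash-table size */
#define LP_MAX_RECS	128	/* size of each per-CPU static pool */

struct lock_prof {
	struct lock_prof *lp_next;	/* singly linked hash chain */
	const char	 *lp_file;	/* acquisition site */
	int		  lp_line;
	unsigned long	  lp_holdcnt;
	unsigned long	  lp_spincnt;	/* spinning is counted too */
};

struct lp_cpu {
	struct lock_prof *lpc_hash[LP_HASH_SIZE];
	struct lock_prof  lpc_pool[LP_MAX_RECS];
	int		  lpc_used;
};

/*
 * One table per CPU; per the commit message, spinlocks would get a
 * second, separate set of tables so that critical_enter() alone
 * (rather than spinlock_enter()) suffices to modify them.
 */
static struct lp_cpu lp_cpus[NCPU];

/* Per-CPU static allocator: no malloc, no shared spinlock array. */
static struct lock_prof *
lp_alloc(struct lp_cpu *c)
{
	if (c->lpc_used >= LP_MAX_RECS)
		return (NULL);		/* pool exhausted; drop the sample */
	return (&c->lpc_pool[c->lpc_used++]);
}

/*
 * Find or create the record for (file, line) on the given CPU.  In
 * the kernel this would run between critical_enter() and
 * critical_exit() so the caller cannot migrate off its own table.
 */
static struct lock_prof *
lp_lookup(int cpu, const char *file, int line)
{
	struct lp_cpu *c = &lp_cpus[cpu];
	struct lock_prof *lp;
	unsigned int h;

	h = ((unsigned int)line * 31u ^ (unsigned int)(uintptr_t)file) %
	    LP_HASH_SIZE;
	for (lp = c->lpc_hash[h]; lp != NULL; lp = lp->lp_next)
		if (lp->lp_file == file && lp->lp_line == line)
			return (lp);
	if ((lp = lp_alloc(c)) == NULL)
		return (NULL);
	lp->lp_file = file;
	lp->lp_line = line;
	lp->lp_holdcnt = 0;
	lp->lp_spincnt = 0;
	lp->lp_next = c->lpc_hash[h];	/* push onto the chain head */
	c->lpc_hash[h] = lp;
	return (lp);
}

int
main(void)
{
	struct lock_prof *lp;

	lp = lp_lookup(0, __FILE__, __LINE__);
	if (lp != NULL) {
		lp->lp_holdcnt++;
		printf("%s:%d held %lu time(s)\n", lp->lp_file,
		    lp->lp_line, lp->lp_holdcnt);
	}
	return (0);
}

Because each CPU only ever touches its own table and pool, lookups and
inserts need no interprocessor synchronization, which is the cache
contention win the message claims.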
Diffstat (limited to 'sys/kern/sched_ule.c')
 sys/kern/sched_ule.c | 6 ++++++
 1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index e9d9468..6841bab 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1894,6 +1894,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
+ lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
cpu_switch(td, newtd, mtx);
/*
@@ -1903,6 +1904,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
*/
cpuid = PCPU_GET(cpuid);
tdq = TDQ_CPU(cpuid);
+ lock_profile_obtain_lock_success(
+ &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
#ifdef HWPMC_HOOKS
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
@@ -2618,6 +2621,7 @@ sched_throw(struct thread *td)
} else {
MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
tdq_load_rem(tdq, td->td_sched);
+ lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
}
KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
newtd = choosethread();
@@ -2650,6 +2654,8 @@ sched_fork_exit(struct thread *td)
MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
td->td_oncpu = cpuid;
TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
+ lock_profile_obtain_lock_success(
+ &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
}
static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
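The sched_ule.c hunks above all follow one pattern: because the
profiler now tracks owners, the outgoing thread must report the
per-cpu run-queue lock as released before cpu_switch() hands it to the
incoming thread, and report it as obtained again once it resumes,
possibly on a different CPU's queue. The self-contained sketch below
restates only that ordering; struct rq_lock, profiler_release(), and
profiler_obtain() are hypothetical stand-ins for the tdq lock and the
lock_profile_*() calls in the diff, not kernel code.

#include <stdint.h>
#include <stdio.h>

struct rq_lock {
	uintptr_t owner;	/* models mtx_lock: the owning thread */
};

static void
profiler_release(struct rq_lock *l)
{
	/* kernel: lock_profile_release_lock(&...->lock_object) */
	printf("profiler: lock released by thread %#lx\n",
	    (unsigned long)l->owner);
}

static void
profiler_obtain(struct rq_lock *l, const char *file, int line)
{
	/* kernel: lock_profile_obtain_lock_success(..., file, line) */
	printf("profiler: lock owned by %#lx, obtained at %s:%d\n",
	    (unsigned long)l->owner, file, line);
}

int
main(void)
{
	struct rq_lock rq = { .owner = 0x1001 };	/* outgoing thread */
	uintptr_t newtd = 0x2002;			/* incoming thread */

	/*
	 * 1. Report the release *before* the switch: ownership is
	 *    tracked per thread, so the record must not survive into
	 *    the incoming thread's context.
	 */
	profiler_release(&rq);

	/*
	 * 2. Hand the lock to the incoming thread and context-switch
	 *    (TDQ_LOCKPTR(tdq)->mtx_lock = newtd then cpu_switch() in
	 *    the real code; elided here).
	 */
	rq.owner = newtd;

	/*
	 * 3. After resuming, possibly on another CPU and thus holding
	 *    a different run-queue lock, record the acquisition again.
	 */
	profiler_obtain(&rq, __FILE__, __LINE__);
	return (0);
}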