author    jeff <jeff@FreeBSD.org>    2007-12-15 23:13:31 +0000
committer jeff <jeff@FreeBSD.org>    2007-12-15 23:13:31 +0000
commit    12adc443d67286deeee69e764d979c963403497d (patch)
tree      5aa1ecb0fadd118191701a2b1c611fcee7216753 /sys/kern/kern_sx.c
parent    96bf4f52953dddc12e3490919ba932dcfb5bc76d (diff)
- Re-implement lock profiling in such a way that it no longer breaks
  the ABI when enabled.  There is no longer an embedded
  lock_profile_object in each lock.  Instead a list of
  lock_profile_objects is kept per-thread for each lock it may own.
  The cnt_hold statistic is now always 0 to facilitate this.
- Support shared locking by tracking individual lock instances and
  statistics in the per-thread per-instance lock_profile_object.
- Make the lock profiling hash table a per-cpu singly linked list with
  a per-cpu static lock_prof allocator.  This removes the need for an
  array of spinlocks and reduces cache contention between cores.
  (A sketch of this layout follows the list.)
- Use a separate hash for spinlocks and other locks so that only a
  critical_enter() is required and not a spinlock_enter() to modify
  the per-cpu tables.
- Count time spent spinning in the lock statistics.
- Remove the LOCK_PROFILE_SHARED option as it is always supported now.
- Specifically drop and release the scheduler locks in both schedulers
  since we track owners now.

In collaboration with:	Kip Macy
Sponsored by:	Nokia
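To make the description above concrete, here is a minimal sketch of the
per-cpu table layout the message describes.  Every name in it
(lock_prof, lock_prof_cpu, LPROF_HASH_SIZE, LPROF_POOL_SIZE,
LPROF_MAXCPU) is an illustrative assumption for exposition, not the
exact definition this commit adds elsewhere in the tree:

    #include <sys/queue.h>
    #include <stdint.h>

    #define LPROF_HASH_SIZE 4096    /* assumed hash table size */
    #define LPROF_POOL_SIZE 1024    /* assumed per-cpu pool size */
    #define LPROF_MAXCPU    64      /* assumed CPU count bound */

    /* One record per (lock, acquisition site) pair. */
    struct lock_prof {
            SLIST_ENTRY(lock_prof) link;  /* singly linked hash chain */
            const char *name;             /* lock name */
            const char *file;             /* acquisition site */
            int line;
            uint64_t cnt_wait;            /* time spent blocked */
            uint64_t cnt_spin;            /* time spent spinning, now counted */
            uint64_t cnt_contest;         /* contended acquisitions */
    };

    SLIST_HEAD(lphead, lock_prof);

    struct lock_prof_cpu {
            /*
             * Separate hashes for spinlocks and sleepable locks, so a
             * spinlock update needs only critical_enter(), never
             * spinlock_enter().
             */
            struct lphead hash[LPROF_HASH_SIZE];
            struct lphead spin_hash[LPROF_HASH_SIZE];
            /*
             * Static allocator: records come from a per-cpu pool, so
             * the profiling path never takes a lock or calls malloc.
             */
            struct lock_prof pool[LPROF_POOL_SIZE];
            int free;                     /* next unused pool slot */
    };

    /*
     * One table per CPU: a thread only touches the table of the CPU
     * it is running on, so disabling preemption is sufficient to
     * serialize updates.  This replaces the old array of spinlocks
     * and avoids cross-core cache-line contention.
     */
    static struct lock_prof_cpu lock_prof_percpu[LPROF_MAXCPU];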
Diffstat (limited to 'sys/kern/kern_sx.c')

 sys/kern/kern_sx.c | 34 +++++++++-------------------------
 1 file changed, 9 insertions(+), 25 deletions(-)
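The acquisition-path hunks below all converge on one pattern, shown
here in condensed form.  This is a sketch: try_acquire() and error are
hypothetical placeholders for the real atomic_cmpset_acq_ptr() loop
and sleepq logic visible in the hunks; only the lock_profile_* calls
and the contested/waittime variables are taken from the diff itself:

    uint64_t waittime = 0;
    int contested = 0;

    while (!try_acquire(sx)) {
            /*
             * Record every failed attempt up front, whether the
             * thread then spins adaptively or goes to sleep; this is
             * what lets spin time be counted in the statistics.
             */
            lock_profile_obtain_lock_failed(&sx->lock_object,
                &contested, &waittime);
            /* ... adaptive spin or sleepq wait, as in the hunks ... */
    }
    if (error == 0)
            lock_profile_obtain_lock_success(&sx->lock_object,
                contested, waittime, file, line);

Previously the failed-acquisition hook sat inside each individual spin
and sleep branch, and the shared-lock calls were fenced off behind
LOCK_PROFILING_SHARED; hoisting one unconditional call to the top of
the retry loop is what the removed lines below undo.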
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 1e3f135..bc172e5 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -302,11 +302,8 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
curthread->td_locks--;
WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
-#ifdef LOCK_PROFILING_SHARED
- if (SX_SHARERS(sx->sx_lock) == 1)
- lock_profile_release_lock(&sx->lock_object);
-#endif
__sx_sunlock(sx, file, line);
+ lock_profile_release_lock(&sx->lock_object);
}
void
@@ -450,6 +447,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
+ lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+ &waittime);
#ifdef ADAPTIVE_SX
/*
* If the lock is write locked and the owner is
@@ -467,8 +466,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
"%s: spinning on %p held by %p",
__func__, sx, owner);
GIANT_SAVE();
- lock_profile_obtain_lock_failed(
- &sx->lock_object, &contested, &waittime);
while (SX_OWNER(sx->sx_lock) == x &&
TD_IS_RUNNING(owner))
cpu_spinwait();
@@ -555,8 +552,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
__func__, sx);
GIANT_SAVE();
- lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
- &waittime);
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
@@ -648,10 +643,8 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
#endif
-#ifdef LOCK_PROFILING_SHARED
uint64_t waittime = 0;
int contested = 0;
-#endif
uintptr_t x;
int error = 0;
@@ -672,12 +665,6 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
MPASS(!(x & SX_LOCK_SHARED_WAITERS));
if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
x + SX_ONE_SHARER)) {
-#ifdef LOCK_PROFILING_SHARED
- if (SX_SHARERS(x) == 0)
- lock_profile_obtain_lock_success(
- &sx->lock_object, contested,
- waittime, file, line);
-#endif
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR4(KTR_LOCK,
"%s: %p succeed %p -> %p", __func__,
@@ -687,6 +674,8 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
}
continue;
}
+ lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+ &waittime);
#ifdef ADAPTIVE_SX
/*
@@ -694,7 +683,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
* the owner stops running or the state of the lock
* changes.
*/
- else if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
+ if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
x = SX_OWNER(x);
owner = (struct thread *)x;
if (TD_IS_RUNNING(owner)) {
@@ -703,10 +692,6 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
"%s: spinning on %p held by %p",
__func__, sx, owner);
GIANT_SAVE();
-#ifdef LOCK_PROFILING_SHARED
- lock_profile_obtain_lock_failed(
- &sx->lock_object, &contested, &waittime);
-#endif
while (SX_OWNER(sx->sx_lock) == x &&
TD_IS_RUNNING(owner))
cpu_spinwait();
@@ -772,10 +757,6 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
__func__, sx);
GIANT_SAVE();
-#ifdef LOCK_PROFILING_SHARED
- lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
- &waittime);
-#endif
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
@@ -795,6 +776,9 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
__func__, sx);
}
+ if (error == 0)
+ lock_profile_obtain_lock_success(&sx->lock_object, contested,
+ waittime, file, line);
GIANT_RESTORE();
return (error);
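For context: the statistics this change feeds are compiled in with the
LOCK_PROFILING kernel option and, if memory of this era of the tree
serves, collected and dumped through the debug.lock.prof.* sysctl
knobs; treat those knob names as assumptions rather than guarantees.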