author      kmacy <kmacy@FreeBSD.org>  2007-02-27 06:42:05 +0000
committer   kmacy <kmacy@FreeBSD.org>  2007-02-27 06:42:05 +0000
commit      b7672bad26dbe5a50937a2e258fe957b1bc19a8d (patch)
tree        c7228dd81a9d5cf5855323c889da39c667a4dd5a /sys/kern
parent      128ac1d5958eb05b5b0c08882c80a3f90ef76d1b (diff)
Further improvements to LOCK_PROFILING:
- Fix a missing initialization in kern_rwlock.c that caused bogus times to be collected.
- Move updates to the lock hash to after the lock is released, for spin mutexes, sleep mutexes, and sx locks.
- Add a new kernel build option, LOCK_PROFILE_FAST, which only updates lock profiling statistics when an acquisition is contended. This reduces the overhead of LOCK_PROFILING to a 20%-25% increase in system time, which for "make -j8 kernel-toolchain" on a dual Woodcrest is unmeasurable in wall-clock time. By contrast, enabling lock profiling without LOCK_PROFILE_FAST yields a 5x-6x slowdown in wall-clock time.
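The second item above (deferring the profiling hash update until after the lock is dropped) follows a simple pattern: snapshot the lock_object while the lock is still held, clear LO_CONTESTED on the live object, release, and only then hand the snapshot to the profiler. A minimal userspace sketch of that pattern follows; my_lock, my_release, and profile_release are illustrative stand-ins, not the kernel's actual interfaces.

#include <stdio.h>
#include <string.h>

#define LO_CONTESTED 0x01    /* stand-in for the real flag value */

struct lock_object {
    const char *lo_name;
    unsigned int lo_flags;
};

struct my_lock {
    struct lock_object lk_object;
    int lk_held;
};

/* Stand-in for _rel_sleep_lock()/_rel_spin_lock(): just drop the lock. */
static void my_release(struct my_lock *lk)
{
    lk->lk_held = 0;
}

/* Stand-in for lock_profile_release_lock(): consume the snapshot. */
static void profile_release(const struct lock_object *lo)
{
    printf("profiled release of %s (contested: %s)\n", lo->lo_name,
        (lo->lo_flags & LO_CONTESTED) ? "yes" : "no");
}

static void my_unlock(struct my_lock *lk)
{
    struct lock_object lo;

    /*
     * Copy the lock_object while the lock is still held and clear
     * LO_CONTESTED so the next acquisition starts with a clean flag;
     * the copy is profiled only after the lock has been released.
     */
    memcpy(&lo, &lk->lk_object, sizeof(lo));
    lk->lk_object.lo_flags &= ~LO_CONTESTED;

    my_release(lk);          /* lock is free from here on */
    profile_release(&lo);    /* hash update runs off the lock */
}

int main(void)
{
    struct my_lock lk = { { "demo lock", LO_CONTESTED }, 1 };

    my_unlock(&lk);
    return 0;
}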
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_mutex.c   17
-rw-r--r--  sys/kern/kern_rwlock.c   4
-rw-r--r--  sys/kern/kern_sx.c      25
3 files changed, 36 insertions(+), 10 deletions(-)
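The LOCK_PROFILE_FAST option itself is not visible in the sys/kern hunks below, which only touch the release paths. A hedged sketch of how contention-gated statistics collection could look is given here; lock_profile_release_sketch, record_lock_stats, and the tick arguments are illustrative assumptions, not the real implementation.

#include <stdint.h>
#include <stdio.h>

#define LO_CONTESTED 0x01    /* stand-in for the real flag value */

struct lock_object {
    const char *lo_name;
    unsigned int lo_flags;
};

/* Stand-in for the hash-table update that lock profiling performs. */
static void record_lock_stats(const struct lock_object *lo, uint64_t held)
{
    printf("%s held for %llu ticks\n", lo->lo_name, (unsigned long long)held);
}

static void lock_profile_release_sketch(const struct lock_object *lo,
    uint64_t acquired, uint64_t released)
{
#ifdef LOCK_PROFILE_FAST
    /* Fast path: uncontended releases cost only this flag test. */
    if ((lo->lo_flags & LO_CONTESTED) == 0)
        return;
#endif
    record_lock_stats(lo, released - acquired);
}

int main(void)
{
    struct lock_object contested = { "contested lock", LO_CONTESTED };
    struct lock_object quiet = { "uncontended lock", 0 };

    lock_profile_release_sketch(&contested, 100, 175);
    lock_profile_release_sketch(&quiet, 200, 205);
    return 0;
}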
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index b3b652e..00d0353 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -158,6 +158,8 @@ void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{
+ struct lock_object lo;
+
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
@@ -169,9 +171,12 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
mtx_assert(m, MA_OWNED);
-
- lock_profile_release_lock(&m->mtx_object);
+#ifdef LOCK_PROFILING
+ memcpy(&lo, &m->mtx_object, sizeof(lo));
+ m->mtx_object.lo_flags &= ~LO_CONTESTED;
+#endif
_rel_sleep_lock(m, curthread, opts, file, line);
+ lock_profile_release_lock(&lo);
}
void
@@ -196,6 +201,8 @@ void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{
+ struct lock_object lo;
+
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
@@ -206,8 +213,12 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
mtx_assert(m, MA_OWNED);
- lock_profile_release_lock(&m->mtx_object);
+#ifdef LOCK_PROFILING
+ memcpy(&lo, &m->mtx_object, sizeof(lo));
+ m->mtx_object.lo_flags &= ~LO_CONTESTED;
+#endif
_rel_spin_lock(m);
+ lock_profile_release_lock(&lo);
}
/*
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 18e9a54..9c7d4f8 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -150,8 +150,8 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
#ifdef SMP
volatile struct thread *owner;
#endif
- uint64_t waitstart;
- int contested;
+ uint64_t waitstart = 0;
+ int contested = 0;
uintptr_t x;
KASSERT(rw_wowner(rw) != curthread,
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index e0bff42..2381c06 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -228,7 +228,9 @@ _sx_try_xlock(struct sx *sx, const char *file, int line)
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
-
+ struct lock_object lo;
+ int count = -1;
+
_sx_assert(sx, SX_SLOCKED, file, line);
mtx_lock(sx->sx_lock);
@@ -238,8 +240,13 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
/* Release. */
sx->sx_cnt--;
- if (sx->sx_cnt == 0)
- lock_profile_release_lock(&sx->sx_object);
+#ifdef LOCK_PROFILING
+ if (sx->sx_cnt == 0) {
+ memcpy(&lo, &sx->sx_object, sizeof(lo));
+ sx->sx_object.lo_flags &= ~LO_CONTESTED;
+ count = 0;
+ }
+#endif
/*
* If we just released the last shared lock, wake any waiters up, giving
* exclusive lockers precedence. In order to make sure that exclusive
@@ -255,12 +262,16 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
+ if (count == 0)
+ lock_profile_release_lock(&lo);
+
}
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{
-
+ struct lock_object lo;
+
_sx_assert(sx, SX_XLOCKED, file, line);
mtx_lock(sx->sx_lock);
MPASS(sx->sx_cnt == -1);
@@ -272,7 +283,10 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
sx->sx_cnt++;
sx->sx_xholder = NULL;
- lock_profile_release_lock(&sx->sx_object);
+#ifdef LOCK_PROFILING
+ memcpy(&lo, &sx->sx_object, sizeof(lo));
+ sx->sx_object.lo_flags &= ~LO_CONTESTED;
+#endif
/*
* Wake up waiters if there are any. Give precedence to slock waiters.
*/
@@ -284,6 +298,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
+ lock_profile_release_lock(&lo);
}
int