author    kmacy <kmacy@FreeBSD.org>    2007-02-27 06:42:05 +0000
committer kmacy <kmacy@FreeBSD.org>    2007-02-27 06:42:05 +0000
commit    b7672bad26dbe5a50937a2e258fe957b1bc19a8d (patch)
tree      c7228dd81a9d5cf5855323c889da39c667a4dd5a /sys/kern/kern_sx.c
parent    128ac1d5958eb05b5b0c08882c80a3f90ef76d1b (diff)
Further improvements to LOCK_PROFILING:
- Fix a missing initialization in kern_rwlock.c that caused bogus times to be collected.
- Move updates to the lock hash to after the lock is released, for spin mutexes, sleep mutexes, and sx locks.
- Add a new kernel build option, LOCK_PROFILE_FAST: only update lock profiling statistics when an acquisition is contended. This reduces the overhead of LOCK_PROFILING to a 20%-25% increase in system time, which for "make -j8 kernel-toolchain" on a dual-Woodcrest machine is unmeasurable in wall-clock time. By contrast, enabling lock profiling without LOCK_PROFILE_FAST produces a 5x-6x slowdown in wall-clock time.
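The pattern this diff applies to both sx unlock paths - snapshot the profiling state while the interlock is still held, clear LO_CONTESTED, and perform the lock-hash update only after mtx_unlock() - can be sketched as a standalone userland analogue. The sketch below is illustrative only: prof_state, report_release(), and CONTESTED are hypothetical stand-ins for struct lock_object, lock_profile_release_lock(), and LO_CONTESTED, not the FreeBSD API.

	/*
	 * Userland sketch of the unlock-path pattern in this commit:
	 * copy the profiling state out while the interlock is held,
	 * clear the contested flag, and only report after the unlock.
	 * All names here are hypothetical stand-ins.
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	#define CONTESTED	0x1		/* analogue of LO_CONTESTED */

	struct prof_state {			/* analogue of struct lock_object */
		unsigned int	flags;
		unsigned long	hold_cycles;
	};

	struct shared_lock {
		pthread_mutex_t	interlock;	/* analogue of sx->sx_lock */
		int		cnt;		/* analogue of sx->sx_cnt */
		struct prof_state prof;
	};

	/* Runs outside the critical section, like the moved hash update. */
	static void
	report_release(const struct prof_state *ps)
	{

		printf("released: contested=%d held=%lu cycles\n",
		    (ps->flags & CONTESTED) != 0, ps->hold_cycles);
	}

	static void
	shared_unlock(struct shared_lock *sl)
	{
		struct prof_state snap;
		int count = -1;		/* sentinel: nothing to report */

		pthread_mutex_lock(&sl->interlock);
		sl->cnt--;
		if (sl->cnt == 0) {
			/* Snapshot while the interlock protects the state. */
			memcpy(&snap, &sl->prof, sizeof(snap));
			sl->prof.flags &= ~CONTESTED;
			count = 0;
		}
		pthread_mutex_unlock(&sl->interlock);

		/* The potentially slow update happens after the release. */
		if (count == 0)
			report_release(&snap);
	}

	int
	main(void)
	{
		struct shared_lock sl = {
			.interlock = PTHREAD_MUTEX_INITIALIZER,
			.cnt = 1,
			.prof = { .flags = CONTESTED, .hold_cycles = 1234 },
		};

		shared_unlock(&sl);
		return (0);
	}

Reporting from a private copy after the unlock keeps the interlock's critical section short; presumably this is the motivation for moving the lock-hash update past the release in this commit, since the profiling machinery would otherwise inflate the very hold times it is measuring.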
Diffstat (limited to 'sys/kern/kern_sx.c')
-rw-r--r--  sys/kern/kern_sx.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index e0bff42..2381c06 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -228,7 +228,9 @@ _sx_try_xlock(struct sx *sx, const char *file, int line)
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
-
+ struct lock_object lo;
+ int count = -1;
+
_sx_assert(sx, SX_SLOCKED, file, line);
mtx_lock(sx->sx_lock);
@@ -238,8 +240,13 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
/* Release. */
sx->sx_cnt--;
- if (sx->sx_cnt == 0)
- lock_profile_release_lock(&sx->sx_object);
+#ifdef LOCK_PROFILING
+ if (sx->sx_cnt == 0) {
+ memcpy(&lo, &sx->sx_object, sizeof(lo));
+ sx->sx_object.lo_flags &= ~LO_CONTESTED;
+ count = 0;
+ }
+#endif
/*
* If we just released the last shared lock, wake any waiters up, giving
* exclusive lockers precedence. In order to make sure that exclusive
@@ -255,12 +262,16 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
+ if (count == 0)
+ lock_profile_release_lock(&lo);
+
}
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{
-
+ struct lock_object lo;
+
_sx_assert(sx, SX_XLOCKED, file, line);
mtx_lock(sx->sx_lock);
MPASS(sx->sx_cnt == -1);
@@ -272,7 +283,10 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
sx->sx_cnt++;
sx->sx_xholder = NULL;
- lock_profile_release_lock(&sx->sx_object);
+#ifdef LOCK_PROFILING
+ memcpy(&lo, &sx->sx_object, sizeof(lo));
+ sx->sx_object.lo_flags &= ~LO_CONTESTED;
+#endif
/*
* Wake up waiters if there are any. Give precedence to slock waiters.
*/
@@ -284,6 +298,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
+ lock_profile_release_lock(&lo);
}
int