path: root/sys/kern/kern_mutex.c
author     jeff <jeff@FreeBSD.org>  2007-12-15 23:13:31 +0000
committer  jeff <jeff@FreeBSD.org>  2007-12-15 23:13:31 +0000
commit     12adc443d67286deeee69e764d979c963403497d (patch)
tree       5aa1ecb0fadd118191701a2b1c611fcee7216753  /sys/kern/kern_mutex.c
parent     96bf4f52953dddc12e3490919ba932dcfb5bc76d (diff)
- Re-implement lock profiling in such a way that it no longer breaks
  the ABI when enabled.  There is no longer an embedded lock_profile_object
  in each lock.  Instead a list of lock_profile_objects is kept per-thread
  for each lock it may own.  The cnt_hold statistic is now always 0 to
  facilitate this.
- Support shared locking by tracking individual lock instances and
  statistics in the per-thread per-instance lock_profile_object.
- Make the lock profiling hash table a per-cpu singly linked list with
  a per-cpu static lock_prof allocator.  This removes the need for an
  array of spinlocks and reduces cache contention between cores.
- Use a separate hash for spinlocks and other locks so that only a
  critical_enter() is required and not a spinlock_enter() to modify the
  per-cpu tables.
- Count time spent spinning in the lock statistics.
- Remove the LOCK_PROFILE_SHARED option as it is always supported now.
- Specifically drop and release the scheduler locks in both schedulers
  since we track owners now.

In collaboration with:	Kip Macy
Sponsored by:	Nokia
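
The per-CPU table described above is the heart of the change: each CPU owns its
own hash of lock_prof records and its own small static allocator, so recording
a sample only has to prevent preemption and migration, not take a lock shared
with other CPUs.  The following is a minimal user-space sketch of that idea, not
the code added by this commit; NCPU_SKETCH, struct lp_cpu, cur_cpu_stub() and
critical_enter_stub()/critical_exit_stub() are illustrative stand-ins for the
kernel's curcpu and critical_enter()/critical_exit(), and the record layout is
simplified.

/*
 * Minimal sketch of a per-CPU lock-profiling table: each CPU has its own
 * singly linked hash chains and a static pool of records, so an update
 * needs only a critical section, never a spinlock, and never contends
 * with other CPUs for the buckets.  All identifiers here are illustrative
 * stand-ins, not the names used in the FreeBSD sources.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NCPU_SKETCH     4
#define LP_HASH_SIZE    64
#define LP_MAX_RECORDS  128

struct lock_prof {
    struct lock_prof *lp_next;      /* singly linked hash chain */
    const char       *lp_name;      /* name of the profiled lock */
    uint64_t          lp_cnt_cur;   /* acquisitions recorded */
    uint64_t          lp_wait_total;/* time spent waiting or spinning */
};

struct lp_cpu {
    struct lock_prof *lpc_hash[LP_HASH_SIZE];   /* per-CPU buckets */
    struct lock_prof  lpc_pool[LP_MAX_RECORDS]; /* per-CPU static allocator */
    int               lpc_free;                 /* next free pool slot */
};

static struct lp_cpu lp_cpus[NCPU_SKETCH];

/* Stand-ins for curcpu and critical_enter()/critical_exit(). */
static int  cur_cpu_stub(void) { return (0); }
static void critical_enter_stub(void) { }
static void critical_exit_stub(void) { }

static unsigned
lp_hash(const char *name)
{
    unsigned h = 5381;

    while (*name != '\0')
        h = h * 33 + (unsigned char)*name++;
    return (h % LP_HASH_SIZE);
}

/*
 * Record one acquisition.  Only the owning CPU ever touches its own table,
 * so disabling preemption and migration is sufficient serialization.
 */
static void
lock_prof_record(const char *name, uint64_t wait_time)
{
    struct lp_cpu *c;
    struct lock_prof *lp;
    unsigned h;

    critical_enter_stub();
    c = &lp_cpus[cur_cpu_stub()];
    h = lp_hash(name);
    for (lp = c->lpc_hash[h]; lp != NULL; lp = lp->lp_next)
        if (strcmp(lp->lp_name, name) == 0)
            break;
    if (lp == NULL && c->lpc_free < LP_MAX_RECORDS) {
        /* Allocate a new record from the per-CPU static pool. */
        lp = &c->lpc_pool[c->lpc_free++];
        lp->lp_name = name;
        lp->lp_next = c->lpc_hash[h];
        c->lpc_hash[h] = lp;
    }
    if (lp != NULL) {
        lp->lp_cnt_cur++;
        lp->lp_wait_total += wait_time;
    }
    critical_exit_stub();
}

int
main(void)
{
    lock_prof_record("Giant", 120);
    lock_prof_record("Giant", 80);
    printf("Giant: %llu acquisitions\n", (unsigned long long)
        lp_cpus[0].lpc_hash[lp_hash("Giant")]->lp_cnt_cur);
    return (0);
}

Keeping a separate table for spin locks, as the commit message notes, is what
lets this update path get away with a critical_enter() rather than a
spinlock_enter().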
Diffstat (limited to 'sys/kern/kern_mutex.c')
-rw-r--r--  sys/kern/kern_mutex.c  26
1 file changed, 6 insertions, 20 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 727871d..81be32d 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -123,20 +123,6 @@ struct lock_class lock_class_mtx_spin = {
struct mtx blocked_lock;
struct mtx Giant;
-#ifdef LOCK_PROFILING
-static inline void lock_profile_init(void)
-{
- int i;
- /* Initialize the mutex profiling locks */
- for (i = 0; i < LPROF_LOCK_SIZE; i++) {
- mtx_init(&lprof_locks[i], "mprof lock",
- NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
- }
-}
-#else
-static inline void lock_profile_init(void) {;}
-#endif
-
void
assert_mtx(struct lock_object *lock, int what)
{
@@ -425,7 +411,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
}
#endif
lock_profile_obtain_lock_success(&m->lock_object, contested,
- waittime, (file), (line));
+ waittime, file, line);
}
static void
@@ -514,7 +500,8 @@ retry:
m->mtx_recurse++;
break;
}
- lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
+ lock_profile_obtain_lock_failed(&m->lock_object,
+ &contested, &waittime);
/* Give interrupts a chance while we spin. */
spinlock_exit();
while (m->mtx_lock != MTX_UNOWNED) {
@@ -535,8 +522,9 @@ retry:
break;
_rel_spin_lock(m); /* does spinlock_exit() */
}
- lock_profile_obtain_lock_success(&m->lock_object, contested,
- waittime, (file), (line));
+ if (m->mtx_recurse == 0)
+ lock_profile_obtain_lock_success(&m->lock_object, contested,
+ waittime, (file), (line));
WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
@@ -794,8 +782,6 @@ mutex_init(void)
mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
mtx_lock(&Giant);
-
- lock_profile_init();
}
#ifdef DDB