summaryrefslogtreecommitdiffstats
path: root/sys/kern/subr_lock.c
diff options
context:
space:
mode:
authorattilio <attilio@FreeBSD.org>2008-02-06 00:04:09 +0000
committerattilio <attilio@FreeBSD.org>2008-02-06 00:04:09 +0000
commitacc2f89a7fb9267906b61ccf186132ea33aa2eda (patch)
tree1b1f73dd58702a12d8c524882db74b0a9ff444d9 /sys/kern/subr_lock.c
parent12170a7b3f1afc618eee92f338c6799312fc0f7e (diff)
downloadFreeBSD-src-acc2f89a7fb9267906b61ccf186132ea33aa2eda.zip
FreeBSD-src-acc2f89a7fb9267906b61ccf186132ea33aa2eda.tar.gz
Really, no explicit checks against lock_class_* objects should be
done in consumers code: using locks properties is much more appropriate. Fix current code doing these bogus checks. Note: Really, callouts are not usable by all !(LC_SPINLOCK | LC_SLEEPABLE) primitives, as some (e.g. rmlocks) don't implement the generic lock layer functions, but they can be equipped for this, so the check is still valid. Tested by: matteo, kris (earlier version) Reviewed by: jhb
Diffstat (limited to 'sys/kern/subr_lock.c')
-rw-r--r--sys/kern/subr_lock.c4
1 files changed, 2 insertions, 2 deletions
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index b236f8d..74a4148 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -494,7 +494,7 @@ lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
if (lock_prof_skipcount &&
(++lock_prof_count % lock_prof_skipcount) != 0)
return;
- spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
+ spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
if (spin && lock_prof_skipspin == 1)
return;
l = lock_profile_object_lookup(lo, spin, file, line);
@@ -523,7 +523,7 @@ lock_profile_release_lock(struct lock_object *lo)
if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
return;
- spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
+ spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
head = &curthread->td_lprof[spin];
critical_enter();
LIST_FOREACH(l, head, lpo_link)
OpenPOWER on IntegriCloud