path: root/sys/kern/kern_lock.c
author    jhb <jhb@FreeBSD.org>  2014-11-04 16:35:56 +0000
committer jhb <jhb@FreeBSD.org>  2014-11-04 16:35:56 +0000
commit  abae099c34995505ef7feb6904d893c18a88994d (patch)
tree    111501bfd5cb308ac99ba2270659ea0ca6fbf21a /sys/kern/kern_lock.c
parent  0b0514e889cd64b550d56076b6b0940769f9e4d8 (diff)
Add a new thread state "spinning" to schedgraph and add tracepoints at the
start and stop of spinning waits in lock primitives.
Diffstat (limited to 'sys/kern/kern_lock.c')
-rw-r--r--  sys/kern/kern_lock.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+), 0 deletions(-)
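
The change applies one simple pattern at each adaptive-spin site in
__lockmgr_args(): emit a KTR_STATE1 event naming the lock before the
busy-wait loop, and a KTR_STATE0 event restoring the "running" state once
the loop exits, so schedgraph can draw the spinning interval. The sketch
below shows that bracket in isolation; the helper function and its
keep_spinning condition are hypothetical stand-ins for illustration, not
code from kern_lock.c:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/ktr.h>
    #include <sys/lock.h>
    #include <sys/proc.h>
    #include <sys/sched.h>

    /*
     * Hypothetical adaptive-spin helper illustrating the tracepoint
     * bracket added by this commit: mark the thread "spinning" (with
     * the lock name attached as an attribute) for the duration of the
     * busy-wait, then mark it "running" again when the wait ends.
     */
    static void
    example_adaptive_spin(struct lock_object *lo, struct thread *td,
        volatile int *keep_spinning)
    {
            KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "spinning",
                "lockname:\"%s\"", lo->lo_name);
            while (*keep_spinning)
                    cpu_spinwait();         /* CPU-friendly pause, as in the diff */
            KTR_STATE0(KTR_SCHED, "thread", sched_tdname(td), "running");
    }

Note that every exit path out of the spin loop must pass through the
KTR_STATE0 call; otherwise the trace would show the thread stuck in the
"spinning" state indefinitely. The diff below preserves this invariant at
all four sites it touches.
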
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index d0e0bdd..3fc151d 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -582,6 +582,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, lk, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(td), "spinning",
+ "lockname:\"%s\"", lk->lock_object.lo_name);
/*
* If we are holding also an interlock drop it
@@ -597,11 +600,16 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
while (LK_HOLDER(lk->lk_lock) ==
(uintptr_t)owner && TD_IS_RUNNING(owner))
cpu_spinwait();
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(td), "running");
GIANT_RESTORE();
continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
(x & LK_SHARE) != 0 && LK_SHARERS(x) &&
spintries < alk_retries) {
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(td), "spinning",
+ "lockname:\"%s\"", lk->lock_object.lo_name);
if (flags & LK_INTERLOCK) {
class->lc_unlock(ilk);
flags &= ~LK_INTERLOCK;
@@ -619,6 +627,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
cpu_spinwait();
}
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(td), "running");
GIANT_RESTORE();
if (i != alk_loops)
continue;
@@ -814,6 +824,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, lk, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(td), "spinning",
+ "lockname:\"%s\"", lk->lock_object.lo_name);
/*
* If we are holding also an interlock drop it
@@ -829,6 +842,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
while (LK_HOLDER(lk->lk_lock) ==
(uintptr_t)owner && TD_IS_RUNNING(owner))
cpu_spinwait();
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(td), "running");
GIANT_RESTORE();
continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
@@ -838,6 +853,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
!atomic_cmpset_ptr(&lk->lk_lock, x,
x | LK_EXCLUSIVE_SPINNERS))
continue;
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(td), "spinning",
+ "lockname:\"%s\"", lk->lock_object.lo_name);
if (flags & LK_INTERLOCK) {
class->lc_unlock(ilk);
flags &= ~LK_INTERLOCK;
@@ -854,6 +872,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
cpu_spinwait();
}
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(td), "running");
GIANT_RESTORE();
if (i != alk_loops)
continue;
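
To actually see the new state, the usual KTR workflow applies (assuming a
kernel built with the KTR options and a mask that includes KTR_SCHED): the
scheduling events are dumped with ktrdump(8) and the resulting log is fed
to tools/sched/schedgraph.py in the FreeBSD source tree, which can then
render "spinning" intervals alongside the existing "running" and blocked
thread states.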