summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_rmlock.c
diff options
context:
space:
mode:
authordavide <davide@FreeBSD.org>2013-09-20 23:06:21 +0000
committerdavide <davide@FreeBSD.org>2013-09-20 23:06:21 +0000
commit5273d359fdd82fc7a93ff0f694844d25a2e17c59 (patch)
tree0f8acc1728a85e0911253a1fa7cfe9c10d2c05b4 /sys/kern/kern_rmlock.c
parent7ed30adae7eb0c6e156983ca0c96c04e1e6a8e0d (diff)
downloadFreeBSD-src-5273d359fdd82fc7a93ff0f694844d25a2e17c59.zip
FreeBSD-src-5273d359fdd82fc7a93ff0f694844d25a2e17c59.tar.gz
Fix lc_lock/lc_unlock() support for rmlocks held in shared mode. With
current lock classes KPI it was really difficult because there was no way to pass an rmtracker object to the lock/unlock routines. In order to accomplish the task, modify the aforementioned functions so that they can return (or pass as argument) a uintptr_t, which is in the rm case used to hold a pointer to struct rm_priotracker for current thread. As an added bonus, this fixes rm_sleep() in the rm shared case, which right now can communicate priotracker structure between lc_unlock()/lc_lock(). Suggested by: jhb Reviewed by: jhb Approved by: re (delphij)
Diffstat (limited to 'sys/kern/kern_rmlock.c')
-rw-r--r--sys/kern/kern_rmlock.c61
1 file changed, 44 insertions, 17 deletions
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index ff397eb..ec0e7fa 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -77,11 +77,11 @@ static void assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_rm(const struct lock_object *lock);
#endif
-static void lock_rm(struct lock_object *lock, int how);
+static void lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
-static int unlock_rm(struct lock_object *lock);
+static uintptr_t unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
.lc_name = "rm",
@@ -118,34 +118,61 @@ assert_rm(const struct lock_object *lock, int what)
rm_assert((const struct rmlock *)lock, what);
}
-/*
- * These do not support read locks because it would be hard to make
- * the tracker work correctly with the current lock_class API as you
- * would need to have the tracker pointer available when calling
- * rm_rlock() in lock_rm().
- */
static void
-lock_rm(struct lock_object *lock, int how)
+lock_rm(struct lock_object *lock, uintptr_t how)
{
struct rmlock *rm;
+ struct rm_priotracker *tracker;
rm = (struct rmlock *)lock;
- if (how)
+ if (how == 0)
rm_wlock(rm);
-#ifdef INVARIANTS
- else
- panic("lock_rm called in read mode");
-#endif
+ else {
+ tracker = (struct rm_priotracker *)how;
+ rm_rlock(rm, tracker);
+ }
}
-static int
+static uintptr_t
unlock_rm(struct lock_object *lock)
{
+ struct thread *td;
+ struct pcpu *pc;
struct rmlock *rm;
+ struct rm_queue *queue;
+ struct rm_priotracker *tracker;
+ uintptr_t how;
rm = (struct rmlock *)lock;
- rm_wunlock(rm);
- return (1);
+ tracker = NULL;
+ how = 0;
+ rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
+ if (rm_wowned(rm))
+ rm_wunlock(rm);
+ else {
+ /*
+ * Find the right rm_priotracker structure for curthread.
+ * The guarantee about its uniqueness is given by the fact
+ * we already asserted the lock wasn't recursively acquired.
+ */
+ critical_enter();
+ td = curthread;
+ pc = pcpu_find(curcpu);
+ for (queue = pc->pc_rm_queue.rmq_next;
+ queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
+ tracker = (struct rm_priotracker *)queue;
+ if ((tracker->rmp_rmlock == rm) &&
+ (tracker->rmp_thread == td)) {
+ how = (uintptr_t)tracker;
+ break;
+ }
+ }
+ KASSERT(tracker != NULL,
+ ("rm_priotracker is non-NULL when lock held in read mode"));
+ critical_exit();
+ rm_runlock(rm, tracker);
+ }
+ return (how);
}
#ifdef KDTRACE_HOOKS
OpenPOWER on IntegriCloud