summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
authordavide <davide@FreeBSD.org>2013-09-20 23:06:21 +0000
committerdavide <davide@FreeBSD.org>2013-09-20 23:06:21 +0000
commit5273d359fdd82fc7a93ff0f694844d25a2e17c59 (patch)
tree0f8acc1728a85e0911253a1fa7cfe9c10d2c05b4 /sys/kern
parent7ed30adae7eb0c6e156983ca0c96c04e1e6a8e0d (diff)
downloadFreeBSD-src-5273d359fdd82fc7a93ff0f694844d25a2e17c59.zip
FreeBSD-src-5273d359fdd82fc7a93ff0f694844d25a2e17c59.tar.gz
Fix lc_lock/lc_unlock() support for rmlocks held in shared mode. With
current lock classes KPI it was really difficult because there was no way to pass an rmtracker object to the lock/unlock routines. In order to accomplish the task, modify the aforementioned functions so that they can return (or pass as argument) a uintptr_t, which is in the rm case used to hold a pointer to struct rm_priotracker for current thread. As an added bonus, this fixes rm_sleep() in the rm shared case, which right now can communicate priotracker structure between lc_unlock()/lc_lock(). Suggested by: jhb Reviewed by: jhb Approved by: re (delphij)
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/kern_condvar.c5
-rw-r--r--sys/kern/kern_lock.c8
-rw-r--r--sys/kern/kern_mutex.c16
-rw-r--r--sys/kern/kern_rmlock.c61
-rw-r--r--sys/kern/kern_rwlock.c8
-rw-r--r--sys/kern/kern_sx.c8
-rw-r--r--sys/kern/kern_synch.c3
7 files changed, 69 insertions, 40 deletions
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 483ea2e..2700a25 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -97,7 +97,7 @@ _cv_wait(struct cv *cvp, struct lock_object *lock)
WITNESS_SAVE_DECL(lock_witness);
struct lock_class *class;
struct thread *td;
- int lock_state;
+ uintptr_t lock_state;
td = curthread;
lock_state = 0;
@@ -214,7 +214,8 @@ _cv_wait_sig(struct cv *cvp, struct lock_object *lock)
WITNESS_SAVE_DECL(lock_witness);
struct lock_class *class;
struct thread *td;
- int lock_state, rval;
+ uintptr_t lock_state;
+ int rval;
td = curthread;
lock_state = 0;
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 87dca63..74a5b19 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -142,12 +142,12 @@ static void assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void db_show_lockmgr(const struct lock_object *lock);
#endif
-static void lock_lockmgr(struct lock_object *lock, int how);
+static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_lockmgr(const struct lock_object *lock,
struct thread **owner);
#endif
-static int unlock_lockmgr(struct lock_object *lock);
+static uintptr_t unlock_lockmgr(struct lock_object *lock);
struct lock_class lock_class_lockmgr = {
.lc_name = "lockmgr",
@@ -350,13 +350,13 @@ assert_lockmgr(const struct lock_object *lock, int what)
}
static void
-lock_lockmgr(struct lock_object *lock, int how)
+lock_lockmgr(struct lock_object *lock, uintptr_t how)
{
panic("lockmgr locks do not support sleep interlocking");
}
-static int
+static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index cd1ed7d..e61a187 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -101,14 +101,14 @@ static void assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_mtx(const struct lock_object *lock);
#endif
-static void lock_mtx(struct lock_object *lock, int how);
-static void lock_spin(struct lock_object *lock, int how);
+static void lock_mtx(struct lock_object *lock, uintptr_t how);
+static void lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_mtx(const struct lock_object *lock,
struct thread **owner);
#endif
-static int unlock_mtx(struct lock_object *lock);
-static int unlock_spin(struct lock_object *lock);
+static uintptr_t unlock_mtx(struct lock_object *lock);
+static uintptr_t unlock_spin(struct lock_object *lock);
/*
* Lock classes for sleep and spin mutexes.
@@ -154,20 +154,20 @@ assert_mtx(const struct lock_object *lock, int what)
}
void
-lock_mtx(struct lock_object *lock, int how)
+lock_mtx(struct lock_object *lock, uintptr_t how)
{
mtx_lock((struct mtx *)lock);
}
void
-lock_spin(struct lock_object *lock, int how)
+lock_spin(struct lock_object *lock, uintptr_t how)
{
panic("spin locks can only use msleep_spin");
}
-int
+uintptr_t
unlock_mtx(struct lock_object *lock)
{
struct mtx *m;
@@ -178,7 +178,7 @@ unlock_mtx(struct lock_object *lock)
return (0);
}
-int
+uintptr_t
unlock_spin(struct lock_object *lock)
{
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index ff397eb..ec0e7fa 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -77,11 +77,11 @@ static void assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_rm(const struct lock_object *lock);
#endif
-static void lock_rm(struct lock_object *lock, int how);
+static void lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
-static int unlock_rm(struct lock_object *lock);
+static uintptr_t unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
.lc_name = "rm",
@@ -118,34 +118,61 @@ assert_rm(const struct lock_object *lock, int what)
rm_assert((const struct rmlock *)lock, what);
}
-/*
- * These do not support read locks because it would be hard to make
- * the tracker work correctly with the current lock_class API as you
- * would need to have the tracker pointer available when calling
- * rm_rlock() in lock_rm().
- */
static void
-lock_rm(struct lock_object *lock, int how)
+lock_rm(struct lock_object *lock, uintptr_t how)
{
struct rmlock *rm;
+ struct rm_priotracker *tracker;
rm = (struct rmlock *)lock;
- if (how)
+ if (how == 0)
rm_wlock(rm);
-#ifdef INVARIANTS
- else
- panic("lock_rm called in read mode");
-#endif
+ else {
+ tracker = (struct rm_priotracker *)how;
+ rm_rlock(rm, tracker);
+ }
}
-static int
+static uintptr_t
unlock_rm(struct lock_object *lock)
{
+ struct thread *td;
+ struct pcpu *pc;
struct rmlock *rm;
+ struct rm_queue *queue;
+ struct rm_priotracker *tracker;
+ uintptr_t how;
rm = (struct rmlock *)lock;
- rm_wunlock(rm);
- return (1);
+ tracker = NULL;
+ how = 0;
+ rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
+ if (rm_wowned(rm))
+ rm_wunlock(rm);
+ else {
+ /*
+ * Find the right rm_priotracker structure for curthread.
+ * The guarantee about its uniqueness is given by the fact
+ * we already asserted the lock wasn't recursively acquired.
+ */
+ critical_enter();
+ td = curthread;
+ pc = pcpu_find(curcpu);
+ for (queue = pc->pc_rm_queue.rmq_next;
+ queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
+ tracker = (struct rm_priotracker *)queue;
+ if ((tracker->rmp_rmlock == rm) &&
+ (tracker->rmp_thread == td)) {
+ how = (uintptr_t)tracker;
+ break;
+ }
+ }
+ KASSERT(tracker != NULL,
+ ("rm_priotracker is non-NULL when lock held in read mode"));
+ critical_exit();
+ rm_runlock(rm, tracker);
+ }
+ return (how);
}
#ifdef KDTRACE_HOOKS
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index bd40704..45993f2 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -83,11 +83,11 @@ SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
static void db_show_rwlock(const struct lock_object *lock);
#endif
static void assert_rw(const struct lock_object *lock, int what);
-static void lock_rw(struct lock_object *lock, int how);
+static void lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
-static int unlock_rw(struct lock_object *lock);
+static uintptr_t unlock_rw(struct lock_object *lock);
struct lock_class lock_class_rw = {
.lc_name = "rw",
@@ -141,7 +141,7 @@ assert_rw(const struct lock_object *lock, int what)
}
void
-lock_rw(struct lock_object *lock, int how)
+lock_rw(struct lock_object *lock, uintptr_t how)
{
struct rwlock *rw;
@@ -152,7 +152,7 @@ lock_rw(struct lock_object *lock, int how)
rw_rlock(rw);
}
-int
+uintptr_t
unlock_rw(struct lock_object *lock)
{
struct rwlock *rw;
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index ff5d95d..fd9a51f 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -116,11 +116,11 @@ static void assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_sx(const struct lock_object *lock);
#endif
-static void lock_sx(struct lock_object *lock, int how);
+static void lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
-static int unlock_sx(struct lock_object *lock);
+static uintptr_t unlock_sx(struct lock_object *lock);
struct lock_class lock_class_sx = {
.lc_name = "sx",
@@ -156,7 +156,7 @@ assert_sx(const struct lock_object *lock, int what)
}
void
-lock_sx(struct lock_object *lock, int how)
+lock_sx(struct lock_object *lock, uintptr_t how)
{
struct sx *sx;
@@ -167,7 +167,7 @@ lock_sx(struct lock_object *lock, int how)
sx_slock(sx);
}
-int
+uintptr_t
unlock_sx(struct lock_object *lock)
{
struct sx *sx;
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index b0e1908..0a400e9 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -157,7 +157,8 @@ _sleep(void *ident, struct lock_object *lock, int priority,
struct thread *td;
struct proc *p;
struct lock_class *class;
- int catch, lock_state, pri, rval, sleepq_flags;
+ uintptr_t lock_state;
+ int catch, pri, rval, sleepq_flags;
WITNESS_SAVE_DECL(lock_witness);
td = curthread;
OpenPOWER on IntegriCloud