author    attilio <attilio@FreeBSD.org>  2012-11-03 15:57:37 +0000
committer attilio <attilio@FreeBSD.org>  2012-11-03 15:57:37 +0000
commit    c754915a07dad30e703ea916dcdd42cbd277ba0d (patch)
tree      aecf10ec8867ab9a1e449167fd877415227cd562
parent    3d05bb3fd002b937ae19b73aa9f6481480ce5ac1 (diff)
download  FreeBSD-src-c754915a07dad30e703ea916dcdd42cbd277ba0d.zip
          FreeBSD-src-c754915a07dad30e703ea916dcdd42cbd277ba0d.tar.gz
Merge r242395, r242483 from the mutex implementation:

give rwlock(9) the ability to handle different types of structures, with the only constraint that they have a lock cookie named rw_lock. This name then becomes reserved: any struct that wants to use the rwlock(9) KPI must provide it, and other locking primitives cannot reuse it for their own members. Currently such structs are struct rwlock and the new struct rwlock_padalign. The new structure defines an object with the same layout as struct rwlock, but allocated in areas aligned to the cache line size and padded to occupy a full cache line. For further details, check the comments in the above-mentioned revisions.

Reviewed by: jimharris, jeff
-rw-r--r--  sys/kern/kern_rwlock.c   97
-rw-r--r--  sys/sys/_rwlock.h        23
-rw-r--r--  sys/sys/rwlock.h         78
3 files changed, 155 insertions(+), 43 deletions(-)
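To illustrate the intent of the change, here is a minimal consumer-side sketch (not part of this commit; the structure and field names are hypothetical) showing how a pad-aligned rwlock is embedded in a larger structure and driven through the unchanged rwlock(9) macros:

/*
 * Hypothetical consumer structure.  The pad-aligned lock is kept as the
 * first member so the compiler does not have to add further padding to
 * keep the lock cache-line aligned.
 */
struct frob_softc {
	struct rwlock_padalign	sc_lock;	/* cache-line sized and aligned */
	int			sc_count;
};

static void
frob_init(struct frob_softc *sc)
{

	/* Initialization goes through the usual wrapper macro. */
	rw_init(&sc->sc_lock, "frob lock");
	sc->sc_count = 0;
}

static void
frob_bump(struct frob_softc *sc)
{

	/*
	 * The ordinary rwlock(9) KPI is used unchanged: the wrapper macros
	 * take &(rw)->rw_lock, and both struct rwlock and the new
	 * struct rwlock_padalign provide that cookie member.
	 */
	rw_wlock(&sc->sc_lock);
	sc->sc_count++;
	rw_wunlock(&sc->sc_lock);
}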
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 3a51874..60a7faa 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -61,6 +61,12 @@ __FBSDID("$FreeBSD$");
PMC_SOFT_DECLARE( , , lock, failed);
#endif
+/*
+ * Return the rwlock address when the lock cookie address is provided.
+ * This functionality assumes that every struct rwlock* has a member named rw_lock.
+ */
+#define rwlock2rw(c) (__containerof(c, struct rwlock, rw_lock))
+
#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
@@ -123,7 +129,7 @@ struct lock_class lock_class_rw = {
#define rw_owner(rw) rw_wowner(rw)
#ifndef INVARIANTS
-#define _rw_assert(rw, what, file, line)
+#define __rw_assert(c, what, file, line)
#endif
void
@@ -175,10 +181,13 @@ owner_rw(const struct lock_object *lock, struct thread **owner)
#endif
void
-rw_init_flags(struct rwlock *rw, const char *name, int opts)
+_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
+ struct rwlock *rw;
int flags;
+ rw = rwlock2rw(c);
+
MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
RW_RECURSE)) == 0);
ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
@@ -203,8 +212,11 @@ rw_init_flags(struct rwlock *rw, const char *name, int opts)
}
void
-rw_destroy(struct rwlock *rw)
+_rw_destroy(volatile uintptr_t *c)
{
+ struct rwlock *rw;
+
+ rw = rwlock2rw(c);
KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
@@ -217,7 +229,7 @@ rw_sysinit(void *arg)
{
struct rw_args *args = arg;
- rw_init(args->ra_rw, args->ra_desc);
+ rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}
void
@@ -225,22 +237,27 @@ rw_sysinit_flags(void *arg)
{
struct rw_args_flags *args = arg;
- rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
+ rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
+ args->ra_flags);
}
int
-rw_wowned(const struct rwlock *rw)
+_rw_wowned(const volatile uintptr_t *c)
{
- return (rw_wowner(rw) == curthread);
+ return (rw_wowner(rwlock2rw(c)) == curthread);
}
void
-_rw_wlock(struct rwlock *rw, const char *file, int line)
+_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
+ struct rwlock *rw;
if (SCHEDULER_STOPPED())
return;
+
+ rw = rwlock2rw(c);
+
KASSERT(!TD_IS_IDLETHREAD(curthread),
("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
curthread, rw->lock_object.lo_name, file, line));
@@ -255,13 +272,16 @@ _rw_wlock(struct rwlock *rw, const char *file, int line)
}
int
-_rw_try_wlock(struct rwlock *rw, const char *file, int line)
+__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
+ struct rwlock *rw;
int rval;
if (SCHEDULER_STOPPED())
return (1);
+ rw = rwlock2rw(c);
+
KASSERT(!TD_IS_IDLETHREAD(curthread),
("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
curthread, rw->lock_object.lo_name, file, line));
@@ -286,14 +306,18 @@ _rw_try_wlock(struct rwlock *rw, const char *file, int line)
}
void
-_rw_wunlock(struct rwlock *rw, const char *file, int line)
+_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
+ struct rwlock *rw;
if (SCHEDULER_STOPPED())
return;
+
+ rw = rwlock2rw(c);
+
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
- _rw_assert(rw, RA_WLOCKED, file, line);
+ __rw_assert(c, RA_WLOCKED, file, line);
curthread->td_locks--;
WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
@@ -315,8 +339,9 @@ _rw_wunlock(struct rwlock *rw, const char *file, int line)
RW_LOCK_READ)
void
-_rw_rlock(struct rwlock *rw, const char *file, int line)
+__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
+ struct rwlock *rw;
struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
volatile struct thread *owner;
@@ -337,6 +362,8 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
+ rw = rwlock2rw(c);
+
KASSERT(!TD_IS_IDLETHREAD(curthread),
("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
curthread, rw->lock_object.lo_name, file, line));
@@ -521,13 +548,16 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
}
int
-_rw_try_rlock(struct rwlock *rw, const char *file, int line)
+__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
+ struct rwlock *rw;
uintptr_t x;
if (SCHEDULER_STOPPED())
return (1);
+ rw = rwlock2rw(c);
+
KASSERT(!TD_IS_IDLETHREAD(curthread),
("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
curthread, rw->lock_object.lo_name, file, line));
@@ -553,17 +583,20 @@ _rw_try_rlock(struct rwlock *rw, const char *file, int line)
}
void
-_rw_runlock(struct rwlock *rw, const char *file, int line)
+_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
+ struct rwlock *rw;
struct turnstile *ts;
uintptr_t x, v, queue;
if (SCHEDULER_STOPPED())
return;
+ rw = rwlock2rw(c);
+
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
- _rw_assert(rw, RA_RLOCKED, file, line);
+ __rw_assert(c, RA_RLOCKED, file, line);
curthread->td_locks--;
curthread->td_rw_rlocks--;
WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
@@ -667,8 +700,10 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
* read or write lock.
*/
void
-_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
+__rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
+ int line)
{
+ struct rwlock *rw;
struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
volatile struct thread *owner;
@@ -689,6 +724,8 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
+ rw = rwlock2rw(c);
+
if (rw_wlocked(rw)) {
KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
("%s: recursing but non-recursive rw %s @ %s:%d\n",
@@ -850,8 +887,10 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
* least one thread is waiting on this lock.
*/
void
-_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
+__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
+ int line)
{
+ struct rwlock *rw;
struct turnstile *ts;
uintptr_t v;
int queue;
@@ -859,6 +898,8 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
+ rw = rwlock2rw(c);
+
if (rw_wlocked(rw) && rw_recursed(rw)) {
rw->rw_recurse--;
if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -915,8 +956,9 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
* lock. Returns true if the upgrade succeeded and false otherwise.
*/
int
-_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
+__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
+ struct rwlock *rw;
uintptr_t v, x, tid;
struct turnstile *ts;
int success;
@@ -924,9 +966,11 @@ _rw_try_upgrade(struct rwlock *rw, const char *file, int line)
if (SCHEDULER_STOPPED())
return (1);
+ rw = rwlock2rw(c);
+
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
- _rw_assert(rw, RA_RLOCKED, file, line);
+ __rw_assert(c, RA_RLOCKED, file, line);
/*
* Attempt to switch from one reader to a writer. If there
@@ -988,8 +1032,9 @@ _rw_try_upgrade(struct rwlock *rw, const char *file, int line)
* Downgrade a write lock into a single read lock.
*/
void
-_rw_downgrade(struct rwlock *rw, const char *file, int line)
+__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
+ struct rwlock *rw;
struct turnstile *ts;
uintptr_t tid, v;
int rwait, wwait;
@@ -997,9 +1042,11 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
+ rw = rwlock2rw(c);
+
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
- _rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
+ __rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
if (rw_recursed(rw))
panic("downgrade of a recursed lock");
@@ -1053,7 +1100,7 @@ out:
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
-#undef _rw_assert
+#undef __rw_assert
#endif
/*
@@ -1062,11 +1109,15 @@ out:
* thread owns an rlock.
*/
void
-_rw_assert(const struct rwlock *rw, int what, const char *file, int line)
+__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
+ const struct rwlock *rw;
if (panicstr != NULL)
return;
+
+ rw = rwlock2rw(c);
+
switch (what) {
case RA_LOCKED:
case RA_LOCKED | RA_RECURSED:
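The rwlock2rw() macro introduced above recovers the struct rwlock from the address of its rw_lock cookie via __containerof(). A rough stand-alone sketch of that idea (simplified, userland-style; the kernel macro additionally strips qualifiers such as volatile, and the structure below only mimics the real lock layout):

#include <stddef.h>
#include <stdint.h>

/*
 * Simplified stand-in for __containerof(): given a pointer to a member,
 * step back by the member's offset to reach the enclosing structure.
 */
#define containerof(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rwlock_like {
	void		*lo_placeholder;	/* stands in for struct lock_object */
	uintptr_t	 rw_lock;		/* the reserved lock cookie */
};

/* What rwlock2rw() does in kern_rwlock.c: cookie address in, lock out. */
static struct rwlock_like *
cookie_to_lock(uintptr_t *c)
{

	return (containerof(c, struct rwlock_like, rw_lock));
}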
diff --git a/sys/sys/_rwlock.h b/sys/sys/_rwlock.h
index c5adac0..7b8c6c0 100644
--- a/sys/sys/_rwlock.h
+++ b/sys/sys/_rwlock.h
@@ -32,12 +32,35 @@
#ifndef _SYS__RWLOCK_H_
#define _SYS__RWLOCK_H_
+#include <machine/param.h>
+
/*
* Reader/writer lock.
+ *
+ * The layout of the first 2 members of struct rwlock* is considered fixed.
+ * More specifically, it is assumed that there is a member called rw_lock
+ * for every struct rwlock* and that other locking primitive structures are
+ * not allowed to use such name for their members.
+ * If this needs to change, the bits in the rwlock implementation might be
+ * modified appropriately.
*/
struct rwlock {
struct lock_object lock_object;
volatile uintptr_t rw_lock;
};
+/*
+ * Members of struct rwlock_padalign must mirror members of struct rwlock.
+ * rwlock_padalign rwlocks can use the rwlock(9) KPI transparently, without
+ * modification.
+ * When using pad-aligned rwlocks within structures, they should generally
+ * stay as the first member of the struct. This is because otherwise the
+ * compiler can generate ever more padding for the struct to keep a correct
+ * alignment for the rwlock.
+ */
+struct rwlock_padalign {
+ struct lock_object lock_object;
+ volatile uintptr_t rw_lock;
+} __aligned(CACHE_LINE_SIZE);
+
#endif /* !_SYS__RWLOCK_H_ */
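A small host-side sketch of what the __aligned(CACHE_LINE_SIZE) annotation buys. The 64-byte line size and the lock_object stand-in are assumptions made only for this example; the kernel takes the real value from machine/param.h and __aligned() maps to the compiler attribute used below:

#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE_SIZE	64		/* assumed for the example */

struct lock_object_like {		/* rough stand-in for struct lock_object */
	const char	*lo_name;
	unsigned int	 lo_flags;
	unsigned int	 lo_data;
	void		*lo_witness;
};

struct rwlock_like {
	struct lock_object_like	lock_object;
	volatile uintptr_t	rw_lock;
};

struct rwlock_padalign_like {
	struct lock_object_like	lock_object;
	volatile uintptr_t	rw_lock;
} __attribute__((__aligned__(CACHE_LINE_SIZE)));

int
main(void)
{

	/*
	 * The padded variant is rounded up to and aligned on a full cache
	 * line, so adjacent locks never share a line (no false sharing).
	 */
	printf("plain:    size %zu, align %zu\n",
	    sizeof(struct rwlock_like), _Alignof(struct rwlock_like));
	printf("padalign: size %zu, align %zu\n",
	    sizeof(struct rwlock_padalign_like),
	    _Alignof(struct rwlock_padalign_like));
	return (0);
}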
diff --git a/sys/sys/rwlock.h b/sys/sys/rwlock.h
index 83bf57d..8623c05 100644
--- a/sys/sys/rwlock.h
+++ b/sys/sys/rwlock.h
@@ -121,29 +121,67 @@
* external API and should not be called directly. Wrapper macros should
* be used instead.
*/
-
-#define rw_init(rw, name) rw_init_flags((rw), (name), 0)
-void rw_init_flags(struct rwlock *rw, const char *name, int opts);
-void rw_destroy(struct rwlock *rw);
+void _rw_init_flags(volatile uintptr_t *c, const char *name, int opts);
+void _rw_destroy(volatile uintptr_t *c);
void rw_sysinit(void *arg);
void rw_sysinit_flags(void *arg);
-int rw_wowned(const struct rwlock *rw);
-void _rw_wlock(struct rwlock *rw, const char *file, int line);
-int _rw_try_wlock(struct rwlock *rw, const char *file, int line);
-void _rw_wunlock(struct rwlock *rw, const char *file, int line);
-void _rw_rlock(struct rwlock *rw, const char *file, int line);
-int _rw_try_rlock(struct rwlock *rw, const char *file, int line);
-void _rw_runlock(struct rwlock *rw, const char *file, int line);
-void _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file,
+int _rw_wowned(const volatile uintptr_t *c);
+void _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line);
+int __rw_try_wlock(volatile uintptr_t *c, const char *file, int line);
+void _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line);
+void __rw_rlock(volatile uintptr_t *c, const char *file, int line);
+int __rw_try_rlock(volatile uintptr_t *c, const char *file, int line);
+void _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line);
+void __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
int line);
-void _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file,
+void __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid,
+ const char *file, int line);
+int __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line);
+void __rw_downgrade(volatile uintptr_t *c, const char *file, int line);
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+void __rw_assert(const volatile uintptr_t *c, int what, const char *file,
int line);
-int _rw_try_upgrade(struct rwlock *rw, const char *file, int line);
-void _rw_downgrade(struct rwlock *rw, const char *file, int line);
+#endif
+
+/*
+ * Top-level macros to provide lock cookie once the actual rwlock is passed.
+ * They will also prevent passing a malformed object to the rwlock KPI by
+ * failing compilation as the rw_lock reserved member will not be found.
+ */
+#define rw_init(rw, n) \
+ _rw_init_flags(&(rw)->rw_lock, n, 0)
+#define rw_init_flags(rw, n, o) \
+ _rw_init_flags(&(rw)->rw_lock, n, o)
+#define rw_destroy(rw) \
+ _rw_destroy(&(rw)->rw_lock)
+#define rw_wowned(rw) \
+ _rw_wowned(&(rw)->rw_lock)
+#define _rw_wlock(rw, f, l) \
+ _rw_wlock_cookie(&(rw)->rw_lock, f, l)
+#define _rw_try_wlock(rw, f, l) \
+ __rw_try_wlock(&(rw)->rw_lock, f, l)
+#define _rw_wunlock(rw, f, l) \
+ _rw_wunlock_cookie(&(rw)->rw_lock, f, l)
+#define _rw_rlock(rw, f, l) \
+ __rw_rlock(&(rw)->rw_lock, f, l)
+#define _rw_try_rlock(rw, f, l) \
+ __rw_try_rlock(&(rw)->rw_lock, f, l)
+#define _rw_runlock(rw, f, l) \
+ _rw_runlock_cookie(&(rw)->rw_lock, f, l)
+#define _rw_wlock_hard(rw, t, f, l) \
+ __rw_wlock_hard(&(rw)->rw_lock, t, f, l)
+#define _rw_wunlock_hard(rw, t, f, l) \
+ __rw_wunlock_hard(&(rw)->rw_lock, t, f, l)
+#define _rw_try_upgrade(rw, f, l) \
+ __rw_try_upgrade(&(rw)->rw_lock, f, l)
+#define _rw_downgrade(rw, f, l) \
+ __rw_downgrade(&(rw)->rw_lock, f, l)
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
-void _rw_assert(const struct rwlock *rw, int what, const char *file, int line);
+#define _rw_assert(rw, w, f, l) \
+ __rw_assert(&(rw)->rw_lock, w, f, l)
#endif
+
/*
* Public interface for lock operations.
*/
@@ -178,12 +216,12 @@ void _rw_assert(const struct rwlock *rw, int what, const char *file, int line);
#define rw_initialized(rw) lock_initalized(&(rw)->lock_object)
struct rw_args {
- struct rwlock *ra_rw;
+ void *ra_rw;
const char *ra_desc;
};
struct rw_args_flags {
- struct rwlock *ra_rw;
+ void *ra_rw;
const char *ra_desc;
int ra_flags;
};
@@ -196,7 +234,7 @@ struct rw_args_flags {
SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
rw_sysinit, &name##_args); \
SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
- rw_destroy, (rw))
+ _rw_destroy, __DEVOLATILE(void *, &(rw)->rw_lock))
#define RW_SYSINIT_FLAGS(name, rw, desc, flags) \
@@ -208,7 +246,7 @@ struct rw_args_flags {
SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
rw_sysinit_flags, &name##_args); \
SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
- rw_destroy, (rw))
+ _rw_destroy, __DEVOLATILE(void *, &(rw)->rw_lock))
/*
* Options passed to rw_init_flags().
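Finally, a sketch of why the cookie-taking wrapper macros catch malformed objects at compile time: every macro expands to a call on &(rw)->rw_lock, so a structure without the reserved rw_lock member simply fails to build. The names below are hypothetical and only mirror the pattern of the new top-level macros in sys/sys/rwlock.h:

#include <stdint.h>

/* Stand-in for the real locking routine; only the cookie type matters. */
static void
my_rw_wlock_cookie(volatile uintptr_t *c)
{

	(void)c;		/* a real implementation would acquire the lock */
}

/* Mirrors the cookie-taking macro pattern used by the rwlock(9) wrappers. */
#define my_rw_wlock(rw)	my_rw_wlock_cookie(&(rw)->rw_lock)

struct ok_lock    { volatile uintptr_t rw_lock; };
struct bogus_lock { uintptr_t not_a_cookie; };

static void
example(struct ok_lock *a, struct bogus_lock *b)
{

	my_rw_wlock(a);		/* compiles: a->rw_lock exists */
#if 0
	my_rw_wlock(b);		/* would not compile: no rw_lock member */
#endif
	(void)b;
}

int
main(void)
{
	struct ok_lock a = { .rw_lock = 0 };
	struct bogus_lock b = { .not_a_cookie = 0 };

	example(&a, &b);
	return (0);
}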