summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
authormarius <marius@FreeBSD.org>2017-09-21 19:24:11 +0000
committermarius <marius@FreeBSD.org>2017-09-21 19:24:11 +0000
commitee65f3381759668f6c2764e955c7d73070eb732e (patch)
tree0d65c11b02f2a445b1451527c04a423a4e6c663d /sys/kern
parent699bdb134051aad5ab4cca219b931597f3a9c819 (diff)
downloadFreeBSD-src-ee65f3381759668f6c2764e955c7d73070eb732e.zip
FreeBSD-src-ee65f3381759668f6c2764e955c7d73070eb732e.tar.gz
MFC: r275751
Add a _NEW flag to mtx(9), sx(9), rmlock(9) and rwlock(9). A _NEW flag may be passed to the _init_flags() variants to skip the check for double initialization.
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/kern_mutex.c4
-rw-r--r--sys/kern/kern_rmlock.c12
-rw-r--r--sys/kern/kern_rwlock.c4
-rw-r--r--sys/kern/kern_sx.c4
-rw-r--r--sys/kern/subr_lock.c4
5 files changed, 20 insertions, 8 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 27038e1..11fb7db 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -979,7 +979,7 @@ _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
m = mtxlock2mtx(c);
MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
- MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
+ MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
("%s: mtx_lock not aligned for %s: %p", __func__, name,
&m->mtx_lock));
@@ -1005,6 +1005,8 @@ _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
flags |= LO_DUPOK;
if (opts & MTX_NOPROFILE)
flags |= LO_NOPROFILE;
+ if (opts & MTX_NEW)
+ flags |= LO_NEW;
/* Initialize mutex. */
lock_init(&m->lock_object, class, name, type, flags);
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index aa8d4b0..6b5df78 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -278,22 +278,28 @@ void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
struct lock_class *lc;
- int liflags;
+ int liflags, xflags;
liflags = 0;
if (!(opts & RM_NOWITNESS))
liflags |= LO_WITNESS;
if (opts & RM_RECURSE)
liflags |= LO_RECURSABLE;
+ if (opts & RM_NEW)
+ liflags |= LO_NEW;
rm->rm_writecpus = all_cpus;
LIST_INIT(&rm->rm_activeReaders);
if (opts & RM_SLEEPABLE) {
liflags |= LO_SLEEPABLE;
lc = &lock_class_rm_sleepable;
- sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
+ xflags = (opts & RM_NEW ? SX_NEW : 0);
+ sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
+ xflags | SX_NOWITNESS);
} else {
lc = &lock_class_rm;
- mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
+ xflags = (opts & RM_NEW ? MTX_NEW : 0);
+ mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
+ xflags | MTX_NOWITNESS);
}
lock_init(&rm->lock_object, lc, name, NULL, liflags);
}
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 8559840..29929d4 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -216,7 +216,7 @@ _rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
rw = rwlock2rw(c);
MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
- RW_RECURSE)) == 0);
+ RW_RECURSE | RW_NEW)) == 0);
ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
("%s: rw_lock not aligned for %s: %p", __func__, name,
&rw->rw_lock));
@@ -232,6 +232,8 @@ _rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
flags |= LO_RECURSABLE;
if (opts & RW_QUIET)
flags |= LO_QUIET;
+ if (opts & RW_NEW)
+ flags |= LO_NEW;
lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
rw->rw_lock = RW_UNLOCKED;
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 5d418c2..ca4a60c 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -240,7 +240,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
int flags;
MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
- SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
+ SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
("%s: sx_lock not aligned for %s: %p", __func__, description,
&sx->sx_lock));
@@ -256,6 +256,8 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
flags |= LO_RECURSABLE;
if (opts & SX_QUIET)
flags |= LO_QUIET;
+ if (opts & SX_NEW)
+ flags |= LO_NEW;
flags |= opts & SX_NOADAPTIVE;
lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index cacaf56..3f374b3 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -76,8 +76,8 @@ lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
int i;
/* Check for double-init and zero object. */
- KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
- name, lock));
+ KASSERT(flags & LO_NEW || !lock_initalized(lock),
+ ("lock \"%s\" %p already initialized", name, lock));
/* Look up lock class to find its index. */
for (i = 0; i < LOCK_CLASS_MAX; i++)
OpenPOWER on IntegriCloud