summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_sx.c
diff options
context:
space:
mode:
authorattilio <attilio@FreeBSD.org>2009-06-02 13:03:35 +0000
committerattilio <attilio@FreeBSD.org>2009-06-02 13:03:35 +0000
commit44c490ae17f695a91f5103b5ac4fa0448be2455f (patch)
tree0574452322c16cb1cd1a6d29e497620399499dff /sys/kern/kern_sx.c
parent0937475a4fb805678e611b218d2687ff0fe43637 (diff)
downloadFreeBSD-src-44c490ae17f695a91f5103b5ac4fa0448be2455f.zip
FreeBSD-src-44c490ae17f695a91f5103b5ac4fa0448be2455f.tar.gz
Handle lock recursion differently by always checking against LO_RECURSABLE
instead of the lock's own flag itself. Tested by: pho
Diffstat (limited to 'sys/kern/kern_sx.c')
-rw-r--r--sys/kern/kern_sx.c14
1 file changed, 8 insertions, 6 deletions
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 6342252..04c2c98 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -66,8 +66,7 @@ __FBSDID("$FreeBSD$");
#define ADAPTIVE_SX
#endif
-CTASSERT(((SX_NOADAPTIVE | SX_RECURSE) & LO_CLASSFLAGS) ==
- (SX_NOADAPTIVE | SX_RECURSE));
+CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
/* Handy macros for sleep queues. */
#define SQ_EXCLUSIVE_QUEUE 0
@@ -207,17 +206,19 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
- flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
+ flags = LO_SLEEPABLE | LO_UPGRADABLE;
if (opts & SX_DUPOK)
flags |= LO_DUPOK;
if (opts & SX_NOPROFILE)
flags |= LO_NOPROFILE;
if (!(opts & SX_NOWITNESS))
flags |= LO_WITNESS;
+ if (opts & SX_RECURSE)
+ flags |= LO_RECURSABLE;
if (opts & SX_QUIET)
flags |= LO_QUIET;
- flags |= opts & (SX_NOADAPTIVE | SX_RECURSE);
+ flags |= opts & SX_NOADAPTIVE;
sx->sx_lock = SX_LOCK_UNLOCKED;
sx->sx_recurse = 0;
lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
@@ -305,7 +306,8 @@ _sx_try_xlock(struct sx *sx, const char *file, int line)
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
- if (sx_xlocked(sx) && (sx->lock_object.lo_flags & SX_RECURSE) != 0) {
+ if (sx_xlocked(sx) &&
+ (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
sx->sx_recurse++;
atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
rval = 1;
@@ -479,7 +481,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
/* If we already hold an exclusive lock, then recurse. */
if (sx_xlocked(sx)) {
- KASSERT((sx->lock_object.lo_flags & SX_RECURSE) != 0,
+ KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
sx->lock_object.lo_name, file, line));
sx->sx_recurse++;
OpenPOWER on IntegriCloud