diff options
author    | jhb <jhb@FreeBSD.org> | 2003-03-04 20:32:41 +0000
committer | jhb <jhb@FreeBSD.org> | 2003-03-04 20:32:41 +0000
commit    | f78f351da3ab2ef37d3c47a8577df0cec4e271cd (patch)
tree      | 842b508fe0eef7a8afde073a77ad4c34f18e1d8d /sys
parent    | 3b2bb7e47b9eeacda34542c9c0135f7cff2bbeab (diff)
download  | FreeBSD-src-f78f351da3ab2ef37d3c47a8577df0cec4e271cd.zip
          | FreeBSD-src-f78f351da3ab2ef37d3c47a8577df0cec4e271cd.tar.gz
Miscellaneous cleanups to _mtx_lock_sleep():
- Declare some local variables at the top of the function instead of in a
nested block.
- Use mtx_owned() instead of masking off bits from mtx_lock manually.
- Read the value of mtx_lock into 'v' as a separate line rather than inside
an if statement for clarity. This code is hairy enough as it is.
Diffstat (limited to 'sys')
-rw-r--r-- | sys/kern/kern_mutex.c     | 10
-rw-r--r-- | sys/kern/subr_turnstile.c | 10
2 files changed, 12 insertions, 8 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index f5641c5..d2d907f 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -487,14 +487,16 @@ void
 _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 {
 	struct thread *td = curthread;
+	struct thread *td1;
 #if defined(SMP) && defined(ADAPTIVE_MUTEXES)
 	struct thread *owner;
 #endif
+	uintptr_t v;
 #ifdef KTR
 	int cont_logged = 0;
 #endif
 
-	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
+	if (mtx_owned(m)) {
 		m->mtx_recurse++;
 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -508,15 +510,15 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
 
 	while (!_obtain_lock(m, td)) {
-		uintptr_t v;
-		struct thread *td1;
 
 		mtx_lock_spin(&sched_lock);
+		v = m->mtx_lock;
+
 		/*
 		 * Check if the lock has been released while spinning for
 		 * the sched_lock.
 		 */
-		if ((v = m->mtx_lock) == MTX_UNOWNED) {
+		if (v == MTX_UNOWNED) {
 			mtx_unlock_spin(&sched_lock);
 #ifdef __i386__
 			ia32_pause();
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index f5641c5..d2d907f 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -487,14 +487,16 @@ void
 _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 {
 	struct thread *td = curthread;
+	struct thread *td1;
 #if defined(SMP) && defined(ADAPTIVE_MUTEXES)
 	struct thread *owner;
 #endif
+	uintptr_t v;
 #ifdef KTR
 	int cont_logged = 0;
 #endif
 
-	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
+	if (mtx_owned(m)) {
 		m->mtx_recurse++;
 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -508,15 +510,15 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
 
 	while (!_obtain_lock(m, td)) {
-		uintptr_t v;
-		struct thread *td1;
 
 		mtx_lock_spin(&sched_lock);
+		v = m->mtx_lock;
+
 		/*
 		 * Check if the lock has been released while spinning for
 		 * the sched_lock.
 		 */
-		if ((v = m->mtx_lock) == MTX_UNOWNED) {
+		if (v == MTX_UNOWNED) {
 			mtx_unlock_spin(&sched_lock);
 #ifdef __i386__
 			ia32_pause();