author    | kib <kib@FreeBSD.org> | 2016-05-17 09:56:22 +0000
committer | kib <kib@FreeBSD.org> | 2016-05-17 09:56:22 +0000
commit    | 8da898f26c04f1b12f46ec60020d7d15d03799a9 (patch)
tree      | d626a38beb7329b5a3aa09877b11a5ec03a6eb38 /lib/libthr
parent    | afc75dd440c351adf17eb82272dd3e3f62f97410 (diff)
download  | FreeBSD-src-8da898f26c04f1b12f46ec60020d7d15d03799a9.zip, FreeBSD-src-8da898f26c04f1b12f46ec60020d7d15d03799a9.tar.gz
Add implementation of robust mutexes, hopefully close enough to the
intention of the POSIX IEEE Std 1003.1™-2008/Cor 1-2013.
A robust mutex is guaranteed to be cleared by the system upon either
thread or process owner termination while the mutex is held. The next
mutex locker is then notified about the inconsistent mutex state and can
execute (or abandon) corrective actions.
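
For context, here is a minimal sketch of the application-side protocol these
interfaces implement. The helper names and the recovery step are illustrative
only; the two new symbols used (pthread_mutexattr_setrobust() and
pthread_mutex_consistent()) are the ones exported under FBSD_1.4 in the
pthread.map hunk below:

```c
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m;

/* Create a robust mutex; a shared-memory setup would also set pshared. */
static int
robust_mutex_init(void)
{
	pthread_mutexattr_t attr;
	int error;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
	error = pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);
	return (error);
}

/* Lock, recovering the protected state if the previous owner died. */
static int
robust_mutex_lock(void)
{
	int error;

	error = pthread_mutex_lock(&m);
	if (error == EOWNERDEAD) {
		/* ... repair the data protected by the mutex here ... */
		error = pthread_mutex_consistent(&m);
	}
	return (error);
}
```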
The patch mostly consists of small changes here and there, adding the
necessary checks for the inconsistent and abandoned conditions to
existing paths. Additionally, the thread exit handler was extended to
iterate over the userspace-maintained list of owned robust mutexes,
unlocking each of them and marking it as terminated.
The list of owned robust mutexes cannot be maintained atomically with
the mutex lock state (that would be possible in the kernel, but is too
expensive). Instead, for the duration of a lock or unlock operation,
the current mutex is remembered in a special slot that is also checked
by the kernel at thread termination.
The kernel must be aware of the per-thread location of the heads of the
robust mutex lists and of the currently active mutex slot. When a
thread touches a robust mutex for the first time, a new umtx op
syscall is issued to inform the kernel of the location of the list heads.
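
In libthr this registration is issued lazily from mutex_init_robust() in
thr_mutex.c; below is a condensed, self-contained sketch of the same call.
It assumes the kernel-side definitions from this change
(UMTX_OP_ROBUST_LISTS and struct umtx_robust_lists_params) are visible
through <sys/umtx.h>, and it uses file-scope globals where libthr uses
per-thread fields of struct pthread:

```c
#include <sys/types.h>
#include <sys/umtx.h>

#include <stdint.h>

/* libthr keeps these in struct pthread; globals are used here for brevity. */
static uintptr_t robust_list;		/* head, process-shared robust mutexes */
static uintptr_t priv_robust_list;	/* head, process-private robust mutexes */
static uintptr_t inact_mtx;		/* mutex currently being locked/unlocked */

static void
register_robust_lists(void)
{
	struct umtx_robust_lists_params rb;

	/* Tell the kernel where to find the list heads and the active slot. */
	rb.robust_list_offset = (uintptr_t)&robust_list;
	rb.robust_priv_list_offset = (uintptr_t)&priv_robust_list;
	rb.robust_inact_offset = (uintptr_t)&inact_mtx;
	(void)_umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL);
}
```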
The umtx sleep queues for PP and PI mutexes are split between
non-robust and robust.
Somewhat unrelated changes in the patch:
1. Style.
2. A fix for the proper use of tdfind() in umtxq_sleep_pi() for shared
PI mutexes.
3. Removal of the userspace struct pthread_mutex m_owner field.
4. The sysctl kern.ipc.umtx_vnode_persistent is added, which controls
the lifetime of the shared mutex associated with a vnode's page (a
small usage sketch follows the list).
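
Querying the new knob is ordinary sysctl(3) usage and not specific to this
patch; a small sketch, assuming a kernel that already carries this change:

```c
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

/* Read kern.ipc.umtx_vnode_persistent (an integer on/off value). */
int
main(void)
{
	int val;
	size_t len;

	len = sizeof(val);
	if (sysctlbyname("kern.ipc.umtx_vnode_persistent", &val, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("kern.ipc.umtx_vnode_persistent = %d\n", val);
	return (0);
}
```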
Reviewed by: jilles (previous version, supposedly the objection was fixed)
Discussed with: brooks, Martin Simmons <martin@lispworks.com> (some aspects)
Tested by: pho
Sponsored by: The FreeBSD Foundation
Diffstat (limited to 'lib/libthr')
-rw-r--r-- | lib/libthr/pthread.map            |   6
-rw-r--r-- | lib/libthr/thread/thr_cond.c      | 116
-rw-r--r-- | lib/libthr/thread/thr_init.c      |  11
-rw-r--r-- | lib/libthr/thread/thr_mutex.c     | 380
-rw-r--r-- | lib/libthr/thread/thr_mutexattr.c |  67
-rw-r--r-- | lib/libthr/thread/thr_private.h   |  31
-rw-r--r-- | lib/libthr/thread/thr_umtx.c      | 165
-rw-r--r-- | lib/libthr/thread/thr_umtx.h      | 140
8 files changed, 618 insertions, 298 deletions
diff --git a/lib/libthr/pthread.map b/lib/libthr/pthread.map index 9fb72eb..e568393 100644 --- a/lib/libthr/pthread.map +++ b/lib/libthr/pthread.map @@ -315,3 +315,9 @@ FBSD_1.1 { FBSD_1.2 { pthread_getthreadid_np; }; + +FBSD_1.4 { + pthread_mutex_consistent; + pthread_mutexattr_getrobust; + pthread_mutexattr_setrobust; +}; diff --git a/lib/libthr/thread/thr_cond.c b/lib/libthr/thread/thr_cond.c index 0e37b70..4d9356a 100644 --- a/lib/libthr/thread/thr_cond.c +++ b/lib/libthr/thread/thr_cond.c @@ -188,46 +188,57 @@ _pthread_cond_destroy(pthread_cond_t *cond) */ static int cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp, - const struct timespec *abstime, int cancel) + const struct timespec *abstime, int cancel) { - struct pthread *curthread = _get_curthread(); - int recurse; - int error, error2 = 0; + struct pthread *curthread; + int error, error2, recurse, robust; + + curthread = _get_curthread(); + robust = _mutex_enter_robust(curthread, mp); error = _mutex_cv_detach(mp, &recurse); - if (error != 0) + if (error != 0) { + if (robust) + _mutex_leave_robust(curthread, mp); return (error); + } - if (cancel) { + if (cancel) _thr_cancel_enter2(curthread, 0); - error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters, - (struct umutex *)&mp->m_lock, abstime, - CVWAIT_ABSTIME|CVWAIT_CLOCKID); + error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters, + (struct umutex *)&mp->m_lock, abstime, CVWAIT_ABSTIME | + CVWAIT_CLOCKID); + if (cancel) _thr_cancel_leave(curthread, 0); - } else { - error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters, - (struct umutex *)&mp->m_lock, abstime, - CVWAIT_ABSTIME|CVWAIT_CLOCKID); - } /* * Note that PP mutex and ROBUST mutex may return * interesting error codes. */ if (error == 0) { - error2 = _mutex_cv_lock(mp, recurse); + error2 = _mutex_cv_lock(mp, recurse, true); } else if (error == EINTR || error == ETIMEDOUT) { - error2 = _mutex_cv_lock(mp, recurse); + error2 = _mutex_cv_lock(mp, recurse, true); + /* + * Do not do cancellation on EOWNERDEAD there. The + * cancellation cleanup handler will use the protected + * state and unlock the mutex without making the state + * consistent and the state will be unrecoverable. + */ if (error2 == 0 && cancel) _thr_testcancel(curthread); + if (error == EINTR) error = 0; } else { /* We know that it didn't unlock the mutex. */ - error2 = _mutex_cv_attach(mp, recurse); - if (error2 == 0 && cancel) + _mutex_cv_attach(mp, recurse); + if (cancel) _thr_testcancel(curthread); + error2 = 0; } + if (robust) + _mutex_leave_robust(curthread, mp); return (error2 != 0 ? error2 : error); } @@ -240,14 +251,13 @@ cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp, static int cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp, - const struct timespec *abstime, int cancel) + const struct timespec *abstime, int cancel) { - struct pthread *curthread = _get_curthread(); + struct pthread *curthread; struct sleepqueue *sq; - int recurse; - int error; - int defered; + int deferred, error, error2, recurse; + curthread = _get_curthread(); if (curthread->wchan != NULL) PANIC("thread was already on queue."); @@ -260,32 +270,31 @@ cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp, * us to check it without locking in pthread_cond_signal(). 
*/ cvp->__has_user_waiters = 1; - defered = 0; - (void)_mutex_cv_unlock(mp, &recurse, &defered); + deferred = 0; + (void)_mutex_cv_unlock(mp, &recurse, &deferred); curthread->mutex_obj = mp; _sleepq_add(cvp, curthread); for(;;) { _thr_clear_wake(curthread); _sleepq_unlock(cvp); - if (defered) { - defered = 0; + if (deferred) { + deferred = 0; if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0) - (void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2, - mp->m_lock.m_flags, 0, 0); + (void)_umtx_op_err(&mp->m_lock, + UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags, + 0, 0); } if (curthread->nwaiter_defer > 0) { _thr_wake_all(curthread->defer_waiters, - curthread->nwaiter_defer); + curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } - if (cancel) { + if (cancel) _thr_cancel_enter2(curthread, 0); - error = _thr_sleep(curthread, cvp->__clock_id, abstime); + error = _thr_sleep(curthread, cvp->__clock_id, abstime); + if (cancel) _thr_cancel_leave(curthread, 0); - } else { - error = _thr_sleep(curthread, cvp->__clock_id, abstime); - } _sleepq_lock(cvp); if (curthread->wchan == NULL) { @@ -293,25 +302,26 @@ cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp, break; } else if (cancel && SHOULD_CANCEL(curthread)) { sq = _sleepq_lookup(cvp); - cvp->__has_user_waiters = - _sleepq_remove(sq, curthread); + cvp->__has_user_waiters = _sleepq_remove(sq, curthread); _sleepq_unlock(cvp); curthread->mutex_obj = NULL; - _mutex_cv_lock(mp, recurse); + error2 = _mutex_cv_lock(mp, recurse, false); if (!THR_IN_CRITICAL(curthread)) _pthread_exit(PTHREAD_CANCELED); else /* this should not happen */ - return (0); + return (error2); } else if (error == ETIMEDOUT) { sq = _sleepq_lookup(cvp); cvp->__has_user_waiters = - _sleepq_remove(sq, curthread); + _sleepq_remove(sq, curthread); break; } } _sleepq_unlock(cvp); curthread->mutex_obj = NULL; - _mutex_cv_lock(mp, recurse); + error2 = _mutex_cv_lock(mp, recurse, false); + if (error == 0) + error = error2; return (error); } @@ -338,12 +348,12 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex, return (error); if (curthread->attr.sched_policy != SCHED_OTHER || - (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT| - USYNC_PROCESS_SHARED)) != 0 || + (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT | + USYNC_PROCESS_SHARED)) != 0 || (cvp->__flags & USYNC_PROCESS_SHARED) != 0) - return cond_wait_kernel(cvp, mp, abstime, cancel); + return (cond_wait_kernel(cvp, mp, abstime, cancel)); else - return cond_wait_user(cvp, mp, abstime, cancel); + return (cond_wait_user(cvp, mp, abstime, cancel)); } int @@ -420,15 +430,15 @@ cond_signal_common(pthread_cond_t *cond) td = _sleepq_first(sq); mp = td->mutex_obj; cvp->__has_user_waiters = _sleepq_remove(sq, td); - if (mp->m_owner == TID(curthread)) { + if (PMUTEX_OWNER_ID(mp) == TID(curthread)) { if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) { _thr_wake_all(curthread->defer_waiters, - curthread->nwaiter_defer); + curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } curthread->defer_waiters[curthread->nwaiter_defer++] = - &td->wake_addr->value; - mp->m_flags |= PMUTEX_FLAG_DEFERED; + &td->wake_addr->value; + mp->m_flags |= PMUTEX_FLAG_DEFERRED; } else { waddr = &td->wake_addr->value; } @@ -452,15 +462,15 @@ drop_cb(struct pthread *td, void *arg) struct pthread *curthread = ba->curthread; mp = td->mutex_obj; - if (mp->m_owner == TID(curthread)) { + if (PMUTEX_OWNER_ID(mp) == TID(curthread)) { if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) { 
_thr_wake_all(curthread->defer_waiters, - curthread->nwaiter_defer); + curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } curthread->defer_waiters[curthread->nwaiter_defer++] = - &td->wake_addr->value; - mp->m_flags |= PMUTEX_FLAG_DEFERED; + &td->wake_addr->value; + mp->m_flags |= PMUTEX_FLAG_DEFERRED; } else { if (ba->count >= MAX_DEFER_WAITERS) { _thr_wake_all(ba->waddrs, ba->count); diff --git a/lib/libthr/thread/thr_init.c b/lib/libthr/thread/thr_init.c index c852406..0927e15 100644 --- a/lib/libthr/thread/thr_init.c +++ b/lib/libthr/thread/thr_init.c @@ -94,6 +94,7 @@ struct pthread_mutex_attr _pthread_mutexattr_default = { .m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0, .m_pshared = PTHREAD_PROCESS_PRIVATE, + .m_robust = PTHREAD_MUTEX_STALLED, }; struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = { @@ -101,6 +102,7 @@ struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = { .m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0, .m_pshared = PTHREAD_PROCESS_PRIVATE, + .m_robust = PTHREAD_MUTEX_STALLED, }; /* Default condition variable attributes: */ @@ -265,7 +267,10 @@ static pthread_func_t jmp_table[][2] = { {DUAL_ENTRY(__pthread_cleanup_pop_imp)},/* PJT_CLEANUP_POP_IMP */ {DUAL_ENTRY(__pthread_cleanup_push_imp)},/* PJT_CLEANUP_PUSH_IMP */ {DUAL_ENTRY(_pthread_cancel_enter)}, /* PJT_CANCEL_ENTER */ - {DUAL_ENTRY(_pthread_cancel_leave)} /* PJT_CANCEL_LEAVE */ + {DUAL_ENTRY(_pthread_cancel_leave)}, /* PJT_CANCEL_LEAVE */ + {DUAL_ENTRY(_pthread_mutex_consistent)},/* PJT_MUTEX_CONSISTENT */ + {DUAL_ENTRY(_pthread_mutexattr_getrobust)},/* PJT_MUTEXATTR_GETROBUST */ + {DUAL_ENTRY(_pthread_mutexattr_setrobust)},/* PJT_MUTEXATTR_SETROBUST */ }; static int init_once = 0; @@ -308,7 +313,7 @@ _libpthread_init(struct pthread *curthread) int first, dlopened; /* Check if this function has already been called: */ - if ((_thr_initial != NULL) && (curthread == NULL)) + if (_thr_initial != NULL && curthread == NULL) /* Only initialize the threaded application once. */ return; @@ -316,7 +321,7 @@ _libpthread_init(struct pthread *curthread) * Check the size of the jump table to make sure it is preset * with the correct number of entries. */ - if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2)) + if (sizeof(jmp_table) != sizeof(pthread_func_t) * PJT_MAX * 2) PANIC("Thread jump table not properly initialized"); memcpy(__thr_jtable, jmp_table, sizeof(jmp_table)); __thr_interpose_libc(); diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c index f75ea6f..2d507e7 100644 --- a/lib/libthr/thread/thr_mutex.c +++ b/lib/libthr/thread/thr_mutex.c @@ -1,7 +1,7 @@ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * Copyright (c) 2006 David Xu <davidxu@freebsd.org>. - * Copyright (c) 2015 The FreeBSD Foundation + * Copyright (c) 2015, 2016 The FreeBSD Foundation * * All rights reserved. 
* @@ -39,7 +39,6 @@ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); -#include <stdbool.h> #include "namespace.h" #include <stdlib.h> #include <errno.h> @@ -64,6 +63,7 @@ _Static_assert(sizeof(struct pthread_mutex) <= PAGE_SIZE, /* * Prototypes */ +int __pthread_mutex_consistent(pthread_mutex_t *mutex); int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr); int __pthread_mutex_trylock(pthread_mutex_t *mutex); @@ -82,9 +82,13 @@ int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); static int mutex_self_trylock(pthread_mutex_t); static int mutex_self_lock(pthread_mutex_t, const struct timespec *abstime); -static int mutex_unlock_common(struct pthread_mutex *, int, int *); +static int mutex_unlock_common(struct pthread_mutex *, bool, int *); static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, const struct timespec *); +static void mutex_init_robust(struct pthread *curthread); +static int mutex_qidx(struct pthread_mutex *m); +static bool is_robust_mutex(struct pthread_mutex *m); +static bool is_pshared_mutex(struct pthread_mutex *m); __weak_reference(__pthread_mutex_init, pthread_mutex_init); __strong_reference(__pthread_mutex_init, _pthread_mutex_init); @@ -94,6 +98,8 @@ __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); __strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock); __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); __strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock); +__weak_reference(_pthread_mutex_consistent, pthread_mutex_consistent); +__strong_reference(_pthread_mutex_consistent, __pthread_mutex_consistent); /* Single underscore versions provided for libc internal usage: */ /* No difference between libc and application usage of these: */ @@ -125,23 +131,23 @@ mutex_init_link(struct pthread_mutex *m) } static void -mutex_assert_is_owned(struct pthread_mutex *m) +mutex_assert_is_owned(struct pthread_mutex *m __unused) { #if defined(_PTHREADS_INVARIANTS) if (__predict_false(m->m_qe.tqe_prev == NULL)) { char msg[128]; snprintf(msg, sizeof(msg), - "mutex %p own %#x %#x is not on list %p %p", - m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev, - m->m_qe.tqe_next); + "mutex %p own %#x is not on list %p %p", + m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); PANIC(msg); } #endif } static void -mutex_assert_not_owned(struct pthread_mutex *m) +mutex_assert_not_owned(struct pthread *curthread __unused, + struct pthread_mutex *m __unused) { #if defined(_PTHREADS_INVARIANTS) @@ -149,21 +155,68 @@ mutex_assert_not_owned(struct pthread_mutex *m) m->m_qe.tqe_next != NULL)) { char msg[128]; snprintf(msg, sizeof(msg), - "mutex %p own %#x %#x is on list %p %p", - m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev, - m->m_qe.tqe_next); + "mutex %p own %#x is on list %p %p", + m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); + PANIC(msg); + } + if (__predict_false(is_robust_mutex(m) && + (m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL || + (is_pshared_mutex(m) && curthread->robust_list == + (uintptr_t)&m->m_lock) || + (!is_pshared_mutex(m) && curthread->priv_robust_list == + (uintptr_t)&m->m_lock)))) { + char msg[128]; + snprintf(msg, sizeof(msg), + "mutex %p own %#x is on robust linkage %p %p head %p phead %p", + m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk, + m->m_rb_prev, (void *)curthread->robust_list, + (void *)curthread->priv_robust_list); PANIC(msg); } #endif } -static int +static bool is_pshared_mutex(struct pthread_mutex *m) { return 
((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0); } +static bool +is_robust_mutex(struct pthread_mutex *m) +{ + + return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0); +} + +int +_mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m) +{ + +#if defined(_PTHREADS_INVARIANTS) + if (__predict_false(curthread->inact_mtx != 0)) + PANIC("inact_mtx enter"); +#endif + if (!is_robust_mutex(m)) + return (0); + + mutex_init_robust(curthread); + curthread->inact_mtx = (uintptr_t)&m->m_lock; + return (1); +} + +void +_mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused) +{ + +#if defined(_PTHREADS_INVARIANTS) + if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock)) + PANIC("inact_mtx leave"); +#endif + curthread->inact_mtx = 0; +} + static int mutex_check_attr(const struct pthread_mutex_attr *attr) { @@ -178,12 +231,27 @@ mutex_check_attr(const struct pthread_mutex_attr *attr) } static void +mutex_init_robust(struct pthread *curthread) +{ + struct umtx_robust_lists_params rb; + + if (curthread == NULL) + curthread = _get_curthread(); + if (curthread->robust_inited) + return; + rb.robust_list_offset = (uintptr_t)&curthread->robust_list; + rb.robust_priv_list_offset = (uintptr_t)&curthread->priv_robust_list; + rb.robust_inact_offset = (uintptr_t)&curthread->inact_mtx; + _umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL); + curthread->robust_inited = 1; +} + +static void mutex_init_body(struct pthread_mutex *pmutex, const struct pthread_mutex_attr *attr) { pmutex->m_flags = attr->m_type; - pmutex->m_owner = 0; pmutex->m_count = 0; pmutex->m_spinloops = 0; pmutex->m_yieldloops = 0; @@ -205,7 +273,10 @@ mutex_init_body(struct pthread_mutex *pmutex, } if (attr->m_pshared == PTHREAD_PROCESS_SHARED) pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED; - + if (attr->m_robust == PTHREAD_MUTEX_ROBUST) { + mutex_init_robust(NULL); + pmutex->m_lock.m_flags |= UMUTEX_ROBUST; + } if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) { pmutex->m_spinloops = _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS; @@ -262,7 +333,7 @@ set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m) { struct pthread_mutex *m2; - m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue); + m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue); if (m2 != NULL) m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0]; else @@ -277,7 +348,8 @@ shared_mutex_init(struct pthread_mutex *pmtx, const struct .m_type = PTHREAD_MUTEX_DEFAULT, .m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0, - .m_pshared = PTHREAD_PROCESS_SHARED + .m_pshared = PTHREAD_PROCESS_SHARED, + .m_robust = PTHREAD_MUTEX_STALLED, }; bool done; @@ -329,7 +401,7 @@ __pthread_mutex_init(pthread_mutex_t *mutex, if (mutex_attr == NULL || (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) { return (mutex_init(mutex, mutex_attr ? 
*mutex_attr : NULL, - calloc)); + calloc)); } pmtx = __thr_pshared_offpage(mutex, 1); if (pmtx == NULL) @@ -349,6 +421,7 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, .m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0, .m_pshared = PTHREAD_PROCESS_PRIVATE, + .m_robust = PTHREAD_MUTEX_STALLED, }; int ret; @@ -378,7 +451,6 @@ queue_fork(struct pthread *curthread, struct mutex_queue *q, TAILQ_FOREACH(m, qp, m_pqe) { TAILQ_INSERT_TAIL(q, m, m_qe); m->m_lock.m_owner = TID(curthread) | bit; - m->m_owner = TID(curthread); } } @@ -390,6 +462,9 @@ _mutex_fork(struct pthread *curthread) &curthread->mq[TMQ_NORM_PRIV], 0); queue_fork(curthread, &curthread->mq[TMQ_NORM_PP], &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED); + queue_fork(curthread, &curthread->mq[TMQ_ROBUST_PP], + &curthread->mq[TMQ_ROBUST_PP_PRIV], UMUTEX_CONTESTED); + curthread->robust_list = 0; } int @@ -407,17 +482,18 @@ _pthread_mutex_destroy(pthread_mutex_t *mutex) if (m == THR_PSHARED_PTR) { m1 = __thr_pshared_offpage(mutex, 0); if (m1 != NULL) { - mutex_assert_not_owned(m1); + mutex_assert_not_owned(_get_curthread(), m1); __thr_pshared_destroy(mutex); } *mutex = THR_MUTEX_DESTROYED; return (0); } - if (m->m_owner != 0) { + if (PMUTEX_OWNER_ID(m) != 0 && + (uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) { ret = EBUSY; } else { *mutex = THR_MUTEX_DESTROYED; - mutex_assert_not_owned(m); + mutex_assert_not_owned(_get_curthread(), m); free(m); ret = 0; } @@ -432,31 +508,81 @@ mutex_qidx(struct pthread_mutex *m) if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) return (TMQ_NORM); - return (TMQ_NORM_PP); + return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP); } +/* + * Both enqueue_mutex() and dequeue_mutex() operate on the + * thread-private linkage of the locked mutexes and on the robust + * linkage. + * + * Robust list, as seen by kernel, must be consistent even in the case + * of thread termination at arbitrary moment. Since either enqueue or + * dequeue for list walked by kernel consists of rewriting a single + * forward pointer, it is safe. On the other hand, rewrite of the + * back pointer is not atomic WRT the forward one, but kernel does not + * care. + */ static void -enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m) +enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m, + int error) { + struct pthread_mutex *m1; + uintptr_t *rl; int qidx; - m->m_owner = TID(curthread); /* Add to the list of owned mutexes: */ - mutex_assert_not_owned(m); + if (error != EOWNERDEAD) + mutex_assert_not_owned(curthread, m); qidx = mutex_qidx(m); TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe); if (!is_pshared_mutex(m)) TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe); + if (is_robust_mutex(m)) { + rl = is_pshared_mutex(m) ? 
&curthread->robust_list : + &curthread->priv_robust_list; + m->m_rb_prev = NULL; + if (*rl != 0) { + m1 = __containerof((void *)*rl, + struct pthread_mutex, m_lock); + m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock; + m1->m_rb_prev = m; + } else { + m1 = NULL; + m->m_lock.m_rb_lnk = 0; + } + *rl = (uintptr_t)&m->m_lock; + } } static void dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m) { + struct pthread_mutex *mp, *mn; int qidx; - m->m_owner = 0; mutex_assert_is_owned(m); qidx = mutex_qidx(m); + if (is_robust_mutex(m)) { + mp = m->m_rb_prev; + if (mp == NULL) { + if (is_pshared_mutex(m)) { + curthread->robust_list = m->m_lock.m_rb_lnk; + } else { + curthread->priv_robust_list = + m->m_lock.m_rb_lnk; + } + } else { + mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk; + } + if (m->m_lock.m_rb_lnk != 0) { + mn = __containerof((void *)m->m_lock.m_rb_lnk, + struct pthread_mutex, m_lock); + mn->m_rb_prev = m->m_rb_prev; + } + m->m_lock.m_rb_lnk = 0; + m->m_rb_prev = NULL; + } TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe); if (!is_pshared_mutex(m)) TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe); @@ -496,7 +622,7 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex) struct pthread *curthread; struct pthread_mutex *m; uint32_t id; - int ret; + int ret, robust; ret = check_and_init_mutex(mutex, &m); if (ret != 0) @@ -505,27 +631,32 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex) id = TID(curthread); if (m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); + robust = _mutex_enter_robust(curthread, m); ret = _thr_umutex_trylock(&m->m_lock, id); - if (__predict_true(ret == 0)) { - enqueue_mutex(curthread, m); - } else if (m->m_owner == id) { + if (__predict_true(ret == 0) || ret == EOWNERDEAD) { + enqueue_mutex(curthread, m, ret); + if (ret == EOWNERDEAD) + m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; + } else if (PMUTEX_OWNER_ID(m) == id) { ret = mutex_self_trylock(m); } /* else {} */ - if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE)) + if (robust) + _mutex_leave_robust(curthread, m); + if ((ret == 0 || ret == EOWNERDEAD) && + (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0) THR_CRITICAL_LEAVE(curthread); return (ret); } static int mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, - const struct timespec *abstime) + const struct timespec *abstime) { - uint32_t id, owner; - int count; - int ret; + uint32_t id, owner; + int count, ret; id = TID(curthread); - if (m->m_owner == id) + if (PMUTEX_OWNER_ID(m) == id) return (mutex_self_lock(m, abstime)); /* @@ -534,10 +665,9 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, * the lock is likely to be released quickly and it is * faster than entering the kernel */ - if (__predict_false( - (m->m_lock.m_flags & - (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)) - goto sleep_in_kernel; + if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | + UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != 0)) + goto sleep_in_kernel; if (!_thr_is_smp) goto yield_loop; @@ -546,7 +676,8 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, while (count--) { owner = m->m_lock.m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { + if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, + id | owner)) { ret = 0; goto done; } @@ -560,7 +691,8 @@ yield_loop: _sched_yield(); owner = m->m_lock.m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { + if 
(atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, + id | owner)) { ret = 0; goto done; } @@ -568,38 +700,46 @@ yield_loop: } sleep_in_kernel: - if (abstime == NULL) { + if (abstime == NULL) ret = __thr_umutex_lock(&m->m_lock, id); - } else if (__predict_false( - abstime->tv_nsec < 0 || - abstime->tv_nsec >= 1000000000)) { + else if (__predict_false(abstime->tv_nsec < 0 || + abstime->tv_nsec >= 1000000000)) ret = EINVAL; - } else { + else ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); - } done: - if (ret == 0) - enqueue_mutex(curthread, m); - + if (ret == 0 || ret == EOWNERDEAD) { + enqueue_mutex(curthread, m, ret); + if (ret == EOWNERDEAD) + m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; + } return (ret); } static inline int -mutex_lock_common(struct pthread_mutex *m, - const struct timespec *abstime, int cvattach) +mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime, + bool cvattach, bool rb_onlist) { - struct pthread *curthread = _get_curthread(); - int ret; + struct pthread *curthread; + int ret, robust; + curthread = _get_curthread(); if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); - if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { - enqueue_mutex(curthread, m); - ret = 0; + if (!rb_onlist) + robust = _mutex_enter_robust(curthread, m); + ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread)); + if (ret == 0 || ret == EOWNERDEAD) { + enqueue_mutex(curthread, m, ret); + if (ret == EOWNERDEAD) + m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; } else { ret = mutex_lock_sleep(curthread, m, abstime); } - if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach) + if (!rb_onlist && robust) + _mutex_leave_robust(curthread, m); + if (ret != 0 && ret != EOWNERDEAD && + (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach) THR_CRITICAL_LEAVE(curthread); return (ret); } @@ -613,7 +753,7 @@ __pthread_mutex_lock(pthread_mutex_t *mutex) _thr_check_init(); ret = check_and_init_mutex(mutex, &m); if (ret == 0) - ret = mutex_lock_common(m, NULL, 0); + ret = mutex_lock_common(m, NULL, false, false); return (ret); } @@ -627,7 +767,7 @@ __pthread_mutex_timedlock(pthread_mutex_t *mutex, _thr_check_init(); ret = check_and_init_mutex(mutex, &m); if (ret == 0) - ret = mutex_lock_common(m, abstime, 0); + ret = mutex_lock_common(m, abstime, false, false); return (ret); } @@ -644,16 +784,16 @@ _pthread_mutex_unlock(pthread_mutex_t *mutex) } else { mp = *mutex; } - return (mutex_unlock_common(mp, 0, NULL)); + return (mutex_unlock_common(mp, false, NULL)); } int -_mutex_cv_lock(struct pthread_mutex *m, int count) +_mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist) { - int error; + int error; - error = mutex_lock_common(m, NULL, 1); - if (error == 0) + error = mutex_lock_common(m, NULL, true, rb_onlist); + if (error == 0 || error == EOWNERDEAD) m->m_count = count; return (error); } @@ -667,16 +807,17 @@ _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer) */ *count = m->m_count; m->m_count = 0; - (void)mutex_unlock_common(m, 1, defer); + (void)mutex_unlock_common(m, true, defer); return (0); } int _mutex_cv_attach(struct pthread_mutex *m, int count) { - struct pthread *curthread = _get_curthread(); + struct pthread *curthread; - enqueue_mutex(curthread, m); + curthread = _get_curthread(); + enqueue_mutex(curthread, m, 0); m->m_count = count; return (0); } @@ -684,12 +825,12 @@ _mutex_cv_attach(struct pthread_mutex *m, int count) int _mutex_cv_detach(struct pthread_mutex *mp, int *recurse) { - struct pthread *curthread = 
_get_curthread(); - int defered; - int error; + struct pthread *curthread; + int deferred, error; + curthread = _get_curthread(); if ((error = _mutex_owned(curthread, mp)) != 0) - return (error); + return (error); /* * Clear the count in case this is a recursive mutex. @@ -699,15 +840,15 @@ _mutex_cv_detach(struct pthread_mutex *mp, int *recurse) dequeue_mutex(curthread, mp); /* Will this happen in real-world ? */ - if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) { - defered = 1; - mp->m_flags &= ~PMUTEX_FLAG_DEFERED; + if ((mp->m_flags & PMUTEX_FLAG_DEFERRED) != 0) { + deferred = 1; + mp->m_flags &= ~PMUTEX_FLAG_DEFERRED; } else - defered = 0; + deferred = 0; - if (defered) { + if (deferred) { _thr_wake_all(curthread->defer_waiters, - curthread->nwaiter_defer); + curthread->nwaiter_defer); curthread->nwaiter_defer = 0; } return (0); @@ -716,7 +857,7 @@ _mutex_cv_detach(struct pthread_mutex *mp, int *recurse) static int mutex_self_trylock(struct pthread_mutex *m) { - int ret; + int ret; switch (PMUTEX_TYPE(m->m_flags)) { case PTHREAD_MUTEX_ERRORCHECK: @@ -746,7 +887,7 @@ static int mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) { struct timespec ts1, ts2; - int ret; + int ret; switch (PMUTEX_TYPE(m->m_flags)) { case PTHREAD_MUTEX_ERRORCHECK: @@ -812,11 +953,11 @@ mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) } static int -mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer) +mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer) { - struct pthread *curthread = _get_curthread(); + struct pthread *curthread; uint32_t id; - int defered, error; + int deferred, error, robust; if (__predict_false(m <= THR_MUTEX_DESTROYED)) { if (m == THR_MUTEX_DESTROYED) @@ -824,34 +965,39 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer) return (EPERM); } + curthread = _get_curthread(); id = TID(curthread); /* * Check if the running thread is not the owner of the mutex. 
*/ - if (__predict_false(m->m_owner != id)) + if (__predict_false(PMUTEX_OWNER_ID(m) != id)) return (EPERM); error = 0; - if (__predict_false( - PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE && - m->m_count > 0)) { + if (__predict_false(PMUTEX_TYPE(m->m_flags) == + PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) { m->m_count--; } else { - if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) { - defered = 1; - m->m_flags &= ~PMUTEX_FLAG_DEFERED; + if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) { + deferred = 1; + m->m_flags &= ~PMUTEX_FLAG_DEFERRED; } else - defered = 0; + deferred = 0; + robust = _mutex_enter_robust(curthread, m); dequeue_mutex(curthread, m); error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer); - - if (mtx_defer == NULL && defered) { - _thr_wake_all(curthread->defer_waiters, - curthread->nwaiter_defer); - curthread->nwaiter_defer = 0; + if (deferred) { + if (mtx_defer == NULL) { + _thr_wake_all(curthread->defer_waiters, + curthread->nwaiter_defer); + curthread->nwaiter_defer = 0; + } else + *mtx_defer = 1; } + if (robust) + _mutex_leave_robust(curthread, m); } if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_LEAVE(curthread); @@ -887,7 +1033,7 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex, struct pthread *curthread; struct pthread_mutex *m, *m1, *m2; struct mutex_queue *q, *qp; - int ret; + int qidx, ret; if (*mutex == THR_PSHARED_PTR) { m = __thr_pshared_offpage(mutex, 0); @@ -907,14 +1053,15 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex, return (ret); curthread = _get_curthread(); - if (m->m_owner == TID(curthread)) { + if (PMUTEX_OWNER_ID(m) == TID(curthread)) { mutex_assert_is_owned(m); m1 = TAILQ_PREV(m, mutex_queue, m_qe); m2 = TAILQ_NEXT(m, m_qe); if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { - q = &curthread->mq[TMQ_NORM_PP]; - qp = &curthread->mq[TMQ_NORM_PP_PRIV]; + qidx = mutex_qidx(m); + q = &curthread->mq[qidx]; + qp = &curthread->mq[qidx + 1]; TAILQ_REMOVE(q, m, m_qe); if (!is_pshared_mutex(m)) TAILQ_REMOVE(qp, m, m_pqe); @@ -1009,18 +1156,45 @@ _pthread_mutex_isowned_np(pthread_mutex_t *mutex) if (m <= THR_MUTEX_DESTROYED) return (0); } - return (m->m_owner == TID(_get_curthread())); + return (PMUTEX_OWNER_ID(m) == TID(_get_curthread())); } int _mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp) { + if (__predict_false(mp <= THR_MUTEX_DESTROYED)) { if (mp == THR_MUTEX_DESTROYED) return (EINVAL); return (EPERM); } - if (mp->m_owner != TID(curthread)) + if (PMUTEX_OWNER_ID(mp) != TID(curthread)) return (EPERM); return (0); } + +int +_pthread_mutex_consistent(pthread_mutex_t *mutex) +{ + struct pthread_mutex *m; + struct pthread *curthread; + + if (*mutex == THR_PSHARED_PTR) { + m = __thr_pshared_offpage(mutex, 0); + if (m == NULL) + return (EINVAL); + shared_mutex_init(m, NULL); + } else { + m = *mutex; + if (m <= THR_MUTEX_DESTROYED) + return (EINVAL); + } + curthread = _get_curthread(); + if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != + (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) + return (EINVAL); + if (PMUTEX_OWNER_ID(m) != TID(curthread)) + return (EPERM); + m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT; + return (0); +} diff --git a/lib/libthr/thread/thr_mutexattr.c b/lib/libthr/thread/thr_mutexattr.c index a9e07c2..d8a8671 100644 --- a/lib/libthr/thread/thr_mutexattr.c +++ b/lib/libthr/thread/thr_mutexattr.c @@ -80,8 +80,12 @@ __weak_reference(_pthread_mutexattr_getpshared, pthread_mutexattr_getpshared); 
__weak_reference(_pthread_mutexattr_setpshared, pthread_mutexattr_setpshared); __weak_reference(_pthread_mutexattr_getprotocol, pthread_mutexattr_getprotocol); __weak_reference(_pthread_mutexattr_setprotocol, pthread_mutexattr_setprotocol); -__weak_reference(_pthread_mutexattr_getprioceiling, pthread_mutexattr_getprioceiling); -__weak_reference(_pthread_mutexattr_setprioceiling, pthread_mutexattr_setprioceiling); +__weak_reference(_pthread_mutexattr_getprioceiling, + pthread_mutexattr_getprioceiling); +__weak_reference(_pthread_mutexattr_setprioceiling, + pthread_mutexattr_setprioceiling); +__weak_reference(_pthread_mutexattr_getrobust, pthread_mutexattr_getrobust); +__weak_reference(_pthread_mutexattr_setrobust, pthread_mutexattr_setrobust); int _pthread_mutexattr_init(pthread_mutexattr_t *attr) @@ -119,26 +123,28 @@ int _pthread_mutexattr_getkind_np(pthread_mutexattr_t attr) { int ret; + if (attr == NULL) { errno = EINVAL; ret = -1; } else { ret = attr->m_type; } - return(ret); + return (ret); } int _pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type) { int ret; + if (attr == NULL || *attr == NULL || type >= PTHREAD_MUTEX_TYPE_MAX) { ret = EINVAL; } else { (*attr)->m_type = type; ret = 0; } - return(ret); + return (ret); } int @@ -153,7 +159,7 @@ _pthread_mutexattr_gettype(pthread_mutexattr_t *attr, int *type) *type = (*attr)->m_type; ret = 0; } - return ret; + return (ret); } int @@ -167,7 +173,7 @@ _pthread_mutexattr_destroy(pthread_mutexattr_t *attr) *attr = NULL; ret = 0; } - return(ret); + return (ret); } int @@ -198,12 +204,12 @@ _pthread_mutexattr_getprotocol(pthread_mutexattr_t *mattr, int *protocol) { int ret = 0; - if ((mattr == NULL) || (*mattr == NULL)) + if (mattr == NULL || *mattr == NULL) ret = EINVAL; else *protocol = (*mattr)->m_protocol; - return(ret); + return (ret); } int @@ -211,14 +217,14 @@ _pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol) { int ret = 0; - if ((mattr == NULL) || (*mattr == NULL) || - (protocol < PTHREAD_PRIO_NONE) || (protocol > PTHREAD_PRIO_PROTECT)) + if (mattr == NULL || *mattr == NULL || + protocol < PTHREAD_PRIO_NONE || protocol > PTHREAD_PRIO_PROTECT) ret = EINVAL; else { (*mattr)->m_protocol = protocol; (*mattr)->m_ceiling = THR_MAX_RR_PRIORITY; } - return(ret); + return (ret); } int @@ -226,14 +232,14 @@ _pthread_mutexattr_getprioceiling(pthread_mutexattr_t *mattr, int *prioceiling) { int ret = 0; - if ((mattr == NULL) || (*mattr == NULL)) + if (mattr == NULL || *mattr == NULL) ret = EINVAL; else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT) ret = EINVAL; else *prioceiling = (*mattr)->m_ceiling; - return(ret); + return (ret); } int @@ -241,13 +247,44 @@ _pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling) { int ret = 0; - if ((mattr == NULL) || (*mattr == NULL)) + if (mattr == NULL || *mattr == NULL) ret = EINVAL; else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT) ret = EINVAL; else (*mattr)->m_ceiling = prioceiling; - return(ret); + return (ret); +} + +int +_pthread_mutexattr_getrobust(pthread_mutexattr_t *mattr, int *robust) +{ + int ret; + + if (mattr == NULL || *mattr == NULL) { + ret = EINVAL; + } else { + ret = 0; + *robust = (*mattr)->m_robust; + } + return (ret); +} + +int +_pthread_mutexattr_setrobust(pthread_mutexattr_t *mattr, int robust) +{ + int ret; + + if (mattr == NULL || *mattr == NULL) { + ret = EINVAL; + } else if (robust != PTHREAD_MUTEX_STALLED && + robust != PTHREAD_MUTEX_ROBUST) { + ret = EINVAL; + } else { + ret = 0; + (*mattr)->m_robust = robust; + 
} + return (ret); } diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h index f35d3cd..21399f3 100644 --- a/lib/libthr/thread/thr_private.h +++ b/lib/libthr/thread/thr_private.h @@ -45,6 +45,7 @@ #include <errno.h> #include <limits.h> #include <signal.h> +#include <stdbool.h> #include <stddef.h> #include <stdio.h> #include <unistd.h> @@ -141,9 +142,11 @@ TAILQ_HEAD(mutex_queue, pthread_mutex); #define PMUTEX_FLAG_TYPE_MASK 0x0ff #define PMUTEX_FLAG_PRIVATE 0x100 -#define PMUTEX_FLAG_DEFERED 0x200 +#define PMUTEX_FLAG_DEFERRED 0x200 #define PMUTEX_TYPE(mtxflags) ((mtxflags) & PMUTEX_FLAG_TYPE_MASK) +#define PMUTEX_OWNER_ID(m) ((m)->m_lock.m_owner & ~UMUTEX_CONTESTED) + #define MAX_DEFER_WAITERS 50 /* @@ -159,7 +162,6 @@ struct pthread_mutex { */ struct umutex m_lock; int m_flags; - uint32_t m_owner; int m_count; int m_spinloops; int m_yieldloops; @@ -171,6 +173,7 @@ struct pthread_mutex { TAILQ_ENTRY(pthread_mutex) m_qe; /* Link for all private mutexes a thread currently owns. */ TAILQ_ENTRY(pthread_mutex) m_pqe; + struct pthread_mutex *m_rb_prev; }; struct pthread_mutex_attr { @@ -178,10 +181,12 @@ struct pthread_mutex_attr { int m_protocol; int m_ceiling; int m_pshared; + int m_robust; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ - { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } + { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE, \ + PTHREAD_MUTEX_STALLED } struct pthread_cond { __uint32_t __has_user_waiters; @@ -491,7 +496,9 @@ struct pthread { #define TMQ_NORM_PRIV 1 /* NORMAL or PRIO_INHERIT normal priv */ #define TMQ_NORM_PP 2 /* PRIO_PROTECT normal mutexes */ #define TMQ_NORM_PP_PRIV 3 /* PRIO_PROTECT normal priv */ -#define TMQ_NITEMS 4 +#define TMQ_ROBUST_PP 4 /* PRIO_PROTECT robust mutexes */ +#define TMQ_ROBUST_PP_PRIV 5 /* PRIO_PROTECT robust priv */ +#define TMQ_NITEMS 6 struct mutex_queue mq[TMQ_NITEMS]; void *ret; @@ -545,6 +552,11 @@ struct pthread { /* Number of threads deferred. */ int nwaiter_defer; + int robust_inited; + uintptr_t robust_list; + uintptr_t priv_robust_list; + uintptr_t inact_mtx; + /* Deferred threads from pthread_cond_signal. 
*/ unsigned int *defer_waiters[MAX_DEFER_WAITERS]; #define _pthread_endzero wake_addr @@ -754,13 +766,17 @@ extern struct pthread *_single_thread __hidden; */ __BEGIN_DECLS int _thr_setthreaded(int) __hidden; -int _mutex_cv_lock(struct pthread_mutex *, int) __hidden; +int _mutex_cv_lock(struct pthread_mutex *, int, bool) __hidden; int _mutex_cv_unlock(struct pthread_mutex *, int *, int *) __hidden; int _mutex_cv_attach(struct pthread_mutex *, int) __hidden; int _mutex_cv_detach(struct pthread_mutex *, int *) __hidden; int _mutex_owned(struct pthread *, const struct pthread_mutex *) __hidden; int _mutex_reinit(pthread_mutex_t *) __hidden; void _mutex_fork(struct pthread *curthread) __hidden; +int _mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m) + __hidden; +void _mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m) + __hidden; void _libpthread_init(struct pthread *) __hidden; struct pthread *_thr_alloc(struct pthread *) __hidden; void _thread_exit(const char *, int, const char *) __hidden __dead2; @@ -819,6 +835,11 @@ void _pthread_cleanup_pop(int); void _pthread_exit_mask(void *status, sigset_t *mask) __dead2 __hidden; void _pthread_cancel_enter(int maycancel); void _pthread_cancel_leave(int maycancel); +int _pthread_mutex_consistent(pthread_mutex_t *) __nonnull(1); +int _pthread_mutexattr_getrobust(pthread_mutexattr_t *__restrict, + int *__restrict) __nonnull_all; +int _pthread_mutexattr_setrobust(pthread_mutexattr_t *, int) + __nonnull(1); /* #include <fcntl.h> */ #ifdef _SYS_FCNTL_H_ diff --git a/lib/libthr/thread/thr_umtx.c b/lib/libthr/thread/thr_umtx.c index ebf344b..cd2b101 100644 --- a/lib/libthr/thread/thr_umtx.c +++ b/lib/libthr/thread/thr_umtx.c @@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$"); #ifndef HAS__UMTX_OP_ERR int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2) { + if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1) return (errno); return (0); @@ -60,19 +61,24 @@ __thr_umutex_lock(struct umutex *mtx, uint32_t id) { uint32_t owner; - if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { - for (;;) { - /* wait in kernel */ - _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0) + return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0)); - owner = mtx->m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0 && - atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) - return (0); - } + for (;;) { + owner = mtx->m_owner; + if ((owner & ~UMUTEX_CONTESTED) == 0 && + atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner)) + return (0); + if (owner == UMUTEX_RB_OWNERDEAD && + atomic_cmpset_acq_32(&mtx->m_owner, owner, + id | UMUTEX_CONTESTED)) + return (EOWNERDEAD); + if (owner == UMUTEX_RB_NOTRECOV) + return (ENOTRECOVERABLE); + + /* wait in kernel */ + _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); } - - return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); } #define SPINLOOPS 1000 @@ -81,31 +87,33 @@ int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) { uint32_t owner; + int count; if (!_thr_is_smp) - return __thr_umutex_lock(mtx, id); - - if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { - for (;;) { - int count = SPINLOOPS; - while (count--) { - owner = mtx->m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32( - &mtx->m_owner, - owner, id|owner)) { - return (0); - } - } - CPU_SPINWAIT; - } + return (__thr_umutex_lock(mtx, id)); + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | 
UMUTEX_PRIO_INHERIT)) != 0) + return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0)); - /* wait in kernel */ - _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); + for (;;) { + count = SPINLOOPS; + while (count--) { + owner = mtx->m_owner; + if ((owner & ~UMUTEX_CONTESTED) == 0 && + atomic_cmpset_acq_32(&mtx->m_owner, owner, + id | owner)) + return (0); + if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) && + atomic_cmpset_acq_32(&mtx->m_owner, owner, + id | UMUTEX_CONTESTED)) + return (EOWNERDEAD); + if (__predict_false(owner == UMUTEX_RB_NOTRECOV)) + return (ENOTRECOVERABLE); + CPU_SPINWAIT; } - } - return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); + /* wait in kernel */ + _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); + } } int @@ -129,21 +137,28 @@ __thr_umutex_timedlock(struct umutex *mtx, uint32_t id, } for (;;) { - if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { - - /* wait in kernel */ - ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, - (void *)tm_size, __DECONST(void *, tm_p)); - - /* now try to lock it */ + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | + UMUTEX_PRIO_INHERIT)) == 0) { + /* try to lock it */ owner = mtx->m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0 && - atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) + atomic_cmpset_acq_32(&mtx->m_owner, owner, + id | owner)) return (0); + if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) && + atomic_cmpset_acq_32(&mtx->m_owner, owner, + id | UMUTEX_CONTESTED)) + return (EOWNERDEAD); + if (__predict_false(owner == UMUTEX_RB_NOTRECOV)) + return (ENOTRECOVERABLE); + /* wait in kernel */ + ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, + (void *)tm_size, __DECONST(void *, tm_p)); } else { ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, - (void *)tm_size, __DECONST(void *, tm_p)); - if (ret == 0) + (void *)tm_size, __DECONST(void *, tm_p)); + if (ret == 0 || ret == EOWNERDEAD || + ret == ENOTRECOVERABLE) break; } if (ret == ETIMEDOUT) @@ -155,46 +170,52 @@ __thr_umutex_timedlock(struct umutex *mtx, uint32_t id, int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) { - return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0); + + return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0)); } int __thr_umutex_trylock(struct umutex *mtx) { - return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0); + + return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0)); } int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling, - uint32_t *oldceiling) + uint32_t *oldceiling) { - return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0); + + return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0)); } int _thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout) { + if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && - timeout->tv_nsec <= 0))) + timeout->tv_nsec <= 0))) return (ETIMEDOUT); - return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0, - __DECONST(void*, timeout)); + return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0, + __DECONST(void*, timeout))); } int -_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared) +_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, + const struct timespec *timeout, int shared) { + if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && - timeout->tv_nsec <= 0))) + timeout->tv_nsec <= 0))) return (ETIMEDOUT); - return _umtx_op_err(__DEVOLATILE(void *, mtx), - shared ? 
UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0, - __DECONST(void*, timeout)); + return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ? + UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0, + __DECONST(void*, timeout))); } int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid, - const struct timespec *abstime, int shared) + const struct timespec *abstime, int shared) { struct _umtx_time *tm_p, timeout; size_t tm_size; @@ -210,21 +231,23 @@ _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid, tm_size = sizeof(timeout); } - return _umtx_op_err(__DEVOLATILE(void *, mtx), - shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, - (void *)tm_size, __DECONST(void *, tm_p)); + return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ? + UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, + (void *)tm_size, __DECONST(void *, tm_p))); } int _thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared) { - return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, - nr_wakeup, 0, 0); + + return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ? + UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0)); } void _thr_ucond_init(struct ucond *cv) { + bzero(cv, sizeof(struct ucond)); } @@ -232,30 +255,34 @@ int _thr_ucond_wait(struct ucond *cv, struct umutex *m, const struct timespec *timeout, int flags) { + struct pthread *curthread; + if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))) { - struct pthread *curthread = _get_curthread(); + curthread = _get_curthread(); _thr_umutex_unlock(m, TID(curthread)); return (ETIMEDOUT); } - return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, - m, __DECONST(void*, timeout)); + return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m, + __DECONST(void*, timeout))); } int _thr_ucond_signal(struct ucond *cv) { + if (!cv->c_has_waiters) return (0); - return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL); + return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL)); } int _thr_ucond_broadcast(struct ucond *cv) { + if (!cv->c_has_waiters) return (0); - return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL); + return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL)); } int @@ -275,7 +302,8 @@ __thr_rwlock_rdlock(struct urwlock *rwlock, int flags, tm_p = &timeout; tm_size = sizeof(timeout); } - return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, (void *)tm_size, tm_p); + return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, + (void *)tm_size, tm_p)); } int @@ -294,13 +322,15 @@ __thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp) tm_p = &timeout; tm_size = sizeof(timeout); } - return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size, tm_p); + return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size, + tm_p)); } int __thr_rwlock_unlock(struct urwlock *rwlock) { - return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL); + + return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL)); } void @@ -338,6 +368,7 @@ _thr_rwl_wrlock(struct urwlock *rwlock) void _thr_rwl_unlock(struct urwlock *rwlock) { + if (_thr_rwlock_unlock(rwlock)) PANIC("unlock error"); } diff --git a/lib/libthr/thread/thr_umtx.h b/lib/libthr/thread/thr_umtx.h index 2c289a7..fff8729 100644 --- a/lib/libthr/thread/thr_umtx.h +++ b/lib/libthr/thread/thr_umtx.h @@ -32,7 +32,11 @@ #include <strings.h> #include <sys/umtx.h> -#define DEFAULT_UMUTEX {0,0,{0,0},{0,0,0,0}} +#ifdef __LP64__ +#define DEFAULT_UMUTEX {0,0,{0,0},0,{0,0}} +#else +#define 
DEFAULT_UMUTEX {0,0,{0,0},0,0,{0,0}} +#endif #define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}} int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden; @@ -75,95 +79,122 @@ void _thr_rwl_unlock(struct urwlock *rwlock) __hidden; static inline int _thr_umutex_trylock(struct umutex *mtx, uint32_t id) { - if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id)) - return (0); - if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0) - return (EBUSY); - return (__thr_umutex_trylock(mtx)); + + if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id)) + return (0); + if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) && + atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD, + id | UMUTEX_CONTESTED)) + return (EOWNERDEAD); + if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV)) + return (ENOTRECOVERABLE); + if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0) + return (EBUSY); + return (__thr_umutex_trylock(mtx)); } static inline int _thr_umutex_trylock2(struct umutex *mtx, uint32_t id) { - if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0) - return (0); - if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED && - __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0)) - if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED)) + + if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0) return (0); - return (EBUSY); + if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED && + __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | + UMUTEX_PRIO_INHERIT)) == 0) && + atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, + id | UMUTEX_CONTESTED)) + return (0); + if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) && + atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD, + id | UMUTEX_CONTESTED)) + return (EOWNERDEAD); + if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV)) + return (ENOTRECOVERABLE); + return (EBUSY); } static inline int _thr_umutex_lock(struct umutex *mtx, uint32_t id) { - if (_thr_umutex_trylock2(mtx, id) == 0) - return (0); - return (__thr_umutex_lock(mtx, id)); + + if (_thr_umutex_trylock2(mtx, id) == 0) + return (0); + return (__thr_umutex_lock(mtx, id)); } static inline int _thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) { - if (_thr_umutex_trylock2(mtx, id) == 0) - return (0); - return (__thr_umutex_lock_spin(mtx, id)); + + if (_thr_umutex_trylock2(mtx, id) == 0) + return (0); + return (__thr_umutex_lock_spin(mtx, id)); } static inline int _thr_umutex_timedlock(struct umutex *mtx, uint32_t id, - const struct timespec *timeout) + const struct timespec *timeout) { - if (_thr_umutex_trylock2(mtx, id) == 0) - return (0); - return (__thr_umutex_timedlock(mtx, id, timeout)); + + if (_thr_umutex_trylock2(mtx, id) == 0) + return (0); + return (__thr_umutex_timedlock(mtx, id, timeout)); } static inline int _thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer) { - uint32_t flags = mtx->m_flags; + uint32_t flags, owner; + bool noncst; - if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { - uint32_t owner; - do { - owner = mtx->m_owner; - if (__predict_false((owner & ~UMUTEX_CONTESTED) != id)) - return (EPERM); - } while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner, - owner, UMUTEX_UNOWNED))); - if ((owner & UMUTEX_CONTESTED)) { - if (defer == NULL) - (void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0); - else - *defer = 1; - } - return (0); + flags = mtx->m_flags; + noncst = (flags & UMUTEX_NONCONSISTENT) != 0; + + if 
((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0) { + if (atomic_cmpset_rel_32(&mtx->m_owner, id, noncst ? + UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED)) + return (0); + return (__thr_umutex_unlock(mtx, id)); } - if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED)) - return (0); - return (__thr_umutex_unlock(mtx, id)); + + do { + owner = mtx->m_owner; + if (__predict_false((owner & ~UMUTEX_CONTESTED) != id)) + return (EPERM); + } while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner, owner, + noncst ? UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED))); + if ((owner & UMUTEX_CONTESTED) != 0) { + if (defer == NULL || noncst) + (void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, + flags, 0, 0); + else + *defer = 1; + } + return (0); } static inline int _thr_umutex_unlock(struct umutex *mtx, uint32_t id) { - return _thr_umutex_unlock2(mtx, id, NULL); + + return (_thr_umutex_unlock2(mtx, id, NULL)); } static inline int _thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags) { - int32_t state; - int32_t wrflags; + int32_t state, wrflags; - if (flags & URWLOCK_PREFER_READER || rwlock->rw_flags & URWLOCK_PREFER_READER) + if ((flags & URWLOCK_PREFER_READER) != 0 || + (rwlock->rw_flags & URWLOCK_PREFER_READER) != 0) wrflags = URWLOCK_WRITE_OWNER; else wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS; state = rwlock->rw_state; while (!(state & wrflags)) { - if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) + if (__predict_false(URWLOCK_READER_COUNT(state) == + URWLOCK_MAX_READERS)) return (EAGAIN); if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1)) return (0); @@ -179,8 +210,10 @@ _thr_rwlock_trywrlock(struct urwlock *rwlock) int32_t state; state = rwlock->rw_state; - while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) { - if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER)) + while ((state & URWLOCK_WRITE_OWNER) == 0 && + URWLOCK_READER_COUNT(state) == 0) { + if (atomic_cmpset_acq_32(&rwlock->rw_state, state, + state | URWLOCK_WRITE_OWNER)) return (0); state = rwlock->rw_state; } @@ -191,6 +224,7 @@ _thr_rwlock_trywrlock(struct urwlock *rwlock) static inline int _thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) { + if (_thr_rwlock_tryrdlock(rwlock, flags) == 0) return (0); return (__thr_rwlock_rdlock(rwlock, flags, tsp)); @@ -199,6 +233,7 @@ _thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) static inline int _thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) { + if (_thr_rwlock_trywrlock(rwlock) == 0) return (0); return (__thr_rwlock_wrlock(rwlock, tsp)); @@ -210,18 +245,19 @@ _thr_rwlock_unlock(struct urwlock *rwlock) int32_t state; state = rwlock->rw_state; - if (state & URWLOCK_WRITE_OWNER) { - if (atomic_cmpset_rel_32(&rwlock->rw_state, URWLOCK_WRITE_OWNER, 0)) + if ((state & URWLOCK_WRITE_OWNER) != 0) { + if (atomic_cmpset_rel_32(&rwlock->rw_state, + URWLOCK_WRITE_OWNER, 0)) return (0); } else { for (;;) { if (__predict_false(URWLOCK_READER_COUNT(state) == 0)) return (EPERM); if (!((state & (URWLOCK_WRITE_WAITERS | - URWLOCK_READ_WAITERS)) && + URWLOCK_READ_WAITERS)) != 0 && URWLOCK_READER_COUNT(state) == 1)) { if (atomic_cmpset_rel_32(&rwlock->rw_state, - state, state-1)) + state, state - 1)) return (0); state = rwlock->rw_state; } else { |