author     kib <kib@FreeBSD.org>    2016-02-28 17:52:33 +0000
committer  kib <kib@FreeBSD.org>    2016-02-28 17:52:33 +0000
commit     e76eb4255b957aa73f6228dd8d525d1946e3707d (patch)
tree       93354adb0a612a635964c8498072087760a0f93b /lib/libthr/thread/thr_mutex.c
parent     800b1f3198ded0c65c024ea0cef1f44d4bc59fed (diff)
download   FreeBSD-src-e76eb4255b957aa73f6228dd8d525d1946e3707d.zip
           FreeBSD-src-e76eb4255b957aa73f6228dd8d525d1946e3707d.tar.gz
Implement process-shared locks support for libthr.so.3, without
breaking the ABI.  A special value is stored in the lock pointer to
indicate a shared lock, and an offpage in the shared memory is
allocated to store the actual lock.

Reviewed by:	vangyzen (previous version)
Discussed with:	deischen, emaste, jhb, rwatson, Martin Simmons <martin@lispworks.com>
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Diffstat (limited to 'lib/libthr/thread/thr_mutex.c')
-rw-r--r--  lib/libthr/thread/thr_mutex.c  516
1 file changed, 338 insertions(+), 178 deletions(-)
diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
index 26e8776..30a8be2 100644
--- a/lib/libthr/thread/thr_mutex.c
+++ b/lib/libthr/thread/thr_mutex.c
@@ -1,8 +1,13 @@
/*
* Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
* Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
+ * Copyright (c) 2015 The FreeBSD Foundation
+ *
* All rights reserved.
*
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -45,26 +50,6 @@
#include "thr_private.h"
-#if defined(_PTHREADS_INVARIANTS)
-#define MUTEX_INIT_LINK(m) do { \
- (m)->m_qe.tqe_prev = NULL; \
- (m)->m_qe.tqe_next = NULL; \
-} while (0)
-#define MUTEX_ASSERT_IS_OWNED(m) do { \
- if (__predict_false((m)->m_qe.tqe_prev == NULL))\
- PANIC("mutex is not on list"); \
-} while (0)
-#define MUTEX_ASSERT_NOT_OWNED(m) do { \
- if (__predict_false((m)->m_qe.tqe_prev != NULL || \
- (m)->m_qe.tqe_next != NULL)) \
- PANIC("mutex is on list"); \
-} while (0)
-#else
-#define MUTEX_INIT_LINK(m)
-#define MUTEX_ASSERT_IS_OWNED(m)
-#define MUTEX_ASSERT_NOT_OWNED(m)
-#endif
-
/*
* For adaptive mutexes, how many times to spin doing trylock2
* before entering the kernel to block
@@ -122,36 +107,71 @@ __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloop
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
+static void
+mutex_init_link(struct pthread_mutex *m)
+{
+
+#if defined(_PTHREADS_INVARIANTS)
+ m->m_qe.tqe_prev = NULL;
+ m->m_qe.tqe_next = NULL;
+ m->m_pqe.tqe_prev = NULL;
+ m->m_pqe.tqe_next = NULL;
+#endif
+}
+
+static void
+mutex_assert_is_owned(struct pthread_mutex *m)
+{
+
+#if defined(_PTHREADS_INVARIANTS)
+ if (__predict_false(m->m_qe.tqe_prev == NULL))
+ PANIC("mutex is not on list");
+#endif
+}
+
+static void
+mutex_assert_not_owned(struct pthread_mutex *m)
+{
+
+#if defined(_PTHREADS_INVARIANTS)
+ if (__predict_false(m->m_qe.tqe_prev != NULL ||
+ m->m_qe.tqe_next != NULL))
+ PANIC("mutex is on list");
+#endif
+}
+
static int
-mutex_init(pthread_mutex_t *mutex,
- const struct pthread_mutex_attr *mutex_attr,
- void *(calloc_cb)(size_t, size_t))
+is_pshared_mutex(struct pthread_mutex *m)
{
- const struct pthread_mutex_attr *attr;
- struct pthread_mutex *pmutex;
- if (mutex_attr == NULL) {
- attr = &_pthread_mutexattr_default;
- } else {
- attr = mutex_attr;
- if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
- attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
- return (EINVAL);
- if (attr->m_protocol < PTHREAD_PRIO_NONE ||
- attr->m_protocol > PTHREAD_PRIO_PROTECT)
- return (EINVAL);
- }
- if ((pmutex = (pthread_mutex_t)
- calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
- return (ENOMEM);
+ return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
+}
+
+static int
+mutex_check_attr(const struct pthread_mutex_attr *attr)
+{
+
+ if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
+ attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
+ return (EINVAL);
+ if (attr->m_protocol < PTHREAD_PRIO_NONE ||
+ attr->m_protocol > PTHREAD_PRIO_PROTECT)
+ return (EINVAL);
+ return (0);
+}
+
+static void
+mutex_init_body(struct pthread_mutex *pmutex,
+ const struct pthread_mutex_attr *attr)
+{
pmutex->m_flags = attr->m_type;
- pmutex->m_owner = NULL;
+ pmutex->m_owner = 0;
pmutex->m_count = 0;
pmutex->m_spinloops = 0;
pmutex->m_yieldloops = 0;
- MUTEX_INIT_LINK(pmutex);
- switch(attr->m_protocol) {
+ mutex_init_link(pmutex);
+ switch (attr->m_protocol) {
case PTHREAD_PRIO_NONE:
pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
pmutex->m_lock.m_flags = 0;
@@ -166,13 +186,37 @@ mutex_init(pthread_mutex_t *mutex,
pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
break;
}
+ if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
+ pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;
if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
pmutex->m_spinloops =
_thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
pmutex->m_yieldloops = _thr_yieldloops;
}
+}
+static int
+mutex_init(pthread_mutex_t *mutex,
+ const struct pthread_mutex_attr *mutex_attr,
+ void *(calloc_cb)(size_t, size_t))
+{
+ const struct pthread_mutex_attr *attr;
+ struct pthread_mutex *pmutex;
+ int error;
+
+ if (mutex_attr == NULL) {
+ attr = &_pthread_mutexattr_default;
+ } else {
+ attr = mutex_attr;
+ error = mutex_check_attr(attr);
+ if (error != 0)
+ return (error);
+ }
+ if ((pmutex = (pthread_mutex_t)
+ calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
+ return (ENOMEM);
+ mutex_init_body(pmutex, attr);
*mutex = pmutex;
return (0);
}
@@ -187,7 +231,8 @@ init_static(struct pthread *thread, pthread_mutex_t *mutex)
if (*mutex == THR_MUTEX_INITIALIZER)
ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
- ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc);
+ ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
+ calloc);
else
ret = 0;
THR_LOCK_RELEASE(thread, &_mutex_static_lock);
@@ -200,7 +245,7 @@ set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
struct pthread_mutex *m2;
- m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
+ m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
if (m2 != NULL)
m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
else
@@ -211,7 +256,25 @@ int
__pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
- return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc);
+ struct pthread_mutex *pmtx;
+ int ret;
+
+ if (mutex_attr != NULL) {
+ ret = mutex_check_attr(*mutex_attr);
+ if (ret != 0)
+ return (ret);
+ }
+ if (mutex_attr == NULL ||
+ (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
+ return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
+ calloc));
+ }
+ pmtx = __thr_pshared_offpage(mutex, 1);
+ if (pmtx == NULL)
+ return (EFAULT);
+ *mutex = THR_PSHARED_PTR;
+ mutex_init_body(pmtx, *mutex_attr);
+ return (0);
}
/* This function is used internally by malloc. */
@@ -222,7 +285,8 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
static const struct pthread_mutex_attr attr = {
.m_type = PTHREAD_MUTEX_NORMAL,
.m_protocol = PTHREAD_PRIO_NONE,
- .m_ceiling = 0
+ .m_ceiling = 0,
+ .m_pshared = PTHREAD_PROCESS_PRIVATE,
};
int ret;
@@ -232,31 +296,44 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
return (ret);
}
-void
-_mutex_fork(struct pthread *curthread)
+/*
+ * Fix mutex ownership for child process.
+ *
+ * Process private mutex ownership is transmitted from the forking
+ * thread to the child process.
+ *
+ * Process shared mutex should not be inherited because owner is
+ * forking thread which is in parent process, they are removed from
+ * the owned mutex list.
+ */
+static void
+queue_fork(struct pthread *curthread, struct mutex_queue *q,
+ struct mutex_queue *qp, uint bit)
{
struct pthread_mutex *m;
- /*
- * Fix mutex ownership for child process.
- * note that process shared mutex should not
- * be inherited because owner is forking thread
- * which is in parent process, they should be
- * removed from the owned mutex list, current,
- * process shared mutex is not supported, so I
- * am not worried.
- */
+ TAILQ_INIT(q);
+ TAILQ_FOREACH(m, qp, m_pqe) {
+ TAILQ_INSERT_TAIL(q, m, m_qe);
+ m->m_lock.m_owner = TID(curthread) | bit;
+ m->m_owner = TID(curthread);
+ }
+}
+
+void
+_mutex_fork(struct pthread *curthread)
+{
- TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
- m->m_lock.m_owner = TID(curthread);
- TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
- m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
+ queue_fork(curthread, &curthread->mq[TMQ_NORM],
+ &curthread->mq[TMQ_NORM_PRIV], 0);
+ queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
+ &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
}
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
- pthread_mutex_t m;
+ pthread_mutex_t m, m1;
int ret;
m = *mutex;
@@ -265,11 +342,20 @@ _pthread_mutex_destroy(pthread_mutex_t *mutex)
} else if (m == THR_MUTEX_DESTROYED) {
ret = EINVAL;
} else {
- if (m->m_owner != NULL) {
+ if (m == THR_PSHARED_PTR) {
+ m1 = __thr_pshared_offpage(mutex, 0);
+ if (m1 != NULL) {
+ mutex_assert_not_owned(m1);
+ __thr_pshared_destroy(mutex);
+ }
+ *mutex = THR_MUTEX_DESTROYED;
+ return (0);
+ }
+ if (m->m_owner != 0) {
ret = EBUSY;
} else {
*mutex = THR_MUTEX_DESTROYED;
- MUTEX_ASSERT_NOT_OWNED(m);
+ mutex_assert_not_owned(m);
free(m);
ret = 0;
}
@@ -278,54 +364,87 @@ _pthread_mutex_destroy(pthread_mutex_t *mutex)
return (ret);
}
-#define ENQUEUE_MUTEX(curthread, m) \
- do { \
- (m)->m_owner = curthread; \
- /* Add to the list of owned mutexes: */ \
- MUTEX_ASSERT_NOT_OWNED((m)); \
- if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \
- TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
- else \
- TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
- } while (0)
-
-#define DEQUEUE_MUTEX(curthread, m) \
- (m)->m_owner = NULL; \
- MUTEX_ASSERT_IS_OWNED(m); \
- if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \
- TAILQ_REMOVE(&curthread->mutexq, (m), m_qe); \
- else { \
- TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe); \
- set_inherited_priority(curthread, m); \
- } \
- MUTEX_INIT_LINK(m);
-
-#define CHECK_AND_INIT_MUTEX \
- if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) { \
- if (m == THR_MUTEX_DESTROYED) \
- return (EINVAL); \
- int ret; \
- ret = init_static(_get_curthread(), mutex); \
- if (ret) \
- return (ret); \
- m = *mutex; \
- }
+static int
+mutex_qidx(struct pthread_mutex *m)
+{
+
+ if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+ return (TMQ_NORM);
+ return (TMQ_NORM_PP);
+}
+
+static void
+enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
+{
+ int qidx;
+
+ m->m_owner = TID(curthread);
+ /* Add to the list of owned mutexes: */
+ mutex_assert_not_owned(m);
+ qidx = mutex_qidx(m);
+ TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
+ if (!is_pshared_mutex(m))
+ TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
+}
+
+static void
+dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
+{
+ int qidx;
+
+ m->m_owner = 0;
+ mutex_assert_is_owned(m);
+ qidx = mutex_qidx(m);
+ TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
+ if (!is_pshared_mutex(m))
+ TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
+ if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
+ set_inherited_priority(curthread, m);
+ mutex_init_link(m);
+}
static int
-mutex_trylock_common(pthread_mutex_t *mutex)
+check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
{
- struct pthread *curthread = _get_curthread();
- struct pthread_mutex *m = *mutex;
+ int ret;
+
+ *m = *mutex;
+ ret = 0;
+ if (*m == THR_PSHARED_PTR) {
+ *m = __thr_pshared_offpage(mutex, 0);
+ if (*m == NULL)
+ ret = EINVAL;
+ } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
+ if (*m == THR_MUTEX_DESTROYED) {
+ ret = EINVAL;
+ } else {
+ ret = init_static(_get_curthread(), mutex);
+ if (ret == 0)
+ *m = *mutex;
+ }
+ }
+ return (ret);
+}
+
+int
+__pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ struct pthread *curthread;
+ struct pthread_mutex *m;
uint32_t id;
int ret;
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret != 0)
+ return (ret);
+ curthread = _get_curthread();
id = TID(curthread);
if (m->m_flags & PMUTEX_FLAG_PRIVATE)
THR_CRITICAL_ENTER(curthread);
ret = _thr_umutex_trylock(&m->m_lock, id);
if (__predict_true(ret == 0)) {
- ENQUEUE_MUTEX(curthread, m);
- } else if (m->m_owner == curthread) {
+ enqueue_mutex(curthread, m);
+ } else if (m->m_owner == id) {
ret = mutex_self_trylock(m);
} /* else {} */
if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
@@ -333,16 +452,6 @@ mutex_trylock_common(pthread_mutex_t *mutex)
return (ret);
}
-int
-__pthread_mutex_trylock(pthread_mutex_t *mutex)
-{
- struct pthread_mutex *m;
-
- CHECK_AND_INIT_MUTEX
-
- return (mutex_trylock_common(mutex));
-}
-
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
const struct timespec *abstime)
@@ -351,10 +460,10 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
int count;
int ret;
- if (m->m_owner == curthread)
- return mutex_self_lock(m, abstime);
-
id = TID(curthread);
+ if (m->m_owner == id)
+ return (mutex_self_lock(m, abstime));
+
/*
* For adaptive mutexes, spin for a bit in the expectation
* that if the application requests this mutex type then
@@ -406,7 +515,7 @@ sleep_in_kernel:
}
done:
if (ret == 0)
- ENQUEUE_MUTEX(curthread, m);
+ enqueue_mutex(curthread, m);
return (ret);
}
@@ -421,7 +530,7 @@ mutex_lock_common(struct pthread_mutex *m,
if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
THR_CRITICAL_ENTER(curthread);
if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
- ENQUEUE_MUTEX(curthread, m);
+ enqueue_mutex(curthread, m);
ret = 0;
} else {
ret = mutex_lock_sleep(curthread, m, abstime);
@@ -434,25 +543,28 @@ mutex_lock_common(struct pthread_mutex *m,
int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
- struct pthread_mutex *m;
+ struct pthread_mutex *m;
+ int ret;
_thr_check_init();
-
- CHECK_AND_INIT_MUTEX
-
- return (mutex_lock_common(m, NULL, 0));
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ ret = mutex_lock_common(m, NULL, 0);
+ return (ret);
}
int
-__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
+__pthread_mutex_timedlock(pthread_mutex_t *mutex,
+ const struct timespec *abstime)
{
- struct pthread_mutex *m;
+ struct pthread_mutex *m;
+ int ret;
_thr_check_init();
-
- CHECK_AND_INIT_MUTEX
-
- return (mutex_lock_common(m, abstime, 0));
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ ret = mutex_lock_common(m, abstime, 0);
+ return (ret);
}
int
@@ -460,7 +572,13 @@ _pthread_mutex_unlock(pthread_mutex_t *mutex)
{
struct pthread_mutex *mp;
- mp = *mutex;
+ if (*mutex == THR_PSHARED_PTR) {
+ mp = __thr_pshared_offpage(mutex, 0);
+ if (mp == NULL)
+ return (EINVAL);
+ } else {
+ mp = *mutex;
+ }
return (mutex_unlock_common(mp, 0, NULL));
}
@@ -493,7 +611,7 @@ _mutex_cv_attach(struct pthread_mutex *m, int count)
{
struct pthread *curthread = _get_curthread();
- ENQUEUE_MUTEX(curthread, m);
+ enqueue_mutex(curthread, m);
m->m_count = count;
return (0);
}
@@ -513,7 +631,7 @@ _mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
*/
*recurse = mp->m_count;
mp->m_count = 0;
- DEQUEUE_MUTEX(curthread, mp);
+ dequeue_mutex(curthread, mp);
/* Will this happen in real-world ? */
if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
@@ -641,14 +759,15 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
return (EPERM);
}
+ id = TID(curthread);
+
/*
* Check if the running thread is not the owner of the mutex.
*/
- if (__predict_false(m->m_owner != curthread))
+ if (__predict_false(m->m_owner != id))
return (EPERM);
error = 0;
- id = TID(curthread);
if (__predict_false(
PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
m->m_count > 0)) {
@@ -660,7 +779,7 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
} else
defered = 0;
- DEQUEUE_MUTEX(curthread, m);
+ dequeue_mutex(curthread, m);
error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
if (mtx_defer == NULL && defered) {
@@ -676,54 +795,85 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
- int *prioceiling)
+ int *prioceiling)
{
struct pthread_mutex *m;
- int ret;
- m = *mutex;
- if ((m <= THR_MUTEX_DESTROYED) ||
- (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
- ret = EINVAL;
- else {
- *prioceiling = m->m_lock.m_ceilings[0];
- ret = 0;
+ if (*mutex == THR_PSHARED_PTR) {
+ m = __thr_pshared_offpage(mutex, 0);
+ if (m == NULL)
+ return (EINVAL);
+ } else {
+ m = *mutex;
+ if (m <= THR_MUTEX_DESTROYED)
+ return (EINVAL);
}
-
- return (ret);
+ if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+ return (EINVAL);
+ *prioceiling = m->m_lock.m_ceilings[0];
+ return (0);
}
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
- int ceiling, int *old_ceiling)
+ int ceiling, int *old_ceiling)
{
- struct pthread *curthread = _get_curthread();
+ struct pthread *curthread;
struct pthread_mutex *m, *m1, *m2;
+ struct mutex_queue *q, *qp;
int ret;
- m = *mutex;
- if ((m <= THR_MUTEX_DESTROYED) ||
- (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+ if (*mutex == THR_PSHARED_PTR) {
+ m = __thr_pshared_offpage(mutex, 0);
+ if (m == NULL)
+ return (EINVAL);
+ } else {
+ m = *mutex;
+ if (m <= THR_MUTEX_DESTROYED)
+ return (EINVAL);
+ }
+ if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
return (EINVAL);
ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
if (ret != 0)
return (ret);
- if (m->m_owner == curthread) {
- MUTEX_ASSERT_IS_OWNED(m);
+ curthread = _get_curthread();
+ if (m->m_owner == TID(curthread)) {
+ mutex_assert_is_owned(m);
m1 = TAILQ_PREV(m, mutex_queue, m_qe);
m2 = TAILQ_NEXT(m, m_qe);
if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
(m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
- TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
- TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
+ q = &curthread->mq[TMQ_NORM_PP];
+ qp = &curthread->mq[TMQ_NORM_PP_PRIV];
+ TAILQ_REMOVE(q, m, m_qe);
+ if (!is_pshared_mutex(m))
+ TAILQ_REMOVE(qp, m, m_pqe);
+ TAILQ_FOREACH(m2, q, m_qe) {
if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
TAILQ_INSERT_BEFORE(m2, m, m_qe);
+ if (!is_pshared_mutex(m)) {
+ while (m2 != NULL &&
+ is_pshared_mutex(m2)) {
+ m2 = TAILQ_PREV(m2,
+ mutex_queue, m_qe);
+ }
+ if (m2 == NULL) {
+ TAILQ_INSERT_HEAD(qp,
+ m, m_pqe);
+ } else {
+ TAILQ_INSERT_BEFORE(m2,
+ m, m_pqe);
+ }
+ }
return (0);
}
}
- TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
+ TAILQ_INSERT_TAIL(q, m, m_qe);
+ if (!is_pshared_mutex(m))
+ TAILQ_INSERT_TAIL(qp, m, m_pqe);
}
}
return (0);
@@ -732,44 +882,48 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
- struct pthread_mutex *m;
-
- CHECK_AND_INIT_MUTEX
+ struct pthread_mutex *m;
+ int ret;
- *count = m->m_spinloops;
- return (0);
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ *count = m->m_spinloops;
+ return (ret);
}
int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
- struct pthread_mutex *m;
-
- CHECK_AND_INIT_MUTEX
+ struct pthread_mutex *m;
+ int ret;
- m->m_spinloops = count;
- return (0);
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ m->m_spinloops = count;
+ return (ret);
}
int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
- struct pthread_mutex *m;
-
- CHECK_AND_INIT_MUTEX
+ struct pthread_mutex *m;
+ int ret;
- *count = m->m_yieldloops;
- return (0);
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ *count = m->m_yieldloops;
+ return (ret);
}
int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
- struct pthread_mutex *m;
-
- CHECK_AND_INIT_MUTEX
+ struct pthread_mutex *m;
+ int ret;
- m->m_yieldloops = count;
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ m->m_yieldloops = count;
-	return (0);
+	return (ret);
}
@@ -778,10 +932,16 @@ _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
struct pthread_mutex *m;
- m = *mutex;
- if (m <= THR_MUTEX_DESTROYED)
- return (0);
- return (m->m_owner == _get_curthread());
+ if (*mutex == THR_PSHARED_PTR) {
+ m = __thr_pshared_offpage(mutex, 0);
+ if (m == NULL)
+ return (0);
+ } else {
+ m = *mutex;
+ if (m <= THR_MUTEX_DESTROYED)
+ return (0);
+ }
+ return (m->m_owner == TID(_get_curthread()));
}
int
@@ -792,7 +952,7 @@ _mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
return (EINVAL);
return (EPERM);
}
- if (mp->m_owner != curthread)
+ if (mp->m_owner != TID(curthread))
return (EPERM);
return (0);
}