path: root/lib/libkse/thread
Diffstat (limited to 'lib/libkse/thread')
-rw-r--r--  lib/libkse/thread/thr_cond.c   |  2
-rw-r--r--  lib/libkse/thread/thr_init.c   |  8
-rw-r--r--  lib/libkse/thread/thr_kern.c   | 10
-rw-r--r--  lib/libkse/thread/thr_mutex.c  | 36
-rw-r--r--  lib/libkse/thread/thr_rtld.c   |  2
5 files changed, 40 insertions, 18 deletions
diff --git a/lib/libkse/thread/thr_cond.c b/lib/libkse/thread/thr_cond.c
index 589f4b1..8e2582a 100644
--- a/lib/libkse/thread/thr_cond.c
+++ b/lib/libkse/thread/thr_cond.c
@@ -122,7 +122,7 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
malloc(sizeof(struct pthread_cond))) == NULL) {
rval = ENOMEM;
} else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0) {
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0) {
free(pcond);
rval = ENOMEM;
} else {
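
The recurring change in every file below is a new final argument to _lock_init(): a calloc-style allocator callback. The _lock_init() prototype itself lives outside lib/libkse/thread and is not part of this diff, so the declaration below is only a sketch inferred from the call sites and from the calloc_cb type introduced in thr_mutex.c; the parameter names are assumptions, not the library's exact header.

/*
 * Inferred shape of the extended lock initializer: the trailing callback
 * supplies the allocator for any memory the lock needs, so a caller such
 * as malloc can substitute its own bootstrap calloc for the libc one.
 */
int	_lock_init(struct lock *lck, enum lock_type ltype,
	    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc,
	    void *(calloc_cb)(size_t, size_t));
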
diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c
index 5c8aac4..424378d 100644
--- a/lib/libkse/thread/thr_init.c
+++ b/lib/libkse/thread/thr_init.c
@@ -495,16 +495,16 @@ init_private(void)
* process signal mask and pending signal sets.
*/
if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
+ _kse_lock_wait, _kse_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize _thread_signal_lock");
if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0)
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize mutex static init lock");
if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0)
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize rwlock static init lock");
if (_lock_init(&_keytable_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0)
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize thread specific keytable lock");
_thr_spinlock_init();
diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c
index b362fe9..cc60988 100644
--- a/lib/libkse/thread/thr_kern.c
+++ b/lib/libkse/thread/thr_kern.c
@@ -378,13 +378,13 @@ _kse_init(void)
TAILQ_INIT(&free_threadq);
TAILQ_INIT(&gc_ksegq);
if (_lock_init(&kse_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
+ _kse_lock_wait, _kse_lock_wakeup, calloc) != 0)
PANIC("Unable to initialize free KSE queue lock");
if (_lock_init(&thread_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
+ _kse_lock_wait, _kse_lock_wakeup, calloc) != 0)
PANIC("Unable to initialize free thread queue lock");
if (_lock_init(&_thread_list_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
+ _kse_lock_wait, _kse_lock_wakeup, calloc) != 0)
PANIC("Unable to initialize thread list lock");
_pthread_mutex_init(&_tcb_mutex, NULL);
active_kse_count = 0;
@@ -2120,7 +2120,7 @@ kseg_init(struct kse_group *kseg)
{
kseg_reinit(kseg);
_lock_init(&kseg->kg_lock, LCK_ADAPTIVE, _kse_lock_wait,
- _kse_lock_wakeup);
+ _kse_lock_wakeup, calloc);
}
static void
@@ -2390,7 +2390,7 @@ _thr_alloc(struct pthread *curthread)
* enter critical region before doing this!
*/
if (_lock_init(&thread->lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0)
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize thread lock");
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_init(&thread->lockusers[i], (void *)thread);
diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c
index 264e01e..d8bf498 100644
--- a/lib/libkse/thread/thr_mutex.c
+++ b/lib/libkse/thread/thr_mutex.c
@@ -115,11 +115,9 @@ __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
-
-
-int
-__pthread_mutex_init(pthread_mutex_t *mutex,
- const pthread_mutexattr_t *mutex_attr)
+static int
+thr_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr, void *(calloc_cb)(size_t, size_t))
{
struct pthread_mutex *pmutex;
enum pthread_mutextype type;
@@ -163,10 +161,10 @@ __pthread_mutex_init(pthread_mutex_t *mutex,
/* Check no errors so far: */
if (ret == 0) {
if ((pmutex = (pthread_mutex_t)
- malloc(sizeof(struct pthread_mutex))) == NULL)
+ calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
ret = ENOMEM;
else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0) {
+ _thr_lock_wait, _thr_lock_wakeup, calloc_cb) != 0) {
free(pmutex);
*mutex = NULL;
ret = ENOMEM;
@@ -222,6 +220,14 @@ __pthread_mutex_init(pthread_mutex_t *mutex,
}
int
+__pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr)
+{
+
+ return (thr_mutex_init(mutex, mutex_attr, calloc));
+}
+
+int
_pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
@@ -237,6 +243,22 @@ _pthread_mutex_init(pthread_mutex_t *mutex,
}
}
+/* This function is used internally by malloc. */
+int
+_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t))
+{
+ static const struct pthread_mutex_attr attr = {
+ .m_type = PTHREAD_MUTEX_NORMAL,
+ .m_protocol = PTHREAD_PRIO_NONE,
+ .m_ceiling = 0,
+ .m_flags = 0
+ };
+
+ return (thr_mutex_init(mutex, (pthread_mutexattr_t *)&attr,
+ calloc_cb));
+}
+
void
_thr_mutex_reinit(pthread_mutex_t *mutex)
{
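
The new _pthread_mutex_init_calloc_cb() entry point exists so a malloc implementation can create its internal mutexes without recursing into the allocator it is still bootstrapping: the caller passes its own calloc-compatible function, which thr_mutex_init() then uses for the mutex structure and its lock. Below is a minimal caller sketch under that assumption; bootstrap_calloc, the static arena, and malloc_mutex_bootstrap are illustrative names, not part of this change.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* Prototype as added in thr_mutex.c above. */
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
	    void *(calloc_cb)(size_t, size_t));

/*
 * Hypothetical bootstrap allocator: hands out zeroed memory from a small
 * static arena so creating the mutex never re-enters malloc itself.
 */
static char	bootstrap_arena[4096];
static size_t	bootstrap_used;

static void *
bootstrap_calloc(size_t number, size_t size)
{
	void	*p;
	size_t	 total;

	total = number * size;		/* overflow check omitted in sketch */
	if (total > sizeof(bootstrap_arena) - bootstrap_used)
		return (NULL);
	p = bootstrap_arena + bootstrap_used;
	bootstrap_used += total;
	memset(p, 0, total);
	return (p);
}

static pthread_mutex_t	malloc_internal_mutex;

/* Called once, early in the allocator's own initialization. */
static void
malloc_mutex_bootstrap(void)
{
	if (_pthread_mutex_init_calloc_cb(&malloc_internal_mutex,
	    bootstrap_calloc) != 0)
		abort();	/* cannot continue without the lock */
}

Note that the helper hard-codes a PTHREAD_MUTEX_NORMAL, PTHREAD_PRIO_NONE mutex via the static attr in the hunk above, so callers cannot choose a mutex type through this path.
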
diff --git a/lib/libkse/thread/thr_rtld.c b/lib/libkse/thread/thr_rtld.c
index e813073..acb92a3 100644
--- a/lib/libkse/thread/thr_rtld.c
+++ b/lib/libkse/thread/thr_rtld.c
@@ -162,7 +162,7 @@ _thr_rtld_lock_create(void)
if ((l = malloc(sizeof(struct rtld_kse_lock))) != NULL) {
_lock_init(&l->lck, LCK_ADAPTIVE, _kse_lock_wait,
- _kse_lock_wakeup);
+ _kse_lock_wakeup, calloc);
l->owner = NULL;
l->count = 0;
l->write = 0;