author     jasone <jasone@FreeBSD.org>    2007-11-27 03:16:44 +0000
committer  jasone <jasone@FreeBSD.org>    2007-11-27 03:16:44 +0000
commit     21bb948195adb6636c33738cab39bb89cac41bc7 (patch)
tree       bc0c2019e6f4f8b5aa0d67d7e3517c626797bb0e /lib
parent     2dd595aefe6b80476344fe4fc8b7934db055aaa2 (diff)
Add _pthread_mutex_init_calloc_cb() to libthr and libkse, so that malloc(3)
(part of libc) can use pthreads mutexes without causing infinite recursion during initialization.
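
For context, a minimal sketch of how a malloc implementation could use the new entry point. Everything below except the _pthread_mutex_init_calloc_cb() prototype (taken from the diff) is a hypothetical illustration: the bootstrap arena, bootstrap_calloc(), and malloc_mutex_init() are assumed names, not code from this commit.

/*
 * Hypothetical usage sketch, not part of this commit: a malloc
 * implementation initializing its own mutex through the new
 * _pthread_mutex_init_calloc_cb() entry point.
 */
#include <pthread.h>
#include <stddef.h>
#include <string.h>

int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
	    void *(calloc_cb)(size_t, size_t));

/*
 * Hand out zeroed memory from a static arena so that mutex creation
 * never re-enters malloc(3) while malloc is still initializing.
 * (Overflow and alignment handling omitted for brevity.)
 */
static char	bootstrap_arena[4096];
static size_t	bootstrap_used;

static void *
bootstrap_calloc(size_t number, size_t size)
{
	void	*ret;
	size_t	 total;

	total = number * size;
	if (total > sizeof(bootstrap_arena) - bootstrap_used)
		return (NULL);
	ret = &bootstrap_arena[bootstrap_used];
	bootstrap_used += total;
	memset(ret, 0, total);
	return (ret);
}

static pthread_mutex_t	malloc_mutex;

/* Called from malloc's initialization path instead of pthread_mutex_init(). */
static int
malloc_mutex_init(void)
{

	return (_pthread_mutex_init_calloc_cb(&malloc_mutex, bootstrap_calloc));
}

A plain pthread_mutex_init() would allocate the mutex with calloc()/malloc() inside libthr or libkse and recurse back into the still-initializing malloc; routing that allocation through the supplied callback breaks the cycle.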
Diffstat (limited to 'lib')
-rw-r--r--  lib/libkse/kse.map               1
-rw-r--r--  lib/libkse/sys/lock.c            7
-rw-r--r--  lib/libkse/sys/lock.h            2
-rw-r--r--  lib/libkse/thread/thr_cond.c     2
-rw-r--r--  lib/libkse/thread/thr_init.c     8
-rw-r--r--  lib/libkse/thread/thr_kern.c    10
-rw-r--r--  lib/libkse/thread/thr_mutex.c   36
-rw-r--r--  lib/libkse/thread/thr_rtld.c     2
-rw-r--r--  lib/libthr/pthread.map           1
-rw-r--r--  lib/libthr/thread/thr_mutex.c   33
10 files changed, 74 insertions, 28 deletions
diff --git a/lib/libkse/kse.map b/lib/libkse/kse.map
index e8888df..0121a9d 100644
--- a/lib/libkse/kse.map
+++ b/lib/libkse/kse.map
@@ -259,6 +259,7 @@ global:
_pthread_mutex_destroy;
_pthread_mutex_getprioceiling;
_pthread_mutex_init;
+ _pthread_mutex_init_calloc_cb;
_pthread_mutex_lock;
_pthread_mutex_setprioceiling;
_pthread_mutex_timedlock;
diff --git a/lib/libkse/sys/lock.c b/lib/libkse/sys/lock.c
index 2ac8c0c..7c111bc 100644
--- a/lib/libkse/sys/lock.c
+++ b/lib/libkse/sys/lock.c
@@ -54,11 +54,12 @@ _lock_destroy(struct lock *lck)
int
_lock_init(struct lock *lck, enum lock_type ltype,
- lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
+ lock_handler_t *waitfunc, lock_handler_t *wakeupfunc,
+ void *(calloc_cb)(size_t, size_t))
{
if (lck == NULL)
return (-1);
- else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL)
+ else if ((lck->l_head = calloc_cb(1, sizeof(struct lockreq))) == NULL)
return (-1);
else {
lck->l_type = ltype;
@@ -80,7 +81,7 @@ _lock_reinit(struct lock *lck, enum lock_type ltype,
if (lck == NULL)
return (-1);
else if (lck->l_head == NULL)
- return (_lock_init(lck, ltype, waitfunc, wakeupfunc));
+ return (_lock_init(lck, ltype, waitfunc, wakeupfunc, calloc));
else {
lck->l_head->lr_locked = 0;
lck->l_head->lr_watcher = NULL;
diff --git a/lib/libkse/sys/lock.h b/lib/libkse/sys/lock.h
index 6102a0b..815e444 100644
--- a/lib/libkse/sys/lock.h
+++ b/lib/libkse/sys/lock.h
@@ -83,7 +83,7 @@ void _lock_acquire(struct lock *, struct lockuser *, int);
void _lock_destroy(struct lock *);
void _lock_grant(struct lock *, struct lockuser *);
int _lock_init(struct lock *, enum lock_type,
- lock_handler_t *, lock_handler_t *);
+ lock_handler_t *, lock_handler_t *, void *(size_t, size_t));
int _lock_reinit(struct lock *, enum lock_type,
lock_handler_t *, lock_handler_t *);
void _lock_release(struct lock *, struct lockuser *);
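
For reference, a sketch of a caller adapting to the extended _lock_init() signature shown above. The wrapper function and its NULL-means-calloc convention are assumptions for illustration only; the lock handlers and types are the libkse-internal ones used throughout this diff.

/*
 * Sketch only: assumes the libkse-internal declarations from lock.h
 * and thr_private.h (_thr_lock_wait, _thr_lock_wakeup) are in scope.
 */
#include <stdlib.h>

#include "lock.h"
#include "thr_private.h"

static struct lock	example_lock;

static int
example_lock_setup(void *(calloc_cb)(size_t, size_t))
{

	/* Ordinary callers pass calloc; malloc's init path passes its callback. */
	if (calloc_cb == NULL)
		calloc_cb = calloc;
	return (_lock_init(&example_lock, LCK_ADAPTIVE,
	    _thr_lock_wait, _thr_lock_wakeup, calloc_cb));
}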
diff --git a/lib/libkse/thread/thr_cond.c b/lib/libkse/thread/thr_cond.c
index 589f4b1..8e2582a 100644
--- a/lib/libkse/thread/thr_cond.c
+++ b/lib/libkse/thread/thr_cond.c
@@ -122,7 +122,7 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
malloc(sizeof(struct pthread_cond))) == NULL) {
rval = ENOMEM;
} else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0) {
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0) {
free(pcond);
rval = ENOMEM;
} else {
diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c
index 5c8aac4..424378d 100644
--- a/lib/libkse/thread/thr_init.c
+++ b/lib/libkse/thread/thr_init.c
@@ -495,16 +495,16 @@ init_private(void)
* process signal mask and pending signal sets.
*/
if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
+ _kse_lock_wait, _kse_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize _thread_signal_lock");
if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0)
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize mutex static init lock");
if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0)
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize rwlock static init lock");
if (_lock_init(&_keytable_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0)
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize thread specific keytable lock");
_thr_spinlock_init();
diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c
index b362fe9..cc60988 100644
--- a/lib/libkse/thread/thr_kern.c
+++ b/lib/libkse/thread/thr_kern.c
@@ -378,13 +378,13 @@ _kse_init(void)
TAILQ_INIT(&free_threadq);
TAILQ_INIT(&gc_ksegq);
if (_lock_init(&kse_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
+ _kse_lock_wait, _kse_lock_wakeup, calloc) != 0)
PANIC("Unable to initialize free KSE queue lock");
if (_lock_init(&thread_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
+ _kse_lock_wait, _kse_lock_wakeup, calloc) != 0)
PANIC("Unable to initialize free thread queue lock");
if (_lock_init(&_thread_list_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
+ _kse_lock_wait, _kse_lock_wakeup, calloc) != 0)
PANIC("Unable to initialize thread list lock");
_pthread_mutex_init(&_tcb_mutex, NULL);
active_kse_count = 0;
@@ -2120,7 +2120,7 @@ kseg_init(struct kse_group *kseg)
{
kseg_reinit(kseg);
_lock_init(&kseg->kg_lock, LCK_ADAPTIVE, _kse_lock_wait,
- _kse_lock_wakeup);
+ _kse_lock_wakeup, calloc);
}
static void
@@ -2390,7 +2390,7 @@ _thr_alloc(struct pthread *curthread)
* enter critical region before doing this!
*/
if (_lock_init(&thread->lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0)
+ _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
PANIC("Cannot initialize thread lock");
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_init(&thread->lockusers[i], (void *)thread);
diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c
index 264e01e..d8bf498 100644
--- a/lib/libkse/thread/thr_mutex.c
+++ b/lib/libkse/thread/thr_mutex.c
@@ -115,11 +115,9 @@ __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
-
-
-int
-__pthread_mutex_init(pthread_mutex_t *mutex,
- const pthread_mutexattr_t *mutex_attr)
+static int
+thr_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr, void *(calloc_cb)(size_t, size_t))
{
struct pthread_mutex *pmutex;
enum pthread_mutextype type;
@@ -163,10 +161,10 @@ __pthread_mutex_init(pthread_mutex_t *mutex,
/* Check no errors so far: */
if (ret == 0) {
if ((pmutex = (pthread_mutex_t)
- malloc(sizeof(struct pthread_mutex))) == NULL)
+ calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
ret = ENOMEM;
else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
- _thr_lock_wait, _thr_lock_wakeup) != 0) {
+ _thr_lock_wait, _thr_lock_wakeup, calloc_cb) != 0) {
free(pmutex);
*mutex = NULL;
ret = ENOMEM;
@@ -222,6 +220,14 @@ __pthread_mutex_init(pthread_mutex_t *mutex,
}
int
+__pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr)
+{
+
+ return (thr_mutex_init(mutex, mutex_attr, calloc));
+}
+
+int
_pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
@@ -237,6 +243,22 @@ _pthread_mutex_init(pthread_mutex_t *mutex,
}
}
+/* This function is used internally by malloc. */
+int
+_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t))
+{
+ static const struct pthread_mutex_attr attr = {
+ .m_type = PTHREAD_MUTEX_NORMAL,
+ .m_protocol = PTHREAD_PRIO_NONE,
+ .m_ceiling = 0,
+ .m_flags = 0
+ };
+
+ return (thr_mutex_init(mutex, (pthread_mutexattr_t *)&attr,
+ calloc_cb));
+}
+
void
_thr_mutex_reinit(pthread_mutex_t *mutex)
{
diff --git a/lib/libkse/thread/thr_rtld.c b/lib/libkse/thread/thr_rtld.c
index e813073..acb92a3 100644
--- a/lib/libkse/thread/thr_rtld.c
+++ b/lib/libkse/thread/thr_rtld.c
@@ -162,7 +162,7 @@ _thr_rtld_lock_create(void)
if ((l = malloc(sizeof(struct rtld_kse_lock))) != NULL) {
_lock_init(&l->lck, LCK_ADAPTIVE, _kse_lock_wait,
- _kse_lock_wakeup);
+ _kse_lock_wakeup, calloc);
l->owner = NULL;
l->count = 0;
l->write = 0;
diff --git a/lib/libthr/pthread.map b/lib/libthr/pthread.map
index 1995fda..92be295 100644
--- a/lib/libthr/pthread.map
+++ b/lib/libthr/pthread.map
@@ -285,6 +285,7 @@ global:
_pthread_mutex_destroy;
_pthread_mutex_getprioceiling;
_pthread_mutex_init;
+ _pthread_mutex_init_calloc_cb;
_pthread_mutex_lock;
_pthread_mutex_setprioceiling;
_pthread_mutex_timedlock;
diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
index 1337ae0..35b6352 100644
--- a/lib/libthr/thread/thr_mutex.c
+++ b/lib/libthr/thread/thr_mutex.c
@@ -100,7 +100,8 @@ __weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);
static int
mutex_init(pthread_mutex_t *mutex,
- const pthread_mutexattr_t *mutex_attr, int private)
+ const pthread_mutexattr_t *mutex_attr, int private,
+ void *(calloc_cb)(size_t, size_t))
{
const struct pthread_mutex_attr *attr;
struct pthread_mutex *pmutex;
@@ -117,7 +118,7 @@ mutex_init(pthread_mutex_t *mutex,
return (EINVAL);
}
if ((pmutex = (pthread_mutex_t)
- calloc(1, sizeof(struct pthread_mutex))) == NULL)
+ calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
return (ENOMEM);
pmutex->m_type = attr->m_type;
@@ -154,7 +155,7 @@ init_static(struct pthread *thread, pthread_mutex_t *mutex)
THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == NULL)
- ret = mutex_init(mutex, NULL, 0);
+ ret = mutex_init(mutex, NULL, 0, calloc);
else
ret = 0;
@@ -171,7 +172,7 @@ init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == NULL)
- ret = mutex_init(mutex, NULL, 1);
+ ret = mutex_init(mutex, NULL, 1, calloc);
else
ret = 0;
@@ -196,14 +197,34 @@ int
_pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
- return mutex_init(mutex, mutex_attr, 1);
+ return mutex_init(mutex, mutex_attr, 1, calloc);
}
int
__pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
- return mutex_init(mutex, mutex_attr, 0);
+ return mutex_init(mutex, mutex_attr, 0, calloc);
+}
+
+/* This function is used internally by malloc. */
+int
+_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t))
+{
+/* XXX Enable adaptive locking if similar code is removed from malloc. */
+#if 0
+ static const struct pthread_mutex_attr attr = {
+ .m_type = PTHREAD_MUTEX_ADAPTIVE_NP,
+ .m_protocol = PTHREAD_PRIO_NONE,
+ .m_ceiling = 0,
+ .m_flags = 0
+ };
+
+ return mutex_init(mutex, (pthread_mutexattr_t *)&attr, 0, calloc_cb);
+#else
+ return mutex_init(mutex, NULL, 0, calloc_cb);
+#endif
}
void