author     jasone <jasone@FreeBSD.org>    2006-04-04 19:46:28 +0000
committer  jasone <jasone@FreeBSD.org>    2006-04-04 19:46:28 +0000
commit     b2f560b56da603c47027a33fadcae456012269cd
tree       b779dc347fe3836cf7bd5c682447b4e36d325e03
parent     c95a2670388d0513559c5aa2c453fa9932f58d8d
Add init_lock, and use it to protect against allocator initialization
races.  This isn't currently necessary for libpthread or libthr, but
without it, external threads libraries like the linuxthreads port are
not safe to use.

Reported by:	ganbold@micom.mng.net
Diffstat (limited to 'lib')
-rw-r--r--  lib/libc/stdlib/malloc.c  29
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/lib/libc/stdlib/malloc.c b/lib/libc/stdlib/malloc.c
index 86c22fe..a765af9 100644
--- a/lib/libc/stdlib/malloc.c
+++ b/lib/libc/stdlib/malloc.c
@@ -350,8 +350,12 @@ typedef struct {
spinlock_t lock;
} malloc_mutex_t;
+/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
+/* Used to avoid initialization races. */
+static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
+
/******************************************************************************/
/*
* Statistics data structures.
@@ -2969,12 +2973,6 @@ static inline bool
malloc_init(void)
{
- /*
- * We always initialize before threads are created, since any thread
- * creation first triggers allocations.
- */
- assert(__isthreaded == 0 || malloc_initialized);
-
if (malloc_initialized == false)
return (malloc_init_hard());
@@ -2989,6 +2987,16 @@ malloc_init_hard(void)
char buf[PATH_MAX + 1];
const char *opts;
+ malloc_mutex_lock(&init_lock);
+ if (malloc_initialized) {
+ /*
+ * Another thread initialized the allocator before this one
+ * acquired init_lock.
+ */
+ malloc_mutex_unlock(&init_lock);
+ return (false);
+ }
+
/* Get number of CPUs. */
{
int mib[2];
@@ -3339,8 +3347,10 @@ malloc_init_hard(void)
/* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
- if (arenas == NULL)
+ if (arenas == NULL) {
+ malloc_mutex_unlock(&init_lock);
return (true);
+ }
/*
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
@@ -3352,12 +3362,15 @@ malloc_init_hard(void)
* arena_choose_hard().
*/
arenas_extend(0);
- if (arenas[0] == NULL)
+ if (arenas[0] == NULL) {
+ malloc_mutex_unlock(&init_lock);
return (true);
+ }
malloc_mutex_init(&arenas_mtx);
malloc_initialized = true;
+ malloc_mutex_unlock(&init_lock);
return (false);
}
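
For readers unfamiliar with the pattern, here is a minimal standalone sketch of the lock-protected, re-checked one-time initialization this change introduces.  It is illustrative only: pthread_mutex_t stands in for the libc-internal malloc_mutex_t/_SPINLOCK_INITIALIZER, and do_init() is a hypothetical placeholder for the real setup work (reading options, allocating arenas, and so on).

#include <pthread.h>
#include <stdbool.h>

static bool initialized = false;
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical one-time setup; returns false on success, true on error. */
static bool
do_init(void)
{

	/* Real code would read options, size and allocate arenas, etc. */
	return (false);
}

/*
 * Same convention as malloc_init_hard() above: false means initialization
 * succeeded (or was already done), true means it failed.  The flag is
 * re-checked after init_lock is acquired so a thread that lost the race
 * returns without redoing the setup, and every error path releases the
 * lock before returning.
 */
static bool
lazy_init(void)
{

	pthread_mutex_lock(&init_lock);
	if (initialized) {
		/* Another thread got here first. */
		pthread_mutex_unlock(&init_lock);
		return (false);
	}
	if (do_init()) {
		/* Setup failed; drop the lock before reporting the error. */
		pthread_mutex_unlock(&init_lock);
		return (true);
	}
	initialized = true;
	pthread_mutex_unlock(&init_lock);
	return (false);
}

A caller checks the flag without the lock first, just as malloc_init() does above, so once initialization has completed the common path never touches init_lock; only the first allocation from each racing thread takes the slow path into the locked region.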