Diffstat (limited to 'contrib/jemalloc/src/jemalloc.c')
-rw-r--r--   contrib/jemalloc/src/jemalloc.c   33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index 665d98f..352c98e 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -286,12 +286,30 @@ arenas_cleanup(void *arg)
malloc_mutex_unlock(&arenas_lock);
}
+static JEMALLOC_ATTR(always_inline) void
+malloc_thread_init(void)
+{
+
+ /*
+ * TSD initialization can't be safely done as a side effect of
+ * deallocation, because it is possible for a thread to do nothing but
+ * deallocate its TLS data via free(), in which case writing to TLS
+ * would cause write-after-free memory corruption. The quarantine
+ * facility *only* gets used as a side effect of deallocation, so make
+ * a best effort attempt at initializing its TSD by hooking all
+ * allocation events.
+ */
+ if (config_fill && opt_quarantine)
+ quarantine_alloc_hook();
+}
+
static JEMALLOC_ATTR(always_inline) bool
malloc_init(void)
{
- if (malloc_initialized == false)
- return (malloc_init_hard());
+ if (malloc_initialized == false && malloc_init_hard())
+ return (true);
+ malloc_thread_init();
return (false);
}
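Note: the new malloc_thread_init() and the reworked malloc_init() hook thread-specific-data (TSD) initialization into the allocation entry points only; as the comment in the hunk above explains, deallocation paths must not touch TSD, because a thread may do nothing but free() its TLS data from a destructor. Below is a minimal, self-contained sketch of that pattern; the names (thread_init_hook, xmalloc, xfree, the pthread_key_t slot) are illustrative and are not jemalloc's implementation.

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t tsd_key;
static pthread_once_t tsd_once = PTHREAD_ONCE_INIT;

static void
tsd_destructor(void *data)
{

	/* Runs at thread exit and may itself call back into free(). */
	free(data);
}

static void
tsd_key_init(void)
{

	pthread_key_create(&tsd_key, tsd_destructor);
}

/* Analogous to malloc_thread_init(): called from allocation paths only. */
static void
thread_init_hook(void)
{

	pthread_once(&tsd_once, tsd_key_init);
	if (pthread_getspecific(tsd_key) == NULL)
		pthread_setspecific(tsd_key, calloc(1, 64));
}

static void *
xmalloc(size_t size)
{

	thread_init_hook();	/* Allocation may (re)initialize TSD. */
	return (malloc(size));
}

static void
xfree(void *ptr)
{

	/* Deliberately no TSD initialization on the deallocation path. */
	free(ptr);
}

int
main(void)
{
	void *p = xmalloc(16);

	xfree(p);
	return (0);
}

Build with -pthread; the only point being illustrated is that TSD is (re)created on the allocation path and never on the deallocation path.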
@@ -1100,6 +1118,7 @@ je_realloc(void *ptr, size_t size)
if (size == 0) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(p). */
+ assert(malloc_initialized || IS_INITIALIZER);
if (config_prof) {
old_size = isalloc(ptr, true);
if (config_valgrind && opt_valgrind)
@@ -1125,6 +1144,7 @@ je_realloc(void *ptr, size_t size)
if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (config_prof) {
old_size = isalloc(ptr, true);
@@ -1328,6 +1348,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
size_t ret;
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (config_ivsalloc)
ret = ivsalloc(ptr, config_prof);
@@ -1502,6 +1523,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk;
@@ -1616,6 +1638,7 @@ je_sallocm(const void *ptr, size_t *rsize, int flags)
size_t sz;
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (config_ivsalloc)
sz = ivsalloc(ptr, config_prof);
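Note: the je_realloc, je_malloc_usable_size, je_rallocm, and je_sallocm hunks all add the same malloc_thread_init() call because a thread's first interaction with the allocator can be an operation on memory allocated by some other thread, so these entry points also make the best-effort call to initialize quarantine TSD. A hypothetical (non-jemalloc) program showing such a first contact, using only standard C and pthreads:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static void *
worker(void *arg)
{

	/*
	 * This thread's first allocator call is realloc() on memory it did
	 * not allocate; in the patched code this path still reaches
	 * malloc_thread_init() via je_realloc().
	 */
	char *p = realloc(arg, 256);
	if (p != NULL)
		strcpy(p, "resized in a fresh thread");
	free(p);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;
	void *p = malloc(64);	/* Allocated by the main thread. */

	pthread_create(&tid, NULL, worker, p);
	pthread_join(tid, NULL);
	return (0);
}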
@@ -1735,12 +1758,12 @@ _malloc_prefork(void)
/* Acquire all mutexes in a safe order. */
ctl_prefork();
+ prof_prefork();
malloc_mutex_prefork(&arenas_lock);
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
- prof_prefork();
chunk_prefork();
base_prefork();
huge_prefork();
@@ -1766,12 +1789,12 @@ _malloc_postfork(void)
huge_postfork_parent();
base_postfork_parent();
chunk_postfork_parent();
- prof_postfork_parent();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
malloc_mutex_postfork_parent(&arenas_lock);
+ prof_postfork_parent();
ctl_postfork_parent();
}
@@ -1786,12 +1809,12 @@ jemalloc_postfork_child(void)
huge_postfork_child();
base_postfork_child();
chunk_postfork_child();
- prof_postfork_child();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
malloc_mutex_postfork_child(&arenas_lock);
+ prof_postfork_child();
ctl_postfork_child();
}
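Note: the three fork-handler hunks move prof_prefork() ahead of the arenas_lock acquisition and, symmetrically, move prof_postfork_parent()/prof_postfork_child() after the arenas_lock release, so locks are still acquired in one fixed order before fork() and dropped in the reverse order afterwards. A minimal sketch of that general pthread_atfork() discipline, with illustrative lock names (lock_a, lock_b, lock_c) that do not correspond to jemalloc's mutexes:

#include <pthread.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_c = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{

	/* Acquire every lock in one fixed order before fork(). */
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	pthread_mutex_lock(&lock_c);
}

static void
postfork_parent(void)
{

	/* Release in the reverse of the acquisition order. */
	pthread_mutex_unlock(&lock_c);
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

static void
postfork_child(void)
{

	/*
	 * The child inherits stale lock state, so it is common to simply
	 * reinitialize the mutexes rather than unlock them.
	 */
	pthread_mutex_init(&lock_c, NULL);
	pthread_mutex_init(&lock_b, NULL);
	pthread_mutex_init(&lock_a, NULL);
}

int
main(void)
{
	pid_t pid;

	pthread_atfork(prefork, postfork_parent, postfork_child);
	pid = fork();
	if (pid == 0) {
		printf("child: locks usable again\n");
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent: locks released in reverse order\n");
	return (0);
}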