Diffstat (limited to 'contrib/jemalloc/src')
-rw-r--r--  contrib/jemalloc/src/arena.c       77
-rw-r--r--  contrib/jemalloc/src/chunk.c       35
-rw-r--r--  contrib/jemalloc/src/chunk_dss.c    1
-rw-r--r--  contrib/jemalloc/src/ckh.c         86
-rw-r--r--  contrib/jemalloc/src/ctl.c         20
-rw-r--r--  contrib/jemalloc/src/jemalloc.c    69
-rw-r--r--  contrib/jemalloc/src/prof.c        34
-rw-r--r--  contrib/jemalloc/src/tcache.c       9
8 files changed, 126 insertions(+), 205 deletions(-)
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
index 0c53b07..8d50f4d 100644
--- a/contrib/jemalloc/src/arena.c
+++ b/contrib/jemalloc/src/arena.c
@@ -359,13 +359,29 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
}
static inline void
-arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
+arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
+{
+
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), (npages << LG_PAGE));
+ memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
+ (npages << LG_PAGE));
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), (npages << LG_PAGE));
+}
+
+static inline void
+arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
+ VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), PAGE);
for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), PAGE);
}
static void
@@ -441,19 +457,10 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
for (i = 0; i < need_pages; i++) {
if (arena_mapbits_unzeroed_get(chunk,
run_ind+i) != 0) {
- VALGRIND_MAKE_MEM_UNDEFINED(
- (void *)((uintptr_t)
- chunk + ((run_ind+i) <<
- LG_PAGE)), PAGE);
- memset((void *)((uintptr_t)
- chunk + ((run_ind+i) <<
- LG_PAGE)), 0, PAGE);
+ arena_run_zero(chunk, run_ind+i,
+ 1);
} else if (config_debug) {
- VALGRIND_MAKE_MEM_DEFINED(
- (void *)((uintptr_t)
- chunk + ((run_ind+i) <<
- LG_PAGE)), PAGE);
- arena_chunk_validate_zeroed(
+ arena_run_page_validate_zeroed(
chunk, run_ind+i);
}
}
@@ -462,11 +469,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* The run is dirty, so all pages must be
* zeroed.
*/
- VALGRIND_MAKE_MEM_UNDEFINED((void
- *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (need_pages << LG_PAGE));
- memset((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), 0, (need_pages << LG_PAGE));
+ arena_run_zero(chunk, run_ind, need_pages);
}
}
@@ -492,19 +495,21 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
*/
if (config_debug && flag_dirty == 0 &&
arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
- arena_chunk_validate_zeroed(chunk, run_ind);
+ arena_run_page_validate_zeroed(chunk, run_ind);
for (i = 1; i < need_pages - 1; i++) {
arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
- arena_chunk_validate_zeroed(chunk, run_ind+i);
+ arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) {
+ arena_run_page_validate_zeroed(chunk,
+ run_ind+i);
+ }
}
arena_mapbits_small_set(chunk, run_ind+need_pages-1,
need_pages-1, binind, flag_dirty);
if (config_debug && flag_dirty == 0 &&
arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
0) {
- arena_chunk_validate_zeroed(chunk,
+ arena_run_page_validate_zeroed(chunk,
run_ind+need_pages-1);
}
}
@@ -1322,21 +1327,6 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
-{
-
- cassert(config_prof);
-
- if (config_prof && prof_interval != 0) {
- arena->prof_accumbytes += accumbytes;
- if (arena->prof_accumbytes >= prof_interval) {
- prof_idump();
- arena->prof_accumbytes -= prof_interval;
- }
- }
-}
-
-void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
uint64_t prof_accumbytes)
{
@@ -1347,11 +1337,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
assert(tbin->ncached == 0);
- if (config_prof) {
- malloc_mutex_lock(&arena->lock);
+ if (config_prof)
arena_prof_accum(arena, prof_accumbytes);
- malloc_mutex_unlock(&arena->lock);
- }
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1459,11 +1446,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
bin->stats.nrequests++;
}
malloc_mutex_unlock(&bin->lock);
- if (config_prof && isthreaded == false) {
- malloc_mutex_lock(&arena->lock);
+ if (config_prof && isthreaded == false)
arena_prof_accum(arena, size);
- malloc_mutex_unlock(&arena->lock);
- }
if (zero == false) {
if (config_fill) {
@@ -1480,6 +1464,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
return (ret);
@@ -1507,7 +1492,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
if (config_prof)
- arena_prof_accum(arena, size);
+ arena_prof_accum_locked(arena, size);
malloc_mutex_unlock(&arena->lock);
if (zero == false) {
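Note: the arena.c hunks above fold a repeated Valgrind idiom into the new
arena_run_zero(): the run is marked undefined, zeroed with memset(), and then
marked undefined again, because the zero fill is jemalloc's internal invariant
and the application has not yet written the memory. The debug-only validator,
arena_run_page_validate_zeroed(), works the same way in reverse: it marks the
page defined so the assert loop may read it, then restores the undefined
state. A minimal sketch of the bracket, assuming the standard memcheck client
macros (zero_region is an illustrative name, not part of the diff):

    #include <string.h>
    #include <valgrind/memcheck.h>  /* client macros; no-ops outside Valgrind */

    static void
    zero_region(void *addr, size_t len)
    {
        /* Make the region addressable-but-undefined so the internal
         * write below is legal under memcheck... */
        VALGRIND_MAKE_MEM_UNDEFINED(addr, len);
        memset(addr, 0, len);
        /* ...then re-mark it undefined, so application reads that happen
         * before an application write are still reported as errors. */
        VALGRIND_MAKE_MEM_UNDEFINED(addr, len);
    }

The same bracket is applied in chunk_dss.c below, where the zero path
previously left the freshly memset region marked defined.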
diff --git a/contrib/jemalloc/src/chunk.c b/contrib/jemalloc/src/chunk.c
index 1a3bb4f6..46e387e 100644
--- a/contrib/jemalloc/src/chunk.c
+++ b/contrib/jemalloc/src/chunk.c
@@ -78,6 +78,9 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
assert(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
+ zeroed = node->zeroed;
+ if (zeroed)
+ *zero = true;
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
@@ -114,17 +117,21 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
}
malloc_mutex_unlock(&chunks_mtx);
- zeroed = false;
- if (node != NULL) {
- if (node->zeroed) {
- zeroed = true;
- *zero = true;
- }
+ if (node != NULL)
base_node_dealloc(node);
- }
- if (zeroed == false && *zero) {
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ if (*zero) {
+ if (zeroed == false)
+ memset(ret, 0, size);
+ else if (config_debug) {
+ size_t i;
+ size_t *p = (size_t *)(uintptr_t)ret;
+
+ VALGRIND_MAKE_MEM_DEFINED(ret, size);
+ for (i = 0; i < size / sizeof(size_t); i++)
+ assert(p[i] == 0);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ }
}
return (ret);
}
@@ -194,14 +201,6 @@ label_return:
if (config_prof && opt_prof && opt_prof_gdump && gdump)
prof_gdump();
}
- if (config_debug && *zero && ret != NULL) {
- size_t i;
- size_t *p = (size_t *)(uintptr_t)ret;
-
- VALGRIND_MAKE_MEM_DEFINED(ret, size);
- for (i = 0; i < size / sizeof(size_t); i++)
- assert(p[i] == 0);
- }
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
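Note: in chunk_recycle(), node->zeroed is now snapshotted into a local while
the node is still known-live, before the chunks mutex is dropped and the node
is handed to base_node_dealloc(); the debug-only zero validation also moves
here from chunk_alloc(), next to the state it checks. A self-contained sketch
of the snapshot-before-release pattern (needs_zeroing and struct node are
illustrative, not from the diff):

    #include <stdbool.h>
    #include <stdlib.h>

    struct node {
        bool zeroed;    /* was this cached extent zero-filled? */
    };

    /* Decide whether a recycled region still needs memset(0): read the
     * flag while the node is valid, release the node, then decide. */
    static bool
    needs_zeroing(struct node *n, bool want_zero)
    {
        bool zeroed = n->zeroed;  /* snapshot before the node is freed */
        free(n);                  /* n must not be dereferenced below  */
        return (want_zero && !zeroed);
    }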
diff --git a/contrib/jemalloc/src/chunk_dss.c b/contrib/jemalloc/src/chunk_dss.c
index 24781cc..d1aea93 100644
--- a/contrib/jemalloc/src/chunk_dss.c
+++ b/contrib/jemalloc/src/chunk_dss.c
@@ -127,6 +127,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
return (ret);
}
diff --git a/contrib/jemalloc/src/ckh.c b/contrib/jemalloc/src/ckh.c
index 742a950..2f38348 100644
--- a/contrib/jemalloc/src/ckh.c
+++ b/contrib/jemalloc/src/ckh.c
@@ -70,20 +70,20 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
JEMALLOC_INLINE size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
- size_t hash1, hash2, bucket, cell;
+ size_t hashes[2], bucket, cell;
assert(ckh != NULL);
- ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+ ckh->hash(key, hashes);
/* Search primary bucket. */
- bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX)
return (cell);
/* Search secondary bucket. */
- bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
}
@@ -126,7 +126,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
{
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
- size_t hash1, hash2, bucket, tbucket;
+ size_t hashes[2], bucket, tbucket;
unsigned i;
bucket = argbucket;
@@ -155,10 +155,11 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
#endif
/* Find the alternate bucket for the evicted item. */
- ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
- tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ ckh->hash(key, hashes);
+ tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
- tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
+ - 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
@@ -192,19 +193,19 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
JEMALLOC_INLINE bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
- size_t hash1, hash2, bucket;
+ size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
- ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+ ckh->hash(key, hashes);
/* Try to insert in primary bucket. */
- bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/* Try to insert in secondary bucket. */
- bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
@@ -417,9 +418,8 @@ ckh_delete(ckh_t *ckh)
#endif
idalloc(ckh->tab);
-#ifdef JEMALLOC_DEBUG
- memset(ckh, 0x5a, sizeof(ckh_t));
-#endif
+ if (config_debug)
+ memset(ckh, 0x5a, sizeof(ckh_t));
}
size_t
@@ -526,31 +526,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
}
void
-ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+ckh_string_hash(const void *key, size_t r_hash[2])
{
- size_t ret1, ret2;
- uint64_t h;
-
- assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
- assert(hash1 != NULL);
- assert(hash2 != NULL);
-
- h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
- if (minbits <= 32) {
- /*
- * Avoid doing multiple hashes, since a single hash provides
- * enough bits.
- */
- ret1 = h & ZU(0xffffffffU);
- ret2 = h >> 32;
- } else {
- ret1 = h;
- ret2 = hash(key, strlen((const char *)key),
- UINT64_C(0x8432a476666bbc13));
- }
- *hash1 = ret1;
- *hash2 = ret2;
+ hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
@@ -564,41 +543,16 @@ ckh_string_keycomp(const void *k1, const void *k2)
}
void
-ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
- size_t *hash2)
+ckh_pointer_hash(const void *key, size_t r_hash[2])
{
- size_t ret1, ret2;
- uint64_t h;
union {
const void *v;
- uint64_t i;
+ size_t i;
} u;
- assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
- assert(hash1 != NULL);
- assert(hash2 != NULL);
-
assert(sizeof(u.v) == sizeof(u.i));
-#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
- u.i = 0;
-#endif
u.v = key;
- h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
- if (minbits <= 32) {
- /*
- * Avoid doing multiple hashes, since a single hash provides
- * enough bits.
- */
- ret1 = h & ZU(0xffffffffU);
- ret2 = h >> 32;
- } else {
- assert(SIZEOF_PTR == 8);
- ret1 = h;
- ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
- }
-
- *hash1 = ret1;
- *hash2 = ret2;
+ hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}
bool
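Note: the ckh hash callbacks change signature from (key, minbits, *hash1,
*hash2) to (key, size_t r_hash[2]); a single call to hash() now yields both
values, so the minbits bookkeeping and the second hashing pass disappear. A
hedged sketch of how a caller derives the primary and secondary bucket from
the pair (my_hash stands in for ckh->hash; bucket_of is illustrative):

    #include <stddef.h>

    /* One call fills both hash values, as ckh->hash does above. */
    extern void my_hash(const void *key, size_t r_hash[2]);

    static size_t
    bucket_of(const void *key, unsigned lg_buckets, int which)
    {
        size_t hashes[2];

        my_hash(key, hashes);
        /* which == 0 selects the primary bucket, 1 the secondary. */
        return (hashes[which] & (((size_t)1 << lg_buckets) - 1));
    }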
diff --git a/contrib/jemalloc/src/ctl.c b/contrib/jemalloc/src/ctl.c
index 6e01b1e..f2ef4e6 100644
--- a/contrib/jemalloc/src/ctl.c
+++ b/contrib/jemalloc/src/ctl.c
@@ -960,11 +960,11 @@ ctl_postfork_child(void)
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
- memcpy(oldp, (void *)&v, copylen); \
+ memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
goto label_return; \
} else \
- *(t *)oldp = v; \
+ *(t *)oldp = (v); \
} \
} while (0)
@@ -974,7 +974,7 @@ ctl_postfork_child(void)
ret = EINVAL; \
goto label_return; \
} \
- v = *(t *)newp; \
+ (v) = *(t *)newp; \
} \
} while (0)
@@ -995,7 +995,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if (l) \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1017,7 +1017,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
return (ENOENT); \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1036,7 +1036,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
\
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1060,7 +1060,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if ((c) == false) \
return (ENOENT); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1077,7 +1077,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
t oldval; \
\
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1492,6 +1492,7 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
+ unsigned narenas;
malloc_mutex_lock(&ctl_mtx);
READONLY();
@@ -1499,7 +1500,8 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = EAGAIN;
goto label_return;
}
- READ(ctl_stats.narenas - 1, unsigned);
+ narenas = ctl_stats.narenas - 1;
+ READ(narenas, unsigned);
ret = 0;
label_return:
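Note: the ctl.c changes are macro hygiene. Parenthesizing v as (v) keeps the
expansion correct when the argument is a cast or conditional expression, and
arenas_extend_ctl() copies ctl_stats.narenas - 1 into a local because READ()
takes the address of its argument, which must therefore be an lvalue. A
minimal illustration, assuming a READ-like macro (READ_DEMO and demo are
hypothetical names):

    #include <string.h>

    /* READ_DEMO mimics READ(): it takes the address of its argument. */
    #define READ_DEMO(oldp, v, t) memcpy((oldp), (void *)&(v), sizeof(t))

    static void
    demo(void *oldp, unsigned total)
    {
        unsigned narenas = total - 1;     /* materialize an lvalue */

        READ_DEMO(oldp, narenas, unsigned);
        /*
         * READ_DEMO(oldp, total - 1, unsigned) would not compile:
         * &(total - 1) takes the address of an rvalue.
         */
    }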
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index aaf5012..665d98f 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -14,17 +14,20 @@ __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
/* Runtime configuration options. */
const char *je_malloc_conf;
+bool opt_abort =
#ifdef JEMALLOC_DEBUG
-bool opt_abort = true;
-# ifdef JEMALLOC_FILL
-bool opt_junk = true;
-# else
-bool opt_junk = false;
-# endif
+ true
+#else
+ false
+#endif
+ ;
+bool opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
#else
-bool opt_abort = false;
-bool opt_junk = false;
+ false
#endif
+ ;
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
@@ -87,11 +90,13 @@ typedef struct {
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
if (opt_utrace) { \
+ int utrace_serrno = errno; \
malloc_utrace_t ut; \
ut.p = (a); \
ut.s = (b); \
ut.r = (c); \
utrace(&ut, sizeof(ut)); \
+ errno = utrace_serrno; \
} \
} while (0)
#else
@@ -281,7 +286,7 @@ arenas_cleanup(void *arg)
malloc_mutex_unlock(&arenas_lock);
}
-static inline bool
+static JEMALLOC_ATTR(always_inline) bool
malloc_init(void)
{
@@ -474,7 +479,7 @@ malloc_conf_init(void)
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) {
-#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
+#define CONF_HANDLE_BOOL(o, n) \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
if (strncmp("true", v, vlen) == 0 && \
@@ -488,16 +493,9 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} \
- hit = true; \
- } else \
- hit = false;
-#define CONF_HANDLE_BOOL(o, n) { \
- bool hit; \
- CONF_HANDLE_BOOL_HIT(o, n, hit); \
- if (hit) \
continue; \
-}
-#define CONF_HANDLE_SIZE_T(o, n, min, max) \
+ }
+#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
uintmax_t um; \
@@ -510,12 +508,22 @@ malloc_conf_init(void)
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
- } else if (um < min || um > max) { \
- malloc_conf_error( \
- "Out-of-range conf value", \
- k, klen, v, vlen); \
- } else \
- o = um; \
+ } else if (clip) { \
+ if (um < min) \
+ o = min; \
+ else if (um > max) \
+ o = max; \
+ else \
+ o = um; \
+ } else { \
+ if (um < min || um > max) { \
+ malloc_conf_error( \
+ "Out-of-range " \
+ "conf value", \
+ k, klen, v, vlen); \
+ } else \
+ o = um; \
+ } \
continue; \
}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
@@ -560,7 +568,8 @@ malloc_conf_init(void)
* config_fill.
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
- (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
+ (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
+ true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
@@ -586,14 +595,14 @@ malloc_conf_init(void)
continue;
}
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
- SIZE_T_MAX)
+ SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
if (config_fill) {
CONF_HANDLE_BOOL(opt_junk, "junk")
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
- 0, SIZE_T_MAX)
+ 0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone")
CONF_HANDLE_BOOL(opt_zero, "zero")
}
@@ -891,7 +900,7 @@ JEMALLOC_ATTR(nonnull(1))
* Avoid any uncertainty as to how many backtrace frames to ignore in
* PROF_ALLOC_PREP().
*/
-JEMALLOC_ATTR(noinline)
+JEMALLOC_NOINLINE
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
@@ -1377,7 +1386,7 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
*/
#ifdef JEMALLOC_EXPERIMENTAL
-JEMALLOC_INLINE void *
+static JEMALLOC_ATTR(always_inline) void *
iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
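Note: UTRACE() now saves and restores errno around the utrace(2) call, so
tracing cannot perturb the errno value the allocator's caller observes; the
new clip argument to CONF_HANDLE_SIZE_T likewise clamps out-of-range values
instead of rejecting them when clip is true. A sketch of the errno bracket,
usable around any tracing statement (TRACE_EVENT is an illustrative name):

    #include <errno.h>

    /* Bracket a tracing statement so a failure inside it cannot change
     * the errno the allocator's caller will see; mirrors the
     * utrace_serrno save/restore added above. */
    #define TRACE_EVENT(emit) do {                                      \
        int trace_serrno = errno;                                       \
        emit;                                                           \
        errno = trace_serrno;                                           \
    } while (0)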
diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
index 04964ef..b9f03a0 100644
--- a/contrib/jemalloc/src/prof.c
+++ b/contrib/jemalloc/src/prof.c
@@ -26,7 +26,7 @@ bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[PATH_MAX + 1];
-uint64_t prof_interval;
+uint64_t prof_interval = 0;
bool prof_promote;
/*
@@ -90,8 +90,7 @@ static bool prof_dump(bool propagate_err, const char *filename,
bool leakcheck);
static void prof_dump_filename(char *filename, char v, int64_t vseq);
static void prof_fdump(void);
-static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1,
- size_t *hash2);
+static void prof_bt_hash(const void *key, size_t r_hash[2]);
static bool prof_bt_keycomp(const void *k1, const void *k2);
static malloc_mutex_t *prof_ctx_mutex_choose(void);
@@ -1043,34 +1042,13 @@ prof_gdump(void)
}
static void
-prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+prof_bt_hash(const void *key, size_t r_hash[2])
{
- size_t ret1, ret2;
- uint64_t h;
prof_bt_t *bt = (prof_bt_t *)key;
cassert(config_prof);
- assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
- assert(hash1 != NULL);
- assert(hash2 != NULL);
- h = hash(bt->vec, bt->len * sizeof(void *),
- UINT64_C(0x94122f335b332aea));
- if (minbits <= 32) {
- /*
- * Avoid doing multiple hashes, since a single hash provides
- * enough bits.
- */
- ret1 = h & ZU(0xffffffffU);
- ret2 = h >> 32;
- } else {
- ret1 = h;
- ret2 = hash(bt->vec, bt->len * sizeof(void *),
- UINT64_C(0x8432a476666bbc13));
- }
-
- *hash1 = ret1;
- *hash2 = ret2;
+ hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}
static bool
@@ -1206,13 +1184,11 @@ prof_boot1(void)
*/
opt_prof = true;
opt_prof_gdump = false;
- prof_interval = 0;
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
prof_interval = (((uint64_t)1U) <<
opt_lg_prof_interval);
- } else
- prof_interval = 0;
+ }
}
prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c
index 47e14f3..7befdc8 100644
--- a/contrib/jemalloc/src/tcache.c
+++ b/contrib/jemalloc/src/tcache.c
@@ -97,9 +97,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
arena_bin_t *bin = &arena->bins[binind];
if (config_prof && arena == tcache->arena) {
- malloc_mutex_lock(&arena->lock);
arena_prof_accum(arena, tcache->prof_accumbytes);
- malloc_mutex_unlock(&arena->lock);
tcache->prof_accumbytes = 0;
}
@@ -180,7 +178,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
malloc_mutex_lock(&arena->lock);
if ((config_prof || config_stats) && arena == tcache->arena) {
if (config_prof) {
- arena_prof_accum(arena,
+ arena_prof_accum_locked(arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
@@ -343,11 +341,8 @@ tcache_destroy(tcache_t *tcache)
}
}
- if (config_prof && tcache->prof_accumbytes > 0) {
- malloc_mutex_lock(&tcache->arena->lock);
+ if (config_prof && tcache->prof_accumbytes > 0)
arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
- malloc_mutex_unlock(&tcache->arena->lock);
- }
tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {
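Note: together with the arena.c hunks, these call sites split profiling
accumulation into two entry points: arena_prof_accum_locked() for paths that
already hold arena->lock (tcache_bin_flush_large), and arena_prof_accum(),
which now appears to take the lock itself (the header change is not part of
this diff, so that is inferred from the call sites dropping their explicit
lock/unlock pairs). A hedged sketch of the pattern, with a pthread mutex
standing in for malloc_mutex_t and _demo names that are illustrative only:

    #include <stdint.h>
    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;          /* stands in for malloc_mutex_t */
        uint64_t        prof_accumbytes;
    } demo_arena_t;

    /* Variant for callers that already hold arena->lock, as in
     * tcache_bin_flush_large() above. */
    static void
    prof_accum_locked_demo(demo_arena_t *arena, uint64_t bytes)
    {
        arena->prof_accumbytes += bytes;
        /* ...trigger a profile dump when an interval boundary is crossed... */
    }

    /* Self-locking variant: call sites such as tcache_destroy() and
     * tcache_bin_flush_small() no longer need their own lock/unlock pair. */
    static void
    prof_accum_demo(demo_arena_t *arena, uint64_t bytes)
    {
        pthread_mutex_lock(&arena->lock);
        prof_accum_locked_demo(arena, bytes);
        pthread_mutex_unlock(&arena->lock);
    }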