author     jasone <jasone@FreeBSD.org>    2012-04-17 07:22:14 +0000
committer  jasone <jasone@FreeBSD.org>    2012-04-17 07:22:14 +0000
commit     cbeacb7c46f3a3650e5dbefa9a1a18bc9943a8cc (patch)
tree       24efdb5b31d087479e78f72f3b772bd5b02e470c /contrib/jemalloc/src
parent     1bc364bf7eebf6139e4f968987974484d35c5cb4 (diff)
Import jemalloc 9ef7f5dc34ff02f50d401e41c8d9a4a928e7c2aa (dev branch, prior
to 3.0.0 release) as contrib/jemalloc, and integrate it into libc.  The code
being imported by this commit diverged from lib/libc/stdlib/malloc.c in
March 2010, which means that a portion of the jemalloc 1.0.0 ChangeLog
entries are relevant, as are the entries for all subsequent releases.
Diffstat (limited to 'contrib/jemalloc/src')
-rw-r--r--  contrib/jemalloc/src/arena.c        2248
-rw-r--r--  contrib/jemalloc/src/atomic.c          2
-rw-r--r--  contrib/jemalloc/src/base.c          138
-rw-r--r--  contrib/jemalloc/src/bitmap.c         90
-rw-r--r--  contrib/jemalloc/src/chunk.c         304
-rw-r--r--  contrib/jemalloc/src/chunk_dss.c     159
-rw-r--r--  contrib/jemalloc/src/chunk_mmap.c    207
-rw-r--r--  contrib/jemalloc/src/ckh.c           609
-rw-r--r--  contrib/jemalloc/src/ctl.c          1385
-rw-r--r--  contrib/jemalloc/src/extent.c         39
-rw-r--r--  contrib/jemalloc/src/hash.c            2
-rw-r--r--  contrib/jemalloc/src/huge.c          306
-rw-r--r--  contrib/jemalloc/src/jemalloc.c     1733
-rw-r--r--  contrib/jemalloc/src/mb.c              2
-rw-r--r--  contrib/jemalloc/src/mutex.c         153
-rw-r--r--  contrib/jemalloc/src/prof.c         1243
-rw-r--r--  contrib/jemalloc/src/quarantine.c    163
-rw-r--r--  contrib/jemalloc/src/rtree.c          46
-rw-r--r--  contrib/jemalloc/src/stats.c         550
-rw-r--r--  contrib/jemalloc/src/tcache.c        435
-rw-r--r--  contrib/jemalloc/src/tsd.c            72
-rw-r--r--  contrib/jemalloc/src/util.c          635
22 files changed, 10521 insertions, 0 deletions
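
The bulk of the import is arena.c, whose diff follows.  As a reading aid, the
dirty-page purging policy governed by opt_lg_dirty_mult (declared near the top
of arena.c and enforced in arena_maybe_purge()) can be restated as a small
standalone predicate.  This is an illustrative sketch only; the helper name
arena_should_purge() is invented here and is not part of the imported sources:

#include <stdbool.h>
#include <sys/types.h>	/* size_t, ssize_t */

/*
 * Illustrative sketch (not from the commit): the condition under which
 * arena_maybe_purge() in arena.c below calls arena_purge().  npurgatory
 * counts dirty pages already claimed by in-flight purges, so purging keeps
 * ndirty roughly bounded by nactive >> lg_dirty_mult.
 */
bool
arena_should_purge(size_t nactive, size_t ndirty, size_t npurgatory,
    ssize_t lg_dirty_mult, size_t chunk_npages)
{
	return (lg_dirty_mult >= 0 && ndirty > npurgatory &&
	    ndirty - npurgatory > chunk_npages &&
	    (nactive >> lg_dirty_mult) < ndirty - npurgatory);
}
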
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
new file mode 100644
index 0000000..989034d
--- /dev/null
+++ b/contrib/jemalloc/src/arena.c
@@ -0,0 +1,2248 @@
+#define JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
+arena_bin_info_t arena_bin_info[NBINS];
+
+JEMALLOC_ATTR(aligned(CACHELINE))
+const uint8_t small_size2bin[] = {
+#define S2B_8(i) i,
+#define S2B_16(i) S2B_8(i) S2B_8(i)
+#define S2B_32(i) S2B_16(i) S2B_16(i)
+#define S2B_64(i) S2B_32(i) S2B_32(i)
+#define S2B_128(i) S2B_64(i) S2B_64(i)
+#define S2B_256(i) S2B_128(i) S2B_128(i)
+#define S2B_512(i) S2B_256(i) S2B_256(i)
+#define S2B_1024(i) S2B_512(i) S2B_512(i)
+#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
+#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
+#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
+#define SIZE_CLASS(bin, delta, size) \
+ S2B_##delta(bin)
+ SIZE_CLASSES
+#undef S2B_8
+#undef S2B_16
+#undef S2B_32
+#undef S2B_64
+#undef S2B_128
+#undef S2B_256
+#undef S2B_512
+#undef S2B_1024
+#undef S2B_2048
+#undef S2B_4096
+#undef S2B_8192
+#undef SIZE_CLASS
+};
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
+ bool large, bool zero);
+static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
+static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
+static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
+ bool zero);
+static void arena_purge(arena_t *arena, bool all);
+static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
+static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize);
+static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
+static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
+static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
+static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
+static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
+static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
+static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
+static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin);
+static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin);
+static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin);
+static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t size);
+static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
+static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero);
+static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
+ size_t min_run_size);
+static void bin_info_init(void);
+
+/******************************************************************************/
+
+static inline int
+arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+{
+ uintptr_t a_mapelm = (uintptr_t)a;
+ uintptr_t b_mapelm = (uintptr_t)b;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
+}
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
+ u.rb_link, arena_run_comp)
+
+static inline int
+arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+{
+ int ret;
+ size_t a_size = a->bits & ~PAGE_MASK;
+ size_t b_size = b->bits & ~PAGE_MASK;
+
+ assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
+ CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));
+
+ ret = (a_size > b_size) - (a_size < b_size);
+ if (ret == 0) {
+ uintptr_t a_mapelm, b_mapelm;
+
+ if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
+ a_mapelm = (uintptr_t)a;
+ else {
+ /*
+ * Treat keys as though they are lower than anything
+ * else.
+ */
+ a_mapelm = 0;
+ }
+ b_mapelm = (uintptr_t)b;
+
+ ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
+ }
+
+ return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
+ u.rb_link, arena_avail_comp)
+
+static inline void *
+arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
+{
+ void *ret;
+ unsigned regind;
+ bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
+ (uintptr_t)bin_info->bitmap_offset);
+
+ assert(run->nfree > 0);
+ assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
+
+ regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
+ ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
+ (uintptr_t)(bin_info->reg_interval * regind));
+ run->nfree--;
+ if (regind == run->nextind)
+ run->nextind++;
+ assert(regind < run->nextind);
+ return (ret);
+}
+
+static inline void
+arena_run_reg_dalloc(arena_run_t *run, void *ptr)
+{
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ size_t binind = arena_bin_index(chunk->arena, run->bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ unsigned regind = arena_run_regind(run, bin_info, ptr);
+ bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
+ (uintptr_t)bin_info->bitmap_offset);
+
+ assert(run->nfree < bin_info->nregs);
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr - ((uintptr_t)run +
+ (uintptr_t)bin_info->reg0_offset)) %
+ (uintptr_t)bin_info->reg_interval == 0);
+ assert((uintptr_t)ptr >= (uintptr_t)run +
+ (uintptr_t)bin_info->reg0_offset);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
+
+ bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
+ run->nfree++;
+}
+
+static inline void
+arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
+{
+ size_t i;
+ UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
+
+ for (i = 0; i < PAGE / sizeof(size_t); i++)
+ assert(p[i] == 0);
+}
+
+static void
+arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
+ bool zero)
+{
+ arena_chunk_t *chunk;
+ size_t run_ind, total_pages, need_pages, rem_pages, i;
+ size_t flag_dirty;
+ arena_avail_tree_t *runs_avail;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
+ runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
+ &arena->runs_avail_clean;
+ total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
+ LG_PAGE;
+ assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
+ CHUNK_MAP_DIRTY) == flag_dirty);
+ need_pages = (size >> LG_PAGE);
+ assert(need_pages > 0);
+ assert(need_pages <= total_pages);
+ rem_pages = total_pages - need_pages;
+
+ arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
+ if (config_stats) {
+ /*
+ * Update stats_cactive if nactive is crossing a chunk
+ * multiple.
+ */
+ size_t cactive_diff = CHUNK_CEILING((arena->nactive +
+ need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+ LG_PAGE);
+ if (cactive_diff != 0)
+ stats_cactive_add(cactive_diff);
+ }
+ arena->nactive += need_pages;
+
+ /* Keep track of trailing unused pages for later use. */
+ if (rem_pages > 0) {
+ if (flag_dirty != 0) {
+ chunk->map[run_ind+need_pages-map_bias].bits =
+ (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
+ chunk->map[run_ind+total_pages-1-map_bias].bits =
+ (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
+ } else {
+ chunk->map[run_ind+need_pages-map_bias].bits =
+ (rem_pages << LG_PAGE) |
+ (chunk->map[run_ind+need_pages-map_bias].bits &
+ CHUNK_MAP_UNZEROED);
+ chunk->map[run_ind+total_pages-1-map_bias].bits =
+ (rem_pages << LG_PAGE) |
+ (chunk->map[run_ind+total_pages-1-map_bias].bits &
+ CHUNK_MAP_UNZEROED);
+ }
+ arena_avail_tree_insert(runs_avail,
+ &chunk->map[run_ind+need_pages-map_bias]);
+ }
+
+ /* Update dirty page accounting. */
+ if (flag_dirty != 0) {
+ chunk->ndirty -= need_pages;
+ arena->ndirty -= need_pages;
+ }
+
+ /*
+ * Update the page map separately for large vs. small runs, since it is
+ * possible to avoid iteration for large mallocs.
+ */
+ if (large) {
+ if (zero) {
+ if (flag_dirty == 0) {
+ /*
+ * The run is clean, so some pages may be
+ * zeroed (i.e. never before touched).
+ */
+ for (i = 0; i < need_pages; i++) {
+ if ((chunk->map[run_ind+i-map_bias].bits
+ & CHUNK_MAP_UNZEROED) != 0) {
+ VALGRIND_MAKE_MEM_UNDEFINED(
+ (void *)((uintptr_t)
+ chunk + ((run_ind+i) <<
+ LG_PAGE)), PAGE);
+ memset((void *)((uintptr_t)
+ chunk + ((run_ind+i) <<
+ LG_PAGE)), 0, PAGE);
+ } else if (config_debug) {
+ VALGRIND_MAKE_MEM_DEFINED(
+ (void *)((uintptr_t)
+ chunk + ((run_ind+i) <<
+ LG_PAGE)), PAGE);
+ arena_chunk_validate_zeroed(
+ chunk, run_ind+i);
+ }
+ }
+ } else {
+ /*
+ * The run is dirty, so all pages must be
+ * zeroed.
+ */
+ VALGRIND_MAKE_MEM_UNDEFINED((void
+ *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), (need_pages << LG_PAGE));
+ memset((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), 0, (need_pages << LG_PAGE));
+ }
+ }
+
+ /*
+ * Set the last element first, in case the run only contains one
+ * page (i.e. both statements set the same element).
+ */
+ chunk->map[run_ind+need_pages-1-map_bias].bits =
+ CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
+ chunk->map[run_ind-map_bias].bits = size | flag_dirty |
+ CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ } else {
+ assert(zero == false);
+ /*
+ * Propagate the dirty and unzeroed flags to the allocated
+ * small run, so that arena_dalloc_bin_run() has the ability to
+ * conditionally trim clean pages.
+ */
+ chunk->map[run_ind-map_bias].bits =
+ (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
+ CHUNK_MAP_ALLOCATED | flag_dirty;
+ /*
+ * The first page will always be dirtied during small run
+ * initialization, so a validation failure here would not
+ * actually cause an observable failure.
+ */
+ if (config_debug && flag_dirty == 0 &&
+ (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
+ == 0)
+ arena_chunk_validate_zeroed(chunk, run_ind);
+ for (i = 1; i < need_pages - 1; i++) {
+ chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE)
+ | (chunk->map[run_ind+i-map_bias].bits &
+ CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
+ if (config_debug && flag_dirty == 0 &&
+ (chunk->map[run_ind+i-map_bias].bits &
+ CHUNK_MAP_UNZEROED) == 0)
+ arena_chunk_validate_zeroed(chunk, run_ind+i);
+ }
+ chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
+ - 1) << LG_PAGE) |
+ (chunk->map[run_ind+need_pages-1-map_bias].bits &
+ CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
+ if (config_debug && flag_dirty == 0 &&
+ (chunk->map[run_ind+need_pages-1-map_bias].bits &
+ CHUNK_MAP_UNZEROED) == 0) {
+ arena_chunk_validate_zeroed(chunk,
+ run_ind+need_pages-1);
+ }
+ }
+}
+
+static arena_chunk_t *
+arena_chunk_alloc(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+ size_t i;
+
+ if (arena->spare != NULL) {
+ arena_avail_tree_t *runs_avail;
+
+ chunk = arena->spare;
+ arena->spare = NULL;
+
+ /* Insert the run into the appropriate runs_avail_* tree. */
+ if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
+ runs_avail = &arena->runs_avail_clean;
+ else
+ runs_avail = &arena->runs_avail_dirty;
+ assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
+ assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
+ == arena_maxclass);
+ assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
+ (chunk->map[chunk_npages-1-map_bias].bits &
+ CHUNK_MAP_DIRTY));
+ arena_avail_tree_insert(runs_avail, &chunk->map[0]);
+ } else {
+ bool zero;
+ size_t unzeroed;
+
+ zero = false;
+ malloc_mutex_unlock(&arena->lock);
+ chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
+ false, &zero);
+ malloc_mutex_lock(&arena->lock);
+ if (chunk == NULL)
+ return (NULL);
+ if (config_stats)
+ arena->stats.mapped += chunksize;
+
+ chunk->arena = arena;
+ ql_elm_new(chunk, link_dirty);
+ chunk->dirtied = false;
+
+ /*
+ * Claim that no pages are in use, since the header is merely
+ * overhead.
+ */
+ chunk->ndirty = 0;
+
+ /*
+ * Initialize the map to contain one maximal free untouched run.
+ * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
+ * chunk.
+ */
+ unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
+ chunk->map[0].bits = arena_maxclass | unzeroed;
+ /*
+ * There is no need to initialize the internal page map entries
+ * unless the chunk is not zeroed.
+ */
+ if (zero == false) {
+ for (i = map_bias+1; i < chunk_npages-1; i++)
+ chunk->map[i-map_bias].bits = unzeroed;
+ } else if (config_debug) {
+ for (i = map_bias+1; i < chunk_npages-1; i++)
+ assert(chunk->map[i-map_bias].bits == unzeroed);
+ }
+ chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
+ unzeroed;
+
+ /* Insert the run into the runs_avail_clean tree. */
+ arena_avail_tree_insert(&arena->runs_avail_clean,
+ &chunk->map[0]);
+ }
+
+ return (chunk);
+}
+
+static void
+arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+{
+ arena_avail_tree_t *runs_avail;
+
+ /*
+ * Remove run from the appropriate runs_avail_* tree, so that the arena
+ * does not use it.
+ */
+ if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
+ runs_avail = &arena->runs_avail_clean;
+ else
+ runs_avail = &arena->runs_avail_dirty;
+ arena_avail_tree_remove(runs_avail, &chunk->map[0]);
+
+ if (arena->spare != NULL) {
+ arena_chunk_t *spare = arena->spare;
+
+ arena->spare = chunk;
+ if (spare->dirtied) {
+ ql_remove(&chunk->arena->chunks_dirty, spare,
+ link_dirty);
+ arena->ndirty -= spare->ndirty;
+ }
+ malloc_mutex_unlock(&arena->lock);
+ chunk_dealloc((void *)spare, chunksize, true);
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats)
+ arena->stats.mapped -= chunksize;
+ } else
+ arena->spare = chunk;
+}
+
+static arena_run_t *
+arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_run_t *run;
+ arena_chunk_map_t *mapelm, key;
+
+ assert(size <= arena_maxclass);
+ assert((size & PAGE_MASK) == 0);
+
+ /* Search the arena's chunks for the lowest best fit. */
+ key.bits = size | CHUNK_MAP_KEY;
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
+ if (mapelm != NULL) {
+ arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+ size_t pageind = (((uintptr_t)mapelm -
+ (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ + map_bias;
+
+ run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+ LG_PAGE));
+ arena_run_split(arena, run, size, large, zero);
+ return (run);
+ }
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
+ if (mapelm != NULL) {
+ arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+ size_t pageind = (((uintptr_t)mapelm -
+ (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ + map_bias;
+
+ run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+ LG_PAGE));
+ arena_run_split(arena, run, size, large, zero);
+ return (run);
+ }
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate the run.
+ */
+ chunk = arena_chunk_alloc(arena);
+ if (chunk != NULL) {
+ run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
+ arena_run_split(arena, run, size, large, zero);
+ return (run);
+ }
+
+ /*
+ * arena_chunk_alloc() failed, but another thread may have made
+ * sufficient memory available while this one dropped arena->lock in
+ * arena_chunk_alloc(), so search one more time.
+ */
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
+ if (mapelm != NULL) {
+ arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+ size_t pageind = (((uintptr_t)mapelm -
+ (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ + map_bias;
+
+ run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+ LG_PAGE));
+ arena_run_split(arena, run, size, large, zero);
+ return (run);
+ }
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
+ if (mapelm != NULL) {
+ arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+ size_t pageind = (((uintptr_t)mapelm -
+ (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ + map_bias;
+
+ run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+ LG_PAGE));
+ arena_run_split(arena, run, size, large, zero);
+ return (run);
+ }
+
+ return (NULL);
+}
+
+static inline void
+arena_maybe_purge(arena_t *arena)
+{
+
+ /* Enforce opt_lg_dirty_mult. */
+ if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
+ (arena->ndirty - arena->npurgatory) > chunk_npages &&
+ (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
+ arena->npurgatory))
+ arena_purge(arena, false);
+}
+
+static inline void
+arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
+{
+ ql_head(arena_chunk_map_t) mapelms;
+ arena_chunk_map_t *mapelm;
+ size_t pageind, flag_unzeroed;
+ size_t ndirty;
+ size_t nmadvise;
+
+ ql_new(&mapelms);
+
+ flag_unzeroed =
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+ /*
+ * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
+ * mappings, but not for file-backed mappings.
+ */
+ 0
+#else
+ CHUNK_MAP_UNZEROED
+#endif
+ ;
+
+ /*
+ * If chunk is the spare, temporarily re-allocate it, 1) so that its
+ * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
+ * completely discarded by another thread while arena->lock is dropped
+ * by this thread. Note that the arena_run_dalloc() call will
+ * implicitly deallocate the chunk, so no explicit action is required
+ * in this function to deallocate the chunk.
+ *
+ * Note that once a chunk contains dirty pages, it cannot again contain
+ * a single run unless 1) it is a dirty run, or 2) this function purges
+ * dirty pages and causes the transition to a single clean run. Thus
+ * (chunk == arena->spare) is possible, but it is not possible for
+ * this function to be called on the spare unless it contains a dirty
+ * run.
+ */
+ if (chunk == arena->spare) {
+ assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
+ arena_chunk_alloc(arena);
+ }
+
+ /* Temporarily allocate all free dirty runs within chunk. */
+ for (pageind = map_bias; pageind < chunk_npages;) {
+ mapelm = &chunk->map[pageind-map_bias];
+ if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
+ size_t npages;
+
+ npages = mapelm->bits >> LG_PAGE;
+ assert(pageind + npages <= chunk_npages);
+ if (mapelm->bits & CHUNK_MAP_DIRTY) {
+ size_t i;
+
+ arena_avail_tree_remove(
+ &arena->runs_avail_dirty, mapelm);
+
+ mapelm->bits = (npages << LG_PAGE) |
+ flag_unzeroed | CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED;
+ /*
+ * Update internal elements in the page map, so
+ * that CHUNK_MAP_UNZEROED is properly set.
+ */
+ for (i = 1; i < npages - 1; i++) {
+ chunk->map[pageind+i-map_bias].bits =
+ flag_unzeroed;
+ }
+ if (npages > 1) {
+ chunk->map[
+ pageind+npages-1-map_bias].bits =
+ flag_unzeroed | CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED;
+ }
+
+ if (config_stats) {
+ /*
+ * Update stats_cactive if nactive is
+ * crossing a chunk multiple.
+ */
+ size_t cactive_diff =
+ CHUNK_CEILING((arena->nactive +
+ npages) << LG_PAGE) -
+ CHUNK_CEILING(arena->nactive <<
+ LG_PAGE);
+ if (cactive_diff != 0)
+ stats_cactive_add(cactive_diff);
+ }
+ arena->nactive += npages;
+ /* Append to list for later processing. */
+ ql_elm_new(mapelm, u.ql_link);
+ ql_tail_insert(&mapelms, mapelm, u.ql_link);
+ }
+
+ pageind += npages;
+ } else {
+ /* Skip allocated run. */
+ if (mapelm->bits & CHUNK_MAP_LARGE)
+ pageind += mapelm->bits >> LG_PAGE;
+ else {
+ arena_run_t *run = (arena_run_t *)((uintptr_t)
+ chunk + (uintptr_t)(pageind << LG_PAGE));
+
+ assert((mapelm->bits >> LG_PAGE) == 0);
+ size_t binind = arena_bin_index(arena,
+ run->bin);
+ arena_bin_info_t *bin_info =
+ &arena_bin_info[binind];
+ pageind += bin_info->run_size >> LG_PAGE;
+ }
+ }
+ }
+ assert(pageind == chunk_npages);
+
+ if (config_debug)
+ ndirty = chunk->ndirty;
+ if (config_stats)
+ arena->stats.purged += chunk->ndirty;
+ arena->ndirty -= chunk->ndirty;
+ chunk->ndirty = 0;
+ ql_remove(&arena->chunks_dirty, chunk, link_dirty);
+ chunk->dirtied = false;
+
+ malloc_mutex_unlock(&arena->lock);
+ if (config_stats)
+ nmadvise = 0;
+ ql_foreach(mapelm, &mapelms, u.ql_link) {
+ size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+ sizeof(arena_chunk_map_t)) + map_bias;
+ size_t npages = mapelm->bits >> LG_PAGE;
+
+ assert(pageind + npages <= chunk_npages);
+ assert(ndirty >= npages);
+ if (config_debug)
+ ndirty -= npages;
+
+ madvise((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
+ (npages << LG_PAGE), JEMALLOC_MADV_PURGE);
+ if (config_stats)
+ nmadvise++;
+ }
+ assert(ndirty == 0);
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats)
+ arena->stats.nmadvise += nmadvise;
+
+ /* Deallocate runs. */
+ for (mapelm = ql_first(&mapelms); mapelm != NULL;
+ mapelm = ql_first(&mapelms)) {
+ size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+ sizeof(arena_chunk_map_t)) + map_bias;
+ arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+ (uintptr_t)(pageind << LG_PAGE));
+
+ ql_remove(&mapelms, mapelm, u.ql_link);
+ arena_run_dalloc(arena, run, false);
+ }
+}
+
+static void
+arena_purge(arena_t *arena, bool all)
+{
+ arena_chunk_t *chunk;
+ size_t npurgatory;
+ if (config_debug) {
+ size_t ndirty = 0;
+
+ ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
+ assert(chunk->dirtied);
+ ndirty += chunk->ndirty;
+ }
+ assert(ndirty == arena->ndirty);
+ }
+ assert(arena->ndirty > arena->npurgatory || all);
+ assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
+ assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
+ arena->npurgatory) || all);
+
+ if (config_stats)
+ arena->stats.npurge++;
+
+ /*
+ * Compute the minimum number of pages that this thread should try to
+ * purge, and add the result to arena->npurgatory. This will keep
+ * multiple threads from racing to reduce ndirty below the threshold.
+ */
+ npurgatory = arena->ndirty - arena->npurgatory;
+ if (all == false) {
+ assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
+ npurgatory -= arena->nactive >> opt_lg_dirty_mult;
+ }
+ arena->npurgatory += npurgatory;
+
+ while (npurgatory > 0) {
+ /* Get next chunk with dirty pages. */
+ chunk = ql_first(&arena->chunks_dirty);
+ if (chunk == NULL) {
+ /*
+ * This thread was unable to purge as many pages as
+ * originally intended, due to races with other threads
+ * that either did some of the purging work, or re-used
+ * dirty pages.
+ */
+ arena->npurgatory -= npurgatory;
+ return;
+ }
+ while (chunk->ndirty == 0) {
+ ql_remove(&arena->chunks_dirty, chunk, link_dirty);
+ chunk->dirtied = false;
+ chunk = ql_first(&arena->chunks_dirty);
+ if (chunk == NULL) {
+ /* Same logic as for above. */
+ arena->npurgatory -= npurgatory;
+ return;
+ }
+ }
+
+ if (chunk->ndirty > npurgatory) {
+ /*
+ * This thread will, at a minimum, purge all the dirty
+ * pages in chunk, so set npurgatory to reflect this
+ * thread's commitment to purge the pages. This tends
+ * to reduce the chances of the following scenario:
+ *
+ * 1) This thread sets arena->npurgatory such that
+ * (arena->ndirty - arena->npurgatory) is at the
+ * threshold.
+ * 2) This thread drops arena->lock.
+ * 3) Another thread causes one or more pages to be
+ * dirtied, and immediately determines that it must
+ * purge dirty pages.
+ *
+ * If this scenario *does* play out, that's okay,
+ * because all of the purging work being done really
+ * needs to happen.
+ */
+ arena->npurgatory += chunk->ndirty - npurgatory;
+ npurgatory = chunk->ndirty;
+ }
+
+ arena->npurgatory -= chunk->ndirty;
+ npurgatory -= chunk->ndirty;
+ arena_chunk_purge(arena, chunk);
+ }
+}
+
+void
+arena_purge_all(arena_t *arena)
+{
+
+ malloc_mutex_lock(&arena->lock);
+ arena_purge(arena, true);
+ malloc_mutex_unlock(&arena->lock);
+}
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+{
+ arena_chunk_t *chunk;
+ size_t size, run_ind, run_pages, flag_dirty;
+ arena_avail_tree_t *runs_avail;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ assert(run_ind >= map_bias);
+ assert(run_ind < chunk_npages);
+ if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
+ size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
+ assert(size == PAGE ||
+ (chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
+ ~PAGE_MASK) == 0);
+ assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
+ CHUNK_MAP_LARGE) != 0);
+ assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
+ CHUNK_MAP_ALLOCATED) != 0);
+ } else {
+ size_t binind = arena_bin_index(arena, run->bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ size = bin_info->run_size;
+ }
+ run_pages = (size >> LG_PAGE);
+ if (config_stats) {
+ /*
+ * Update stats_cactive if nactive is crossing a chunk
+ * multiple.
+ */
+ size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
+ CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
+ if (cactive_diff != 0)
+ stats_cactive_sub(cactive_diff);
+ }
+ arena->nactive -= run_pages;
+
+ /*
+ * The run is dirty if the caller claims to have dirtied it, as well as
+ * if it was already dirty before being allocated.
+ */
+ if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
+ dirty = true;
+ flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
+ runs_avail = dirty ? &arena->runs_avail_dirty :
+ &arena->runs_avail_clean;
+
+ /* Mark pages as unallocated in the chunk map. */
+ if (dirty) {
+ chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
+ chunk->map[run_ind+run_pages-1-map_bias].bits = size |
+ CHUNK_MAP_DIRTY;
+
+ chunk->ndirty += run_pages;
+ arena->ndirty += run_pages;
+ } else {
+ chunk->map[run_ind-map_bias].bits = size |
+ (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
+ chunk->map[run_ind+run_pages-1-map_bias].bits = size |
+ (chunk->map[run_ind+run_pages-1-map_bias].bits &
+ CHUNK_MAP_UNZEROED);
+ }
+
+ /* Try to coalesce forward. */
+ if (run_ind + run_pages < chunk_npages &&
+ (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
+ == 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
+ CHUNK_MAP_DIRTY) == flag_dirty) {
+ size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
+ ~PAGE_MASK;
+ size_t nrun_pages = nrun_size >> LG_PAGE;
+
+ /*
+ * Remove successor from runs_avail; the coalesced run is
+ * inserted later.
+ */
+ assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
+ & ~PAGE_MASK) == nrun_size);
+ assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
+ & CHUNK_MAP_ALLOCATED) == 0);
+ assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
+ & CHUNK_MAP_DIRTY) == flag_dirty);
+ arena_avail_tree_remove(runs_avail,
+ &chunk->map[run_ind+run_pages-map_bias]);
+
+ size += nrun_size;
+ run_pages += nrun_pages;
+
+ chunk->map[run_ind-map_bias].bits = size |
+ (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
+ chunk->map[run_ind+run_pages-1-map_bias].bits = size |
+ (chunk->map[run_ind+run_pages-1-map_bias].bits &
+ CHUNK_MAP_FLAGS_MASK);
+ }
+
+ /* Try to coalesce backward. */
+ if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
+ CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
+ CHUNK_MAP_DIRTY) == flag_dirty) {
+ size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
+ ~PAGE_MASK;
+ size_t prun_pages = prun_size >> LG_PAGE;
+
+ run_ind -= prun_pages;
+
+ /*
+ * Remove predecessor from runs_avail; the coalesced run is
+ * inserted later.
+ */
+ assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
+ == prun_size);
+ assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
+ == 0);
+ assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
+ == flag_dirty);
+ arena_avail_tree_remove(runs_avail,
+ &chunk->map[run_ind-map_bias]);
+
+ size += prun_size;
+ run_pages += prun_pages;
+
+ chunk->map[run_ind-map_bias].bits = size |
+ (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
+ chunk->map[run_ind+run_pages-1-map_bias].bits = size |
+ (chunk->map[run_ind+run_pages-1-map_bias].bits &
+ CHUNK_MAP_FLAGS_MASK);
+ }
+
+ /* Insert into runs_avail, now that coalescing is complete. */
+ assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
+ (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
+ assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
+ (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
+ arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);
+
+ if (dirty) {
+ /*
+ * Insert into chunks_dirty before potentially calling
+ * arena_chunk_dealloc(), so that chunks_dirty and
+ * arena->ndirty are consistent.
+ */
+ if (chunk->dirtied == false) {
+ ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
+ chunk->dirtied = true;
+ }
+ }
+
+ /*
+ * Deallocate chunk if it is now completely unused. The bit
+ * manipulation checks whether the first run is unallocated and extends
+ * to the end of the chunk.
+ */
+ if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
+ arena_maxclass)
+ arena_chunk_dealloc(arena, chunk);
+
+ /*
+ * It is okay to do dirty page processing here even if the chunk was
+ * deallocated above, since in that case it is the spare. Waiting
+ * until after possible chunk deallocation to do dirty processing
+ * allows for an old spare to be fully deallocated, thus decreasing the
+ * chances of spuriously crossing the dirty page purging threshold.
+ */
+ if (dirty)
+ arena_maybe_purge(arena);
+}
+
+static void
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t oldsize, size_t newsize)
+{
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ size_t head_npages = (oldsize - newsize) >> LG_PAGE;
+ size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;
+
+ assert(oldsize > newsize);
+
+ /*
+ * Update the chunk map so that arena_run_dalloc() can treat the
+ * leading run as separately allocated. Set the last element of each
+ * run first, in case of single-page runs.
+ */
+ assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
+ assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
+ chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
+ (chunk->map[pageind+head_npages-1-map_bias].bits &
+ CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ chunk->map[pageind-map_bias].bits = (oldsize - newsize)
+ | flag_dirty | (chunk->map[pageind-map_bias].bits &
+ CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+ if (config_debug) {
+ UNUSED size_t tail_npages = newsize >> LG_PAGE;
+ assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
+ .bits & ~PAGE_MASK) == 0);
+ assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
+ .bits & CHUNK_MAP_DIRTY) == flag_dirty);
+ assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
+ .bits & CHUNK_MAP_LARGE) != 0);
+ assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
+ .bits & CHUNK_MAP_ALLOCATED) != 0);
+ }
+ chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
+ (chunk->map[pageind+head_npages-map_bias].bits &
+ CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+ arena_run_dalloc(arena, run, false);
+}
+
+static void
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t oldsize, size_t newsize, bool dirty)
+{
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ size_t head_npages = newsize >> LG_PAGE;
+ size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
+ size_t flag_dirty = chunk->map[pageind-map_bias].bits &
+ CHUNK_MAP_DIRTY;
+
+ assert(oldsize > newsize);
+
+ /*
+ * Update the chunk map so that arena_run_dalloc() can treat the
+ * trailing run as separately allocated. Set the last element of each
+ * run first, in case of single-page runs.
+ */
+ assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
+ assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
+ chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
+ (chunk->map[pageind+head_npages-1-map_bias].bits &
+ CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
+ (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
+ CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+ assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
+ ~PAGE_MASK) == 0);
+ assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
+ CHUNK_MAP_LARGE) != 0);
+ assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
+ CHUNK_MAP_ALLOCATED) != 0);
+ chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
+ flag_dirty |
+ (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
+ CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
+ flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
+ CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+ arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
+ dirty);
+}
+
+static arena_run_t *
+arena_bin_runs_first(arena_bin_t *bin)
+{
+ arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
+ if (mapelm != NULL) {
+ arena_chunk_t *chunk;
+ size_t pageind;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
+ pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+ sizeof(arena_chunk_map_t))) + map_bias;
+ arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+ (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
+ LG_PAGE));
+ return (run);
+ }
+
+ return (NULL);
+}
+
+static void
+arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
+{
+ arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
+
+ assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
+
+ arena_run_tree_insert(&bin->runs, mapelm);
+}
+
+static void
+arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
+{
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
+
+ assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
+
+ arena_run_tree_remove(&bin->runs, mapelm);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+{
+ arena_run_t *run = arena_bin_runs_first(bin);
+ if (run != NULL) {
+ arena_bin_runs_remove(bin, run);
+ if (config_stats)
+ bin->stats.reruns++;
+ }
+ return (run);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+{
+ arena_run_t *run;
+ size_t binind;
+ arena_bin_info_t *bin_info;
+
+ /* Look for a usable run. */
+ run = arena_bin_nonfull_run_tryget(bin);
+ if (run != NULL)
+ return (run);
+ /* No existing runs have any space available. */
+
+ binind = arena_bin_index(arena, bin);
+ bin_info = &arena_bin_info[binind];
+
+ /* Allocate a new run. */
+ malloc_mutex_unlock(&bin->lock);
+ /******************************/
+ malloc_mutex_lock(&arena->lock);
+ run = arena_run_alloc(arena, bin_info->run_size, false, false);
+ if (run != NULL) {
+ bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
+ (uintptr_t)bin_info->bitmap_offset);
+
+ /* Initialize run internals. */
+ run->bin = bin;
+ run->nextind = 0;
+ run->nfree = bin_info->nregs;
+ bitmap_init(bitmap, &bin_info->bitmap_info);
+ }
+ malloc_mutex_unlock(&arena->lock);
+ /********************************/
+ malloc_mutex_lock(&bin->lock);
+ if (run != NULL) {
+ if (config_stats) {
+ bin->stats.nruns++;
+ bin->stats.curruns++;
+ }
+ return (run);
+ }
+
+ /*
+ * arena_run_alloc() failed, but another thread may have made
+ * sufficient memory available while this one dropped bin->lock above,
+ * so search one more time.
+ */
+ run = arena_bin_nonfull_run_tryget(bin);
+ if (run != NULL)
+ return (run);
+
+ return (NULL);
+}
+
+/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
+static void *
+arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
+{
+ void *ret;
+ size_t binind;
+ arena_bin_info_t *bin_info;
+ arena_run_t *run;
+
+ binind = arena_bin_index(arena, bin);
+ bin_info = &arena_bin_info[binind];
+ bin->runcur = NULL;
+ run = arena_bin_nonfull_run_get(arena, bin);
+ if (bin->runcur != NULL && bin->runcur->nfree > 0) {
+ /*
+ * Another thread updated runcur while this one ran without the
+ * bin lock in arena_bin_nonfull_run_get().
+ */
+ assert(bin->runcur->nfree > 0);
+ ret = arena_run_reg_alloc(bin->runcur, bin_info);
+ if (run != NULL) {
+ arena_chunk_t *chunk;
+
+ /*
+ * arena_run_alloc() may have allocated run, or it may
+ * have pulled run from the bin's run tree. Therefore
+ * it is unsafe to make any assumptions about how run
+ * has previously been used, and arena_bin_lower_run()
+ * must be called, as if a region were just deallocated
+ * from the run.
+ */
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ if (run->nfree == bin_info->nregs)
+ arena_dalloc_bin_run(arena, chunk, run, bin);
+ else
+ arena_bin_lower_run(arena, chunk, run, bin);
+ }
+ return (ret);
+ }
+
+ if (run == NULL)
+ return (NULL);
+
+ bin->runcur = run;
+
+ assert(bin->runcur->nfree > 0);
+
+ return (arena_run_reg_alloc(bin->runcur, bin_info));
+}
+
+void
+arena_prof_accum(arena_t *arena, uint64_t accumbytes)
+{
+
+ if (prof_interval != 0) {
+ arena->prof_accumbytes += accumbytes;
+ if (arena->prof_accumbytes >= prof_interval) {
+ prof_idump();
+ arena->prof_accumbytes -= prof_interval;
+ }
+ }
+}
+
+void
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
+ uint64_t prof_accumbytes)
+{
+ unsigned i, nfill;
+ arena_bin_t *bin;
+ arena_run_t *run;
+ void *ptr;
+
+ assert(tbin->ncached == 0);
+
+ if (config_prof) {
+ malloc_mutex_lock(&arena->lock);
+ arena_prof_accum(arena, prof_accumbytes);
+ malloc_mutex_unlock(&arena->lock);
+ }
+ bin = &arena->bins[binind];
+ malloc_mutex_lock(&bin->lock);
+ for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
+ tbin->lg_fill_div); i < nfill; i++) {
+ if ((run = bin->runcur) != NULL && run->nfree > 0)
+ ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
+ else
+ ptr = arena_bin_malloc_hard(arena, bin);
+ if (ptr == NULL)
+ break;
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ptr, &arena_bin_info[binind],
+ true);
+ }
+ /* Insert such that low regions get used first. */
+ tbin->avail[nfill - 1 - i] = ptr;
+ }
+ if (config_stats) {
+ bin->stats.allocated += i * arena_bin_info[binind].reg_size;
+ bin->stats.nmalloc += i;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ bin->stats.nfills++;
+ tbin->tstats.nrequests = 0;
+ }
+ malloc_mutex_unlock(&bin->lock);
+ tbin->ncached = i;
+}
+
+void
+arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
+{
+
+ if (zero) {
+ size_t redzone_size = bin_info->redzone_size;
+ memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
+ redzone_size);
+ memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
+ redzone_size);
+ } else {
+ memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
+ bin_info->reg_interval);
+ }
+}
+
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+ size_t size = bin_info->reg_size;
+ size_t redzone_size = bin_info->redzone_size;
+ size_t i;
+ bool error = false;
+
+ for (i = 1; i <= redzone_size; i++) {
+ unsigned byte;
+ if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
+ error = true;
+ malloc_printf("<jemalloc>: Corrupt redzone "
+ "%zu byte%s before %p (size %zu), byte=%#x\n", i,
+ (i == 1) ? "" : "s", ptr, size, byte);
+ }
+ }
+ for (i = 0; i < redzone_size; i++) {
+ unsigned byte;
+ if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
+ error = true;
+ malloc_printf("<jemalloc>: Corrupt redzone "
+ "%zu byte%s after end of %p (size %zu), byte=%#x\n",
+ i, (i == 1) ? "" : "s", ptr, size, byte);
+ }
+ }
+ if (opt_abort && error)
+ abort();
+
+ memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
+ bin_info->reg_interval);
+}
+
+void *
+arena_malloc_small(arena_t *arena, size_t size, bool zero)
+{
+ void *ret;
+ arena_bin_t *bin;
+ arena_run_t *run;
+ size_t binind;
+
+ binind = SMALL_SIZE2BIN(size);
+ assert(binind < NBINS);
+ bin = &arena->bins[binind];
+ size = arena_bin_info[binind].reg_size;
+
+ malloc_mutex_lock(&bin->lock);
+ if ((run = bin->runcur) != NULL && run->nfree > 0)
+ ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
+ else
+ ret = arena_bin_malloc_hard(arena, bin);
+
+ if (ret == NULL) {
+ malloc_mutex_unlock(&bin->lock);
+ return (NULL);
+ }
+
+ if (config_stats) {
+ bin->stats.allocated += size;
+ bin->stats.nmalloc++;
+ bin->stats.nrequests++;
+ }
+ malloc_mutex_unlock(&bin->lock);
+ if (config_prof && isthreaded == false) {
+ malloc_mutex_lock(&arena->lock);
+ arena_prof_accum(arena, size);
+ malloc_mutex_unlock(&arena->lock);
+ }
+
+ if (zero == false) {
+ if (config_fill) {
+ if (opt_junk) {
+ arena_alloc_junk_small(ret,
+ &arena_bin_info[binind], false);
+ } else if (opt_zero)
+ memset(ret, 0, size);
+ }
+ } else {
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ true);
+ }
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ memset(ret, 0, size);
+ }
+
+ return (ret);
+}
+
+void *
+arena_malloc_large(arena_t *arena, size_t size, bool zero)
+{
+ void *ret;
+
+ /* Large allocation. */
+ size = PAGE_CEILING(size);
+ malloc_mutex_lock(&arena->lock);
+ ret = (void *)arena_run_alloc(arena, size, true, zero);
+ if (ret == NULL) {
+ malloc_mutex_unlock(&arena->lock);
+ return (NULL);
+ }
+ if (config_stats) {
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ }
+ if (config_prof)
+ arena_prof_accum(arena, size);
+ malloc_mutex_unlock(&arena->lock);
+
+ if (zero == false) {
+ if (config_fill) {
+ if (opt_junk)
+ memset(ret, 0xa5, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+ }
+ }
+
+ return (ret);
+}
+
+/* Only handles large allocations that require more than page alignment. */
+void *
+arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
+{
+ void *ret;
+ size_t alloc_size, leadsize, trailsize;
+ arena_run_t *run;
+ arena_chunk_t *chunk;
+
+ assert((size & PAGE_MASK) == 0);
+
+ alignment = PAGE_CEILING(alignment);
+ alloc_size = size + alignment - PAGE;
+
+ malloc_mutex_lock(&arena->lock);
+ run = arena_run_alloc(arena, alloc_size, true, zero);
+ if (run == NULL) {
+ malloc_mutex_unlock(&arena->lock);
+ return (NULL);
+ }
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+
+ leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
+ (uintptr_t)run;
+ assert(alloc_size >= leadsize + size);
+ trailsize = alloc_size - leadsize - size;
+ ret = (void *)((uintptr_t)run + leadsize);
+ if (leadsize != 0) {
+ arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
+ leadsize);
+ }
+ if (trailsize != 0) {
+ arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
+ false);
+ }
+
+ if (config_stats) {
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ }
+ malloc_mutex_unlock(&arena->lock);
+
+ if (config_fill && zero == false) {
+ if (opt_junk)
+ memset(ret, 0xa5, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+ }
+ return (ret);
+}
+
+/* Return the size of the allocation pointed to by ptr. */
+size_t
+arena_salloc(const void *ptr, bool demote)
+{
+ size_t ret;
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
+
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ mapbits = chunk->map[pageind-map_bias].bits;
+ assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+ if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+ arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+ (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
+ size_t binind = arena_bin_index(chunk->arena, run->bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ assert(((uintptr_t)ptr - ((uintptr_t)run +
+ (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
+ == 0);
+ ret = bin_info->reg_size;
+ } else {
+ assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+ ret = mapbits & ~PAGE_MASK;
+ if (demote && prof_promote && ret == PAGE && (mapbits &
+ CHUNK_MAP_CLASS_MASK) != 0) {
+ size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
+ CHUNK_MAP_CLASS_SHIFT) - 1;
+ assert(binind < NBINS);
+ ret = arena_bin_info[binind].reg_size;
+ }
+ assert(ret != 0);
+ }
+
+ return (ret);
+}
+
+void
+arena_prof_promoted(const void *ptr, size_t size)
+{
+ arena_chunk_t *chunk;
+ size_t pageind, binind;
+
+ assert(config_prof);
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+ assert(isalloc(ptr, false) == PAGE);
+ assert(isalloc(ptr, true) == PAGE);
+ assert(size <= SMALL_MAXCLASS);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ binind = SMALL_SIZE2BIN(size);
+ assert(binind < NBINS);
+ chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
+ ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
+
+ assert(isalloc(ptr, false) == PAGE);
+ assert(isalloc(ptr, true) == size);
+}
+
+static void
+arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
+{
+
+ /* Dissociate run from bin. */
+ if (run == bin->runcur)
+ bin->runcur = NULL;
+ else {
+ size_t binind = arena_bin_index(chunk->arena, bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+
+ if (bin_info->nregs != 1) {
+ /*
+ * This block's conditional is necessary because if the
+ * run only contains one region, then it never gets
+ * inserted into the non-full runs tree.
+ */
+ arena_bin_runs_remove(bin, run);
+ }
+ }
+}
+
+static void
+arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
+{
+ size_t binind;
+ arena_bin_info_t *bin_info;
+ size_t npages, run_ind, past;
+
+ assert(run != bin->runcur);
+ assert(arena_run_tree_search(&bin->runs, &chunk->map[
+ (((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL);
+
+ binind = arena_bin_index(chunk->arena, run->bin);
+ bin_info = &arena_bin_info[binind];
+
+ malloc_mutex_unlock(&bin->lock);
+ /******************************/
+ npages = bin_info->run_size >> LG_PAGE;
+ run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ past = (size_t)(PAGE_CEILING((uintptr_t)run +
+ (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
+ bin_info->reg_interval - bin_info->redzone_size) -
+ (uintptr_t)chunk) >> LG_PAGE);
+ malloc_mutex_lock(&arena->lock);
+
+ /*
+ * If the run was originally clean, and some pages were never touched,
+ * trim the clean pages before deallocating the dirty portion of the
+ * run.
+ */
+ if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
+ - run_ind < npages) {
+ /*
+ * Trim clean pages. Convert to large run beforehand. Set the
+ * last map element first, in case this is a one-page run.
+ */
+ chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
+ (chunk->map[run_ind+npages-1-map_bias].bits &
+ CHUNK_MAP_FLAGS_MASK);
+ chunk->map[run_ind-map_bias].bits = bin_info->run_size |
+ CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
+ CHUNK_MAP_FLAGS_MASK);
+ arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
+ ((past - run_ind) << LG_PAGE), false);
+ /* npages = past - run_ind; */
+ }
+ arena_run_dalloc(arena, run, true);
+ malloc_mutex_unlock(&arena->lock);
+ /****************************/
+ malloc_mutex_lock(&bin->lock);
+ if (config_stats)
+ bin->stats.curruns--;
+}
+
+static void
+arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
+{
+
+ /*
+ * Make sure that if bin->runcur is non-NULL, it refers to the lowest
+ * non-full run. It is okay to NULL runcur out rather than proactively
+ * keeping it pointing at the lowest non-full run.
+ */
+ if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+ /* Switch runcur. */
+ if (bin->runcur->nfree > 0)
+ arena_bin_runs_insert(bin, bin->runcur);
+ bin->runcur = run;
+ if (config_stats)
+ bin->stats.reruns++;
+ } else
+ arena_bin_runs_insert(bin, run);
+}
+
+void
+arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_t *mapelm)
+{
+ size_t pageind;
+ arena_run_t *run;
+ arena_bin_t *bin;
+ size_t size;
+
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
+ (mapelm->bits >> LG_PAGE)) << LG_PAGE));
+ bin = run->bin;
+ size_t binind = arena_bin_index(arena, bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ if (config_fill || config_stats)
+ size = bin_info->reg_size;
+
+ if (config_fill && opt_junk)
+ arena_dalloc_junk_small(ptr, bin_info);
+
+ arena_run_reg_dalloc(run, ptr);
+ if (run->nfree == bin_info->nregs) {
+ arena_dissociate_bin_run(chunk, run, bin);
+ arena_dalloc_bin_run(arena, chunk, run, bin);
+ } else if (run->nfree == 1 && run != bin->runcur)
+ arena_bin_lower_run(arena, chunk, run, bin);
+
+ if (config_stats) {
+ bin->stats.allocated -= size;
+ bin->stats.ndalloc++;
+ }
+}
+
+void
+arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
+ arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats)
+{
+ unsigned i;
+
+ malloc_mutex_lock(&arena->lock);
+ *nactive += arena->nactive;
+ *ndirty += arena->ndirty;
+
+ astats->mapped += arena->stats.mapped;
+ astats->npurge += arena->stats.npurge;
+ astats->nmadvise += arena->stats.nmadvise;
+ astats->purged += arena->stats.purged;
+ astats->allocated_large += arena->stats.allocated_large;
+ astats->nmalloc_large += arena->stats.nmalloc_large;
+ astats->ndalloc_large += arena->stats.ndalloc_large;
+ astats->nrequests_large += arena->stats.nrequests_large;
+
+ for (i = 0; i < nlclasses; i++) {
+ lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
+ lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
+ lstats[i].nrequests += arena->stats.lstats[i].nrequests;
+ lstats[i].curruns += arena->stats.lstats[i].curruns;
+ }
+ malloc_mutex_unlock(&arena->lock);
+
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+
+ malloc_mutex_lock(&bin->lock);
+ bstats[i].allocated += bin->stats.allocated;
+ bstats[i].nmalloc += bin->stats.nmalloc;
+ bstats[i].ndalloc += bin->stats.ndalloc;
+ bstats[i].nrequests += bin->stats.nrequests;
+ if (config_tcache) {
+ bstats[i].nfills += bin->stats.nfills;
+ bstats[i].nflushes += bin->stats.nflushes;
+ }
+ bstats[i].nruns += bin->stats.nruns;
+ bstats[i].reruns += bin->stats.reruns;
+ bstats[i].curruns += bin->stats.curruns;
+ malloc_mutex_unlock(&bin->lock);
+ }
+}
+
+void
+arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+
+ if (config_fill || config_stats) {
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
+
+ if (config_fill && config_stats && opt_junk)
+ memset(ptr, 0x5a, size);
+ if (config_stats) {
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
+ }
+ }
+
+ arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+}
+
+static void
+arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t oldsize, size_t size)
+{
+
+ assert(size < oldsize);
+
+ /*
+ * Shrink the run, and make trailing pages available for other
+ * allocations.
+ */
+ malloc_mutex_lock(&arena->lock);
+ arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
+ true);
+ if (config_stats) {
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
+ arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ }
+ malloc_mutex_unlock(&arena->lock);
+}
+
+static bool
+arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t oldsize, size_t size, size_t extra, bool zero)
+{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t npages = oldsize >> LG_PAGE;
+ size_t followsize;
+
+ assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
+
+ /* Try to extend the run. */
+ assert(size + extra > oldsize);
+ malloc_mutex_lock(&arena->lock);
+ if (pageind + npages < chunk_npages &&
+ (chunk->map[pageind+npages-map_bias].bits
+ & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
+ chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
+ oldsize) {
+ /*
+ * The next run is available and sufficiently large. Split the
+ * following run, then merge the first part with the existing
+ * allocation.
+ */
+ size_t flag_dirty;
+ size_t splitsize = (oldsize + followsize <= size + extra)
+ ? followsize : size + extra - oldsize;
+ arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
+ ((pageind+npages) << LG_PAGE)), splitsize, true, zero);
+
+ size = oldsize + splitsize;
+ npages = size >> LG_PAGE;
+
+ /*
+ * Mark the extended run as dirty if either portion of the run
+ * was dirty before allocation. This is rather pedantic,
+ * because there's not actually any sequence of events that
+ * could cause the resulting run to be passed to
+ * arena_run_dalloc() with the dirty argument set to false
+ * (which is when dirty flag consistency would really matter).
+ */
+ flag_dirty = (chunk->map[pageind-map_bias].bits &
+ CHUNK_MAP_DIRTY) |
+ (chunk->map[pageind+npages-1-map_bias].bits &
+ CHUNK_MAP_DIRTY);
+ chunk->map[pageind-map_bias].bits = size | flag_dirty
+ | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
+ CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+ if (config_stats) {
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[(oldsize >> LG_PAGE)
+ - 1].ndalloc++;
+ arena->stats.lstats[(oldsize >> LG_PAGE)
+ - 1].curruns--;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+ arena->stats.lstats[(size >> LG_PAGE)
+ - 1].nrequests++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ }
+ malloc_mutex_unlock(&arena->lock);
+ return (false);
+ }
+ malloc_mutex_unlock(&arena->lock);
+
+ return (true);
+}
+
+/*
+ * Try to resize a large allocation, in order to avoid copying.  This always
+ * fails when growing an object if the following run is already in use.
+ */
+static bool
+arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
+ bool zero)
+{
+ size_t psize;
+
+ psize = PAGE_CEILING(size + extra);
+ if (psize == oldsize) {
+ /* Same size class. */
+ if (config_fill && opt_junk && size < oldsize) {
+ memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
+ size);
+ }
+ return (false);
+ } else {
+ arena_chunk_t *chunk;
+ arena_t *arena;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = chunk->arena;
+
+ if (psize < oldsize) {
+ /* Fill before shrinking in order to avoid a race. */
+ if (config_fill && opt_junk) {
+ memset((void *)((uintptr_t)ptr + size), 0x5a,
+ oldsize - size);
+ }
+ arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
+ psize);
+ return (false);
+ } else {
+ bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
+ oldsize, PAGE_CEILING(size),
+ psize - PAGE_CEILING(size), zero);
+ if (config_fill && ret == false && zero == false &&
+ opt_zero) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ size - oldsize);
+ }
+ return (ret);
+ }
+ }
+}
+
+void *
+arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
+ bool zero)
+{
+
+ /*
+ * Avoid moving the allocation if the size class can be left the same.
+ */
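+ /*
+ * Illustrative example (assuming extra == 0 and the usual quantum-spaced
+ * small size classes ..., 32, 48, 64, 80, ...): shrinking a 48-byte
+ * allocation to 40 bytes keeps it in the 48-byte bin, so the original
+ * pointer is returned; growing it to 80 bytes changes size classes, so NULL
+ * is returned and the caller must allocate, copy, and deallocate instead.
+ */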
+ if (oldsize <= arena_maxclass) {
+ if (oldsize <= SMALL_MAXCLASS) {
+ assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
+ == oldsize);
+ if ((size + extra <= SMALL_MAXCLASS &&
+ SMALL_SIZE2BIN(size + extra) ==
+ SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
+ size + extra >= oldsize)) {
+ if (config_fill && opt_junk && size < oldsize) {
+ memset((void *)((uintptr_t)ptr + size),
+ 0x5a, oldsize - size);
+ }
+ return (ptr);
+ }
+ } else {
+ assert(size <= arena_maxclass);
+ if (size + extra > SMALL_MAXCLASS) {
+ if (arena_ralloc_large(ptr, oldsize, size,
+ extra, zero) == false)
+ return (ptr);
+ }
+ }
+ }
+
+ /* Reallocation would require a move. */
+ return (NULL);
+}
+
+void *
+arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, bool try_tcache)
+{
+ void *ret;
+ size_t copysize;
+
+ /* Try to avoid moving the allocation. */
+ ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
+ if (ret != NULL)
+ return (ret);
+
+ /*
+ * size and oldsize are different enough that we need to move the
+ * object. In that case, fall back to allocating new space and
+ * copying.
+ */
+ if (alignment != 0) {
+ size_t usize = sa2u(size + extra, alignment);
+ if (usize == 0)
+ return (NULL);
+ ret = ipalloc(usize, alignment, zero);
+ } else
+ ret = arena_malloc(NULL, size + extra, zero, try_tcache);
+
+ if (ret == NULL) {
+ if (extra == 0)
+ return (NULL);
+ /* Try again, this time without extra. */
+ if (alignment != 0) {
+ size_t usize = sa2u(size, alignment);
+ if (usize == 0)
+ return (NULL);
+ ret = ipalloc(usize, alignment, zero);
+ } else
+ ret = arena_malloc(NULL, size, zero, try_tcache);
+
+ if (ret == NULL)
+ return (NULL);
+ }
+
+ /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
+
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ memcpy(ret, ptr, copysize);
+ iqalloc(ptr);
+ return (ret);
+}
+
+bool
+arena_new(arena_t *arena, unsigned ind)
+{
+ unsigned i;
+ arena_bin_t *bin;
+
+ arena->ind = ind;
+ arena->nthreads = 0;
+
+ if (malloc_mutex_init(&arena->lock))
+ return (true);
+
+ if (config_stats) {
+ memset(&arena->stats, 0, sizeof(arena_stats_t));
+ arena->stats.lstats =
+ (malloc_large_stats_t *)base_alloc(nlclasses *
+ sizeof(malloc_large_stats_t));
+ if (arena->stats.lstats == NULL)
+ return (true);
+ memset(arena->stats.lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ if (config_tcache)
+ ql_new(&arena->tcache_ql);
+ }
+
+ if (config_prof)
+ arena->prof_accumbytes = 0;
+
+ /* Initialize chunks. */
+ ql_new(&arena->chunks_dirty);
+ arena->spare = NULL;
+
+ arena->nactive = 0;
+ arena->ndirty = 0;
+ arena->npurgatory = 0;
+
+ arena_avail_tree_new(&arena->runs_avail_clean);
+ arena_avail_tree_new(&arena->runs_avail_dirty);
+
+ /* Initialize bins. */
+ for (i = 0; i < NBINS; i++) {
+ bin = &arena->bins[i];
+ if (malloc_mutex_init(&bin->lock))
+ return (true);
+ bin->runcur = NULL;
+ arena_run_tree_new(&bin->runs);
+ if (config_stats)
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+ }
+
+ return (false);
+}
+
+/*
+ * Calculate bin_info->run_size such that it meets the following constraints:
+ *
+ * *) bin_info->run_size >= min_run_size
+ * *) bin_info->run_size <= arena_maxclass
+ * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
+ * *) bin_info->nregs <= RUN_MAXREGS
+ *
+ * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
+ * calculated here, since these settings are all interdependent.
+ */
+static size_t
+bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
+{
+ size_t pad_size;
+ size_t try_run_size, good_run_size;
+ uint32_t try_nregs, good_nregs;
+ uint32_t try_hdr_size, good_hdr_size;
+ uint32_t try_bitmap_offset, good_bitmap_offset;
+ uint32_t try_ctx0_offset, good_ctx0_offset;
+ uint32_t try_redzone0_offset, good_redzone0_offset;
+
+ assert(min_run_size >= PAGE);
+ assert(min_run_size <= arena_maxclass);
+
+ /*
+ * Determine redzone size based on minimum alignment and minimum
+ * redzone size. Add padding to the end of the run if it is needed to
+ * align the regions. The padding allows each redzone to be half the
+ * minimum alignment; without the padding, each redzone would have to
+ * be twice as large in order to maintain alignment.
+ */
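+ /*
+ * Worked example (illustrative, assuming REDZONE_MINSIZE == 16): a 64-byte
+ * size class has align_min == 64, so each redzone is 32 bytes and 32 bytes
+ * of trailing pad are added, giving reg_interval == 128; a 48-byte class
+ * has align_min == 16 <= REDZONE_MINSIZE, so redzones are 16 bytes, no pad
+ * is needed, and reg_interval == 80.
+ */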
+ if (config_fill && opt_redzone) {
+ size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
+ if (align_min <= REDZONE_MINSIZE) {
+ bin_info->redzone_size = REDZONE_MINSIZE;
+ pad_size = 0;
+ } else {
+ bin_info->redzone_size = align_min >> 1;
+ pad_size = bin_info->redzone_size;
+ }
+ } else {
+ bin_info->redzone_size = 0;
+ pad_size = 0;
+ }
+ bin_info->reg_interval = bin_info->reg_size +
+ (bin_info->redzone_size << 1);
+
+ /*
+ * Calculate known-valid settings before entering the run_size
+ * expansion loop, so that the first part of the loop always copies
+ * valid settings.
+ *
+ * The do..while loop iteratively reduces the number of regions until
+ * the run header and the regions no longer overlap. A closed formula
+ * would be quite messy, since there is an interdependency between the
+ * header's mask length and the number of regions.
+ */
+ try_run_size = min_run_size;
+ try_nregs = ((try_run_size - sizeof(arena_run_t)) /
+ bin_info->reg_interval)
+ + 1; /* Counter-act try_nregs-- in loop. */
+ if (try_nregs > RUN_MAXREGS) {
+ try_nregs = RUN_MAXREGS
+ + 1; /* Counter-act try_nregs-- in loop. */
+ }
+ do {
+ try_nregs--;
+ try_hdr_size = sizeof(arena_run_t);
+ /* Pad to a long boundary. */
+ try_hdr_size = LONG_CEILING(try_hdr_size);
+ try_bitmap_offset = try_hdr_size;
+ /* Add space for bitmap. */
+ try_hdr_size += bitmap_size(try_nregs);
+ if (config_prof && opt_prof && prof_promote == false) {
+ /* Pad to a quantum boundary. */
+ try_hdr_size = QUANTUM_CEILING(try_hdr_size);
+ try_ctx0_offset = try_hdr_size;
+ /* Add space for one (prof_ctx_t *) per region. */
+ try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
+ } else
+ try_ctx0_offset = 0;
+ try_redzone0_offset = try_run_size - (try_nregs *
+ bin_info->reg_interval) - pad_size;
+ } while (try_hdr_size > try_redzone0_offset);
+
+ /* run_size expansion loop. */
+ do {
+ /*
+ * Copy valid settings before trying more aggressive settings.
+ */
+ good_run_size = try_run_size;
+ good_nregs = try_nregs;
+ good_hdr_size = try_hdr_size;
+ good_bitmap_offset = try_bitmap_offset;
+ good_ctx0_offset = try_ctx0_offset;
+ good_redzone0_offset = try_redzone0_offset;
+
+ /* Try more aggressive settings. */
+ try_run_size += PAGE;
+ try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
+ bin_info->reg_interval)
+ + 1; /* Counter-act try_nregs-- in loop. */
+ if (try_nregs > RUN_MAXREGS) {
+ try_nregs = RUN_MAXREGS
+ + 1; /* Counter-act try_nregs-- in loop. */
+ }
+ do {
+ try_nregs--;
+ try_hdr_size = sizeof(arena_run_t);
+ /* Pad to a long boundary. */
+ try_hdr_size = LONG_CEILING(try_hdr_size);
+ try_bitmap_offset = try_hdr_size;
+ /* Add space for bitmap. */
+ try_hdr_size += bitmap_size(try_nregs);
+ if (config_prof && opt_prof && prof_promote == false) {
+ /* Pad to a quantum boundary. */
+ try_hdr_size = QUANTUM_CEILING(try_hdr_size);
+ try_ctx0_offset = try_hdr_size;
+ /*
+ * Add space for one (prof_ctx_t *) per region.
+ */
+ try_hdr_size += try_nregs *
+ sizeof(prof_ctx_t *);
+ }
+ try_redzone0_offset = try_run_size - (try_nregs *
+ bin_info->reg_interval) - pad_size;
+ } while (try_hdr_size > try_redzone0_offset);
+ } while (try_run_size <= arena_maxclass
+ && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
+ RUN_MAX_OVRHD_RELAX
+ && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
+ && try_nregs < RUN_MAXREGS);
+
+ assert(good_hdr_size <= good_redzone0_offset);
+
+ /* Copy final settings. */
+ bin_info->run_size = good_run_size;
+ bin_info->nregs = good_nregs;
+ bin_info->bitmap_offset = good_bitmap_offset;
+ bin_info->ctx0_offset = good_ctx0_offset;
+ bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
+
+ assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
+ * bin_info->reg_interval) + pad_size == bin_info->run_size);
+
+ return (good_run_size);
+}
+
+static void
+bin_info_init(void)
+{
+ arena_bin_info_t *bin_info;
+ size_t prev_run_size = PAGE;
+
+#define SIZE_CLASS(bin, delta, size) \
+ bin_info = &arena_bin_info[bin]; \
+ bin_info->reg_size = size; \
+ prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
+ bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+ SIZE_CLASSES
+#undef SIZE_CLASS
+}
+
+void
+arena_boot(void)
+{
+ size_t header_size;
+ unsigned i;
+
+ /*
+ * Compute the header size such that it is large enough to contain the
+ * page map. The page map is biased to omit entries for the header
+ * itself, so some iteration is necessary to compute the map bias.
+ *
+ * 1) Compute safe header_size and map_bias values that include enough
+ * space for an unbiased page map.
+ * 2) Refine map_bias based on (1) to omit the header pages in the page
+ * map. The resulting map_bias may be one too small.
+ * 3) Refine map_bias based on (2). The result will be >= the result
+ * from (2), and will always be correct.
+ */
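+ /*
+ * Convergence note: header_size is a decreasing function of map_bias, and
+ * in practice the map entries removed by one refinement occupy less than a
+ * page, so each pass changes map_bias by at most one; three passes
+ * therefore suffice to reach the fixed point.
+ */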
+ map_bias = 0;
+ for (i = 0; i < 3; i++) {
+ header_size = offsetof(arena_chunk_t, map) +
+ (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
+ map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
+ != 0);
+ }
+ assert(map_bias > 0);
+
+ arena_maxclass = chunksize - (map_bias << LG_PAGE);
+
+ bin_info_init();
+}
+
+void
+arena_prefork(arena_t *arena)
+{
+ unsigned i;
+
+ malloc_mutex_prefork(&arena->lock);
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_prefork(&arena->bins[i].lock);
+}
+
+void
+arena_postfork_parent(arena_t *arena)
+{
+ unsigned i;
+
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_postfork_parent(&arena->bins[i].lock);
+ malloc_mutex_postfork_parent(&arena->lock);
+}
+
+void
+arena_postfork_child(arena_t *arena)
+{
+ unsigned i;
+
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_postfork_child(&arena->bins[i].lock);
+ malloc_mutex_postfork_child(&arena->lock);
+}
diff --git a/contrib/jemalloc/src/atomic.c b/contrib/jemalloc/src/atomic.c
new file mode 100644
index 0000000..77ee313
--- /dev/null
+++ b/contrib/jemalloc/src/atomic.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_ATOMIC_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/contrib/jemalloc/src/base.c b/contrib/jemalloc/src/base.c
new file mode 100644
index 0000000..bafaa74
--- /dev/null
+++ b/contrib/jemalloc/src/base.c
@@ -0,0 +1,138 @@
+#define JEMALLOC_BASE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static malloc_mutex_t base_mtx;
+
+/*
+ * Current pages that are being used for internal memory allocations. These
+ * pages are carved up in cacheline-size quanta, so that there is no chance of
+ * false cache line sharing.
+ */
+static void *base_pages;
+static void *base_next_addr;
+static void *base_past_addr; /* Addr immediately past base_pages. */
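+/*
+ * LIFO free list of extent nodes, threaded through the first word of each
+ * node (see base_node_alloc()/base_node_dealloc()).
+ */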
+static extent_node_t *base_nodes;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static bool base_pages_alloc(size_t minsize);
+
+/******************************************************************************/
+
+static bool
+base_pages_alloc(size_t minsize)
+{
+ size_t csize;
+ bool zero;
+
+ assert(minsize != 0);
+ csize = CHUNK_CEILING(minsize);
+ zero = false;
+ base_pages = chunk_alloc(csize, chunksize, true, &zero);
+ if (base_pages == NULL)
+ return (true);
+ base_next_addr = base_pages;
+ base_past_addr = (void *)((uintptr_t)base_pages + csize);
+
+ return (false);
+}
+
+void *
+base_alloc(size_t size)
+{
+ void *ret;
+ size_t csize;
+
+ /* Round size up to nearest multiple of the cacheline size. */
+ csize = CACHELINE_CEILING(size);
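+ /*
+ * E.g. (illustrative, with 64-byte cache lines): a 40-byte request consumes
+ * 64 bytes of base space.
+ */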
+
+ malloc_mutex_lock(&base_mtx);
+ /* Make sure there's enough space for the allocation. */
+ if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
+ if (base_pages_alloc(csize)) {
+ malloc_mutex_unlock(&base_mtx);
+ return (NULL);
+ }
+ }
+ /* Allocate. */
+ ret = base_next_addr;
+ base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
+ malloc_mutex_unlock(&base_mtx);
+
+ return (ret);
+}
+
+void *
+base_calloc(size_t number, size_t size)
+{
+ void *ret = base_alloc(number * size);
+
+ if (ret != NULL)
+ memset(ret, 0, number * size);
+
+ return (ret);
+}
+
+extent_node_t *
+base_node_alloc(void)
+{
+ extent_node_t *ret;
+
+ malloc_mutex_lock(&base_mtx);
+ if (base_nodes != NULL) {
+ ret = base_nodes;
+ base_nodes = *(extent_node_t **)ret;
+ malloc_mutex_unlock(&base_mtx);
+ } else {
+ malloc_mutex_unlock(&base_mtx);
+ ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
+ }
+
+ return (ret);
+}
+
+void
+base_node_dealloc(extent_node_t *node)
+{
+
+ malloc_mutex_lock(&base_mtx);
+ *(extent_node_t **)node = base_nodes;
+ base_nodes = node;
+ malloc_mutex_unlock(&base_mtx);
+}
+
+bool
+base_boot(void)
+{
+
+ base_nodes = NULL;
+ if (malloc_mutex_init(&base_mtx))
+ return (true);
+
+ return (false);
+}
+
+void
+base_prefork(void)
+{
+
+ malloc_mutex_prefork(&base_mtx);
+}
+
+void
+base_postfork_parent(void)
+{
+
+ malloc_mutex_postfork_parent(&base_mtx);
+}
+
+void
+base_postfork_child(void)
+{
+
+ malloc_mutex_postfork_child(&base_mtx);
+}
diff --git a/contrib/jemalloc/src/bitmap.c b/contrib/jemalloc/src/bitmap.c
new file mode 100644
index 0000000..b47e262
--- /dev/null
+++ b/contrib/jemalloc/src/bitmap.c
@@ -0,0 +1,90 @@
+#define JEMALLOC_BITMAP_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static size_t bits2groups(size_t nbits);
+
+/******************************************************************************/
+
+static size_t
+bits2groups(size_t nbits)
+{
+
+ return ((nbits >> LG_BITMAP_GROUP_NBITS) +
+ !!(nbits & BITMAP_GROUP_NBITS_MASK));
+}
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
+{
+ unsigned i;
+ size_t group_count;
+
+ assert(nbits > 0);
+ assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+ /*
+ * Compute the number of groups necessary to store nbits bits, and
+ * progressively work upward through the levels until reaching a level
+ * that requires only one group.
+ */
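+ /*
+ * Worked example (illustrative, assuming 64-bit groups): nbits == 200 needs
+ * 4 leaf groups, which collapse into a single level-1 summary group, so
+ * nlevels == 2 and 5 groups are used in total.
+ */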
+ binfo->levels[0].group_offset = 0;
+ group_count = bits2groups(nbits);
+ for (i = 1; group_count > 1; i++) {
+ assert(i < BITMAP_MAX_LEVELS);
+ binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ + group_count;
+ group_count = bits2groups(group_count);
+ }
+ binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ + group_count;
+ binfo->nlevels = i;
+ binfo->nbits = nbits;
+}
+
+size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo)
+{
+
+ return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
+}
+
+size_t
+bitmap_size(size_t nbits)
+{
+ bitmap_info_t binfo;
+
+ bitmap_info_init(&binfo, nbits);
+ return (bitmap_info_ngroups(&binfo));
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+ size_t extra;
+ unsigned i;
+
+ /*
+ * Bits are actually inverted with regard to the external bitmap
+ * interface, so the bitmap starts out with all 1 bits, except for
+ * trailing unused bits (if any). Note that each group uses bit 0 to
+ * correspond to the first logical bit in the group, so extra bits
+ * are the most significant bits of the last group.
+ */
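+ /*
+ * E.g. (illustrative, 64-bit groups): for nbits == 200 the last leaf group
+ * has 56 unused bits, so it is shifted right by 56, clearing its 56 most
+ * significant bits.
+ */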
+ memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
+ LG_SIZEOF_BITMAP);
+ extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+ & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->levels[1].group_offset - 1] >>= extra;
+ for (i = 1; i < binfo->nlevels; i++) {
+ size_t group_count = binfo->levels[i].group_offset -
+ binfo->levels[i-1].group_offset;
+ extra = (BITMAP_GROUP_NBITS - (group_count &
+ BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
+ }
+}
diff --git a/contrib/jemalloc/src/chunk.c b/contrib/jemalloc/src/chunk.c
new file mode 100644
index 0000000..67e0d50
--- /dev/null
+++ b/contrib/jemalloc/src/chunk.c
@@ -0,0 +1,304 @@
+#define JEMALLOC_CHUNK_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
+
+malloc_mutex_t chunks_mtx;
+chunk_stats_t stats_chunks;
+
+/*
+ * Trees of chunks that were previously allocated (trees differ only in node
+ * ordering). These are used when allocating chunks, in an attempt to re-use
+ * address space. Depending on function, different tree orderings are needed,
+ * which is why there are two trees with the same contents.
+ */
+static extent_tree_t chunks_szad;
+static extent_tree_t chunks_ad;
+
+rtree_t *chunks_rtree;
+
+/* Various chunk-related settings. */
+size_t chunksize;
+size_t chunksize_mask; /* (chunksize - 1). */
+size_t chunk_npages;
+size_t map_bias;
+size_t arena_maxclass; /* Max size class for arenas. */
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void *chunk_recycle(size_t size, size_t alignment, bool *zero);
+static void chunk_record(void *chunk, size_t size);
+
+/******************************************************************************/
+
+static void *
+chunk_recycle(size_t size, size_t alignment, bool *zero)
+{
+ void *ret;
+ extent_node_t *node;
+ extent_node_t key;
+ size_t alloc_size, leadsize, trailsize;
+
+ alloc_size = size + alignment - chunksize;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
+ key.addr = NULL;
+ key.size = alloc_size;
+ malloc_mutex_lock(&chunks_mtx);
+ node = extent_tree_szad_nsearch(&chunks_szad, &key);
+ if (node == NULL) {
+ malloc_mutex_unlock(&chunks_mtx);
+ return (NULL);
+ }
+ leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
+ (uintptr_t)node->addr;
+ assert(alloc_size >= leadsize + size);
+ trailsize = alloc_size - leadsize - size;
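+ /*
+ * E.g. (illustrative): a 4 MiB, chunk-aligned request carved from a
+ * recycled, chunk-aligned 12 MiB extent has leadsize == 0 and an 8 MiB
+ * trailing extent that is reinserted into the trees below.
+ */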
+ ret = (void *)((uintptr_t)node->addr + leadsize);
+ /* Remove node from the tree. */
+ extent_tree_szad_remove(&chunks_szad, node);
+ extent_tree_ad_remove(&chunks_ad, node);
+ if (leadsize != 0) {
+ /* Insert the leading space as a smaller chunk. */
+ node->size = leadsize;
+ extent_tree_szad_insert(&chunks_szad, node);
+ extent_tree_ad_insert(&chunks_ad, node);
+ node = NULL;
+ }
+ if (trailsize != 0) {
+ /* Insert the trailing space as a smaller chunk. */
+ if (node == NULL) {
+ /*
+ * An additional node is required, but
+ * base_node_alloc() can cause a new base chunk to be
+ * allocated. Drop chunks_mtx in order to avoid
+ * deadlock, and if node allocation fails, deallocate
+ * the result before returning an error.
+ */
+ malloc_mutex_unlock(&chunks_mtx);
+ node = base_node_alloc();
+ if (node == NULL) {
+ chunk_dealloc(ret, size, true);
+ return (NULL);
+ }
+ malloc_mutex_lock(&chunks_mtx);
+ }
+ node->addr = (void *)((uintptr_t)(ret) + size);
+ node->size = trailsize;
+ extent_tree_szad_insert(&chunks_szad, node);
+ extent_tree_ad_insert(&chunks_ad, node);
+ node = NULL;
+ }
+ malloc_mutex_unlock(&chunks_mtx);
+
+ if (node != NULL)
+ base_node_dealloc(node);
+#ifdef JEMALLOC_PURGE_MADVISE_FREE
+ if (*zero) {
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ memset(ret, 0, size);
+ }
+#endif
+ return (ret);
+}
+
+/*
+ * If the caller specifies (*zero == false), it is still possible to receive
+ * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
+ * takes advantage of this to avoid demanding zeroed chunks, but taking
+ * advantage of them if they are returned.
+ */
+void *
+chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ ret = chunk_recycle(size, alignment, zero);
+ if (ret != NULL)
+ goto label_return;
+ if (config_dss) {
+ ret = chunk_alloc_dss(size, alignment, zero);
+ if (ret != NULL)
+ goto label_return;
+ }
+ ret = chunk_alloc_mmap(size, alignment);
+ if (ret != NULL) {
+ *zero = true;
+ goto label_return;
+ }
+
+ /* All strategies for allocation failed. */
+ ret = NULL;
+label_return:
+ if (config_ivsalloc && base == false && ret != NULL) {
+ if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+ chunk_dealloc(ret, size, true);
+ return (NULL);
+ }
+ }
+ if ((config_stats || config_prof) && ret != NULL) {
+ bool gdump;
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_stats)
+ stats_chunks.nchunks += (size / chunksize);
+ stats_chunks.curchunks += (size / chunksize);
+ if (stats_chunks.curchunks > stats_chunks.highchunks) {
+ stats_chunks.highchunks = stats_chunks.curchunks;
+ if (config_prof)
+ gdump = true;
+ } else if (config_prof)
+ gdump = false;
+ malloc_mutex_unlock(&chunks_mtx);
+ if (config_prof && opt_prof && opt_prof_gdump && gdump)
+ prof_gdump();
+ }
+
+ assert(CHUNK_ADDR2BASE(ret) == ret);
+ return (ret);
+}
+
+static void
+chunk_record(void *chunk, size_t size)
+{
+ extent_node_t *xnode, *node, *prev, key;
+
+ madvise(chunk, size, JEMALLOC_MADV_PURGE);
+
+ xnode = NULL;
+ malloc_mutex_lock(&chunks_mtx);
+ while (true) {
+ key.addr = (void *)((uintptr_t)chunk + size);
+ node = extent_tree_ad_nsearch(&chunks_ad, &key);
+ /* Try to coalesce forward. */
+ if (node != NULL && node->addr == key.addr) {
+ /*
+ * Coalesce chunk with the following address range.
+ * This does not change the position within chunks_ad,
+ * so only remove/insert from/into chunks_szad.
+ */
+ extent_tree_szad_remove(&chunks_szad, node);
+ node->addr = chunk;
+ node->size += size;
+ extent_tree_szad_insert(&chunks_szad, node);
+ break;
+ } else if (xnode == NULL) {
+ /*
+ * It is possible that base_node_alloc() will cause a
+ * new base chunk to be allocated, so take care not to
+ * deadlock on chunks_mtx, and recover if another thread
+ * deallocates an adjacent chunk while this one is busy
+ * allocating xnode.
+ */
+ malloc_mutex_unlock(&chunks_mtx);
+ xnode = base_node_alloc();
+ if (xnode == NULL)
+ return;
+ malloc_mutex_lock(&chunks_mtx);
+ } else {
+ /* Coalescing forward failed, so insert a new node. */
+ node = xnode;
+ xnode = NULL;
+ node->addr = chunk;
+ node->size = size;
+ extent_tree_ad_insert(&chunks_ad, node);
+ extent_tree_szad_insert(&chunks_szad, node);
+ break;
+ }
+ }
+ /* Discard xnode if it ended up unused due to a race. */
+ if (xnode != NULL)
+ base_node_dealloc(xnode);
+
+ /* Try to coalesce backward. */
+ prev = extent_tree_ad_prev(&chunks_ad, node);
+ if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
+ chunk) {
+ /*
+ * Coalesce chunk with the previous address range. This does
+ * not change the position within chunks_ad, so only
+ * remove/insert node from/into chunks_szad.
+ */
+ extent_tree_szad_remove(&chunks_szad, prev);
+ extent_tree_ad_remove(&chunks_ad, prev);
+
+ extent_tree_szad_remove(&chunks_szad, node);
+ node->addr = prev->addr;
+ node->size += prev->size;
+ extent_tree_szad_insert(&chunks_szad, node);
+
+ base_node_dealloc(prev);
+ }
+ malloc_mutex_unlock(&chunks_mtx);
+}
+
+void
+chunk_dealloc(void *chunk, size_t size, bool unmap)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+ if (config_ivsalloc)
+ rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+ if (config_stats || config_prof) {
+ malloc_mutex_lock(&chunks_mtx);
+ stats_chunks.curchunks -= (size / chunksize);
+ malloc_mutex_unlock(&chunks_mtx);
+ }
+
+ if (unmap) {
+ if (chunk_dealloc_mmap(chunk, size) == false)
+ return;
+ chunk_record(chunk, size);
+ }
+}
+
+bool
+chunk_boot0(void)
+{
+
+ /* Set variables according to the value of opt_lg_chunk. */
+ chunksize = (ZU(1) << opt_lg_chunk);
+ assert(chunksize >= PAGE);
+ chunksize_mask = chunksize - 1;
+ chunk_npages = (chunksize >> LG_PAGE);
+
+ if (config_stats || config_prof) {
+ if (malloc_mutex_init(&chunks_mtx))
+ return (true);
+ memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+ }
+ if (config_dss && chunk_dss_boot())
+ return (true);
+ extent_tree_szad_new(&chunks_szad);
+ extent_tree_ad_new(&chunks_ad);
+ if (config_ivsalloc) {
+ chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
+ opt_lg_chunk);
+ if (chunks_rtree == NULL)
+ return (true);
+ }
+
+ return (false);
+}
+
+bool
+chunk_boot1(void)
+{
+
+ if (chunk_mmap_boot())
+ return (true);
+
+ return (false);
+}
diff --git a/contrib/jemalloc/src/chunk_dss.c b/contrib/jemalloc/src/chunk_dss.c
new file mode 100644
index 0000000..b05509a
--- /dev/null
+++ b/contrib/jemalloc/src/chunk_dss.c
@@ -0,0 +1,159 @@
+#define JEMALLOC_CHUNK_DSS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+/******************************************************************************/
+/* Data. */
+
+/*
+ * Protects sbrk() calls. This avoids malloc races among threads, though it
+ * does not protect against races with threads that call sbrk() directly.
+ */
+static malloc_mutex_t dss_mtx;
+
+/* Base address of the DSS. */
+static void *dss_base;
+/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
+static void *dss_prev;
+/* Current upper limit on DSS addresses. */
+static void *dss_max;
+
+/******************************************************************************/
+
+#ifndef JEMALLOC_HAVE_SBRK
+static void *
+sbrk(intptr_t increment)
+{
+
+ not_implemented();
+
+ return (NULL);
+}
+#endif
+
+void *
+chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
+{
+ void *ret;
+
+ cassert(config_dss);
+ assert(size > 0 && (size & chunksize_mask) == 0);
+ assert(alignment > 0 && (alignment & chunksize_mask) == 0);
+
+ /*
+ * sbrk() uses a signed increment argument, so take care not to
+ * interpret a huge allocation request as a negative increment.
+ */
+ if ((intptr_t)size < 0)
+ return (NULL);
+
+ malloc_mutex_lock(&dss_mtx);
+ if (dss_prev != (void *)-1) {
+ size_t gap_size, cpad_size;
+ void *cpad, *dss_next;
+ intptr_t incr;
+
+ /*
+ * The loop is necessary to recover from races with other
+ * threads that are using the DSS for something other than
+ * malloc.
+ */
+ do {
+ /* Get the current end of the DSS. */
+ dss_max = sbrk(0);
+ /*
+ * Calculate how much padding is necessary to
+ * chunk-align the end of the DSS.
+ */
+ gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
+ chunksize_mask;
+ /*
+ * Compute how much chunk-aligned pad space (if any) is
+ * necessary to satisfy alignment. This space can be
+ * recycled for later use.
+ */
+ cpad = (void *)((uintptr_t)dss_max + gap_size);
+ ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
+ alignment);
+ cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
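+ /*
+ * E.g. (illustrative, 4 MiB chunks): if dss_max is 0x08048000 and an
+ * 8 MiB-aligned chunk is requested, gap_size pads the break to
+ * 0x08400000, cpad_size is 4 MiB (recycled below via chunk_dealloc()),
+ * and ret is 0x08800000.
+ */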
+ dss_next = (void *)((uintptr_t)ret + size);
+ if ((uintptr_t)ret < (uintptr_t)dss_max ||
+ (uintptr_t)dss_next < (uintptr_t)dss_max) {
+ /* Wrap-around. */
+ malloc_mutex_unlock(&dss_mtx);
+ return (NULL);
+ }
+ incr = gap_size + cpad_size + size;
+ dss_prev = sbrk(incr);
+ if (dss_prev == dss_max) {
+ /* Success. */
+ dss_max = dss_next;
+ malloc_mutex_unlock(&dss_mtx);
+ if (cpad_size != 0)
+ chunk_dealloc(cpad, cpad_size, true);
+ *zero = true;
+ return (ret);
+ }
+ } while (dss_prev != (void *)-1);
+ }
+ malloc_mutex_unlock(&dss_mtx);
+
+ return (NULL);
+}
+
+bool
+chunk_in_dss(void *chunk)
+{
+ bool ret;
+
+ cassert(config_dss);
+
+ malloc_mutex_lock(&dss_mtx);
+ if ((uintptr_t)chunk >= (uintptr_t)dss_base
+ && (uintptr_t)chunk < (uintptr_t)dss_max)
+ ret = true;
+ else
+ ret = false;
+ malloc_mutex_unlock(&dss_mtx);
+
+ return (ret);
+}
+
+bool
+chunk_dss_boot(void)
+{
+
+ cassert(config_dss);
+
+ if (malloc_mutex_init(&dss_mtx))
+ return (true);
+ dss_base = sbrk(0);
+ dss_prev = dss_base;
+ dss_max = dss_base;
+
+ return (false);
+}
+
+void
+chunk_dss_prefork(void)
+{
+
+ if (config_dss)
+ malloc_mutex_prefork(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_parent(void)
+{
+
+ if (config_dss)
+ malloc_mutex_postfork_parent(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_child(void)
+{
+
+ if (config_dss)
+ malloc_mutex_postfork_child(&dss_mtx);
+}
+
+/******************************************************************************/
diff --git a/contrib/jemalloc/src/chunk_mmap.c b/contrib/jemalloc/src/chunk_mmap.c
new file mode 100644
index 0000000..e11cc0e
--- /dev/null
+++ b/contrib/jemalloc/src/chunk_mmap.c
@@ -0,0 +1,207 @@
+#define JEMALLOC_CHUNK_MMAP_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+/*
+ * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
+ * potentially avoid some system calls.
+ */
+malloc_tsd_data(static, mmap_unaligned, bool, false)
+malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
+ malloc_tsd_no_cleanup)
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void *pages_map(void *addr, size_t size);
+static void pages_unmap(void *addr, size_t size);
+static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
+ bool unaligned);
+
+/******************************************************************************/
+
+static void *
+pages_map(void *addr, size_t size)
+{
+ void *ret;
+
+ /*
+ * We don't use MAP_FIXED here, because it can cause the *replacement*
+ * of existing mappings, and we only want to create new mappings.
+ */
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
+ assert(ret != NULL);
+
+ if (ret == MAP_FAILED)
+ ret = NULL;
+ else if (addr != NULL && ret != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right place.
+ */
+ if (munmap(ret, size) == -1) {
+ char buf[BUFERROR_BUF];
+
+ buferror(errno, buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in munmap(): %s\n",
+ buf);
+ if (opt_abort)
+ abort();
+ }
+ ret = NULL;
+ }
+
+ assert(ret == NULL || (addr == NULL && ret != addr)
+ || (addr != NULL && ret == addr));
+ return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+
+ if (munmap(addr, size) == -1) {
+ char buf[BUFERROR_BUF];
+
+ buferror(errno, buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
+ if (opt_abort)
+ abort();
+ }
+}
+
+static void *
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
+{
+ void *ret, *pages;
+ size_t alloc_size, leadsize, trailsize;
+
+ alloc_size = size + alignment - PAGE;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
+ pages = pages_map(NULL, alloc_size);
+ if (pages == NULL)
+ return (NULL);
+ leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
+ (uintptr_t)pages;
+ assert(alloc_size >= leadsize + size);
+ trailsize = alloc_size - leadsize - size;
+ ret = (void *)((uintptr_t)pages + leadsize);
+ if (leadsize != 0) {
+ /* Note that mmap() returned an unaligned mapping. */
+ unaligned = true;
+ pages_unmap(pages, leadsize);
+ }
+ if (trailsize != 0)
+ pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+
+ /*
+ * If mmap() returned an aligned mapping, reset mmap_unaligned so that
+ * the next chunk_alloc_mmap() execution tries the fast allocation
+ * method.
+ */
+ if (unaligned == false && mmap_unaligned_booted) {
+ bool mu = false;
+ mmap_unaligned_tsd_set(&mu);
+ }
+
+ return (ret);
+}
+
+void *
+chunk_alloc_mmap(size_t size, size_t alignment)
+{
+ void *ret;
+
+ /*
+ * Ideally, there would be a way to specify alignment to mmap() (like
+ * NetBSD has), but in the absence of such a feature, we have to work
+ * hard to efficiently create aligned mappings. The reliable, but
+ * slow method is to create a mapping that is over-sized, then trim the
+ * excess. However, that always results in at least one call to
+ * pages_unmap().
+ *
+ * A more optimistic approach is to try mapping precisely the right
+ * amount, then try to append another mapping if alignment is off. In
+ * practice, this works out well as long as the application is not
+ * interleaving mappings via direct mmap() calls. If we do run into a
+ * situation where there is an interleaved mapping and we are unable to
+ * extend an unaligned mapping, our best option is to switch to the
+ * slow method until mmap() returns another aligned mapping. This will
+ * tend to leave a gap in the memory map that is too small to cause
+ * later problems for the optimistic method.
+ *
+ * Another possible confounding factor is address space layout
+ * randomization (ASLR), which causes mmap(2) to disregard the
+ * requested address. mmap_unaligned tracks whether the previous
+ * chunk_alloc_mmap() execution received any unaligned or relocated
+ * mappings, and if so, the current execution will immediately fall
+ * back to the slow method. However, we keep track of whether the fast
+ * method would have succeeded, and if so, we make a note to try the
+ * fast method next time.
+ */
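+ /*
+ * Illustrative numbers (assuming alignment == chunksize == 4 MiB): if the
+ * optimistic mmap() below returns 0x7f0000100000, offset is 0x100000, so
+ * 0x300000 bytes are mapped immediately after the allocation and the
+ * misaligned 0x300000-byte prefix is unmapped, leaving an aligned chunk at
+ * 0x7f0000400000.
+ */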
+
+ if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
+ size_t offset;
+
+ ret = pages_map(NULL, size);
+ if (ret == NULL)
+ return (NULL);
+
+ offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+ if (offset != 0) {
+ bool mu = true;
+ mmap_unaligned_tsd_set(&mu);
+ /* Try to extend chunk boundary. */
+ if (pages_map((void *)((uintptr_t)ret + size),
+ chunksize - offset) == NULL) {
+ /*
+ * Extension failed. Clean up, then revert to
+ * the reliable-but-expensive method.
+ */
+ pages_unmap(ret, size);
+ ret = chunk_alloc_mmap_slow(size, alignment,
+ true);
+ } else {
+ /* Clean up unneeded leading space. */
+ pages_unmap(ret, chunksize - offset);
+ ret = (void *)((uintptr_t)ret + (chunksize -
+ offset));
+ }
+ }
+ } else
+ ret = chunk_alloc_mmap_slow(size, alignment, false);
+
+ return (ret);
+}
+
+bool
+chunk_dealloc_mmap(void *chunk, size_t size)
+{
+
+ if (config_munmap)
+ pages_unmap(chunk, size);
+
+ return (config_munmap == false);
+}
+
+bool
+chunk_mmap_boot(void)
+{
+
+ /*
+ * XXX For the non-TLS implementation of tsd, the first access from
+ * each thread causes memory allocation. The result is a bootstrapping
+ * problem for this particular use case, so for now just disable it by
+ * leaving it in an unbooted state.
+ */
+#ifdef JEMALLOC_TLS
+ if (mmap_unaligned_tsd_boot())
+ return (true);
+#endif
+
+ return (false);
+}
diff --git a/contrib/jemalloc/src/ckh.c b/contrib/jemalloc/src/ckh.c
new file mode 100644
index 0000000..742a950
--- /dev/null
+++ b/contrib/jemalloc/src/ckh.c
@@ -0,0 +1,609 @@
+/*
+ *******************************************************************************
+ * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
+ * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
+ * functions are employed. The original cuckoo hashing algorithm was described
+ * in:
+ *
+ * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
+ * 51(2):122-144.
+ *
+ * Generalization of cuckoo hashing was discussed in:
+ *
+ * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
+ * alternative to traditional hash tables. In Proceedings of the 7th
+ * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
+ * January 2006.
+ *
+ * This implementation uses precisely two hash functions because that is the
+ * fewest that can work, and supporting multiple hashes is an implementation
+ * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
+ * that shows approximate expected maximum load factors for various
+ * configurations:
+ *
+ * | #cells/bucket |
+ * #hashes | 1 | 2 | 4 | 8 |
+ * --------+-------+-------+-------+-------+
+ * 1 | 0.006 | 0.006 | 0.03 | 0.12 |
+ * 2 | 0.49 | 0.86 |>0.93< |>0.96< |
+ * 3 | 0.91 | 0.97 | 0.98 | 0.999 |
+ * 4 | 0.97 | 0.99 | 0.999 | |
+ *
+ * The number of cells per bucket is chosen such that a bucket fits in one cache
+ * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
+ * respectively.
+ *
+ ******************************************************************************/
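+/*
+ * Minimal usage sketch (illustrative only; error handling elided).  Note that
+ * these functions return false on success:
+ *
+ *     ckh_t ckh;
+ *     void *data;
+ *     if (ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp) == false) {
+ *         ckh_insert(&ckh, "key", "value");
+ *         if (ckh_search(&ckh, "key", NULL, &data) == false)
+ *             assert(strcmp((char *)data, "value") == 0);
+ *         ckh_delete(&ckh);
+ *     }
+ */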
+#define JEMALLOC_CKH_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static bool ckh_grow(ckh_t *ckh);
+static void ckh_shrink(ckh_t *ckh);
+
+/******************************************************************************/
+
+/*
+ * Search bucket for key and return the cell number if found; SIZE_T_MAX
+ * otherwise.
+ */
+JEMALLOC_INLINE size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
+{
+ ckhc_t *cell;
+ unsigned i;
+
+ for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
+ cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
+ if (cell->key != NULL && ckh->keycomp(key, cell->key))
+ return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+ }
+
+ return (SIZE_T_MAX);
+}
+
+/*
+ * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
+ */
+JEMALLOC_INLINE size_t
+ckh_isearch(ckh_t *ckh, const void *key)
+{
+ size_t hash1, hash2, bucket, cell;
+
+ assert(ckh != NULL);
+
+ ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+
+ /* Search primary bucket. */
+ bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ cell = ckh_bucket_search(ckh, bucket, key);
+ if (cell != SIZE_T_MAX)
+ return (cell);
+
+ /* Search secondary bucket. */
+ bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ cell = ckh_bucket_search(ckh, bucket, key);
+ return (cell);
+}
+
+JEMALLOC_INLINE bool
+ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
+ const void *data)
+{
+ ckhc_t *cell;
+ unsigned offset, i;
+
+ /*
+ * Cycle through the cells in the bucket, starting at a random position.
+ * The randomness avoids worst-case search overhead as buckets fill up.
+ */
+ prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+ for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
+ cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
+ ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
+ if (cell->key == NULL) {
+ cell->key = key;
+ cell->data = data;
+ ckh->count++;
+ return (false);
+ }
+ }
+
+ return (true);
+}
+
+/*
+ * No space is available in bucket. Randomly evict an item, then try to find an
+ * alternate location for that item. Iteratively repeat this
+ * eviction/relocation procedure until either success or detection of an
+ * eviction/relocation bucket cycle.
+ */
+JEMALLOC_INLINE bool
+ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
+ void const **argdata)
+{
+ const void *key, *data, *tkey, *tdata;
+ ckhc_t *cell;
+ size_t hash1, hash2, bucket, tbucket;
+ unsigned i;
+
+ bucket = argbucket;
+ key = *argkey;
+ data = *argdata;
+ while (true) {
+ /*
+ * Choose a random item within the bucket to evict. This is
+ * critical to correct function, because without (eventually)
+ * evicting all items within a bucket during iteration, it
+ * would be possible to get stuck in an infinite loop if there
+ * were an item for which both hashes indicated the same
+ * bucket.
+ */
+ prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+ cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
+ assert(cell->key != NULL);
+
+ /* Swap cell->{key,data} and {key,data} (evict). */
+ tkey = cell->key; tdata = cell->data;
+ cell->key = key; cell->data = data;
+ key = tkey; data = tdata;
+
+#ifdef CKH_COUNT
+ ckh->nrelocs++;
+#endif
+
+ /* Find the alternate bucket for the evicted item. */
+ ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+ tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ if (tbucket == bucket) {
+ tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ /*
+ * It may be that (tbucket == bucket) still, if the
+ * item's hashes both indicate this bucket. However,
+ * we are guaranteed to eventually escape this bucket
+ * during iteration, assuming pseudo-random item
+ * selection (true randomness would make infinite
+ * looping a remote possibility). The reason we can
+ * never get trapped forever is that there are two
+ * cases:
+ *
+ * 1) This bucket == argbucket, so we will quickly
+ * detect an eviction cycle and terminate.
+ * 2) An item was evicted to this bucket from another,
+ * which means that at least one item in this bucket
+ * has hashes that indicate distinct buckets.
+ */
+ }
+ /* Check for a cycle. */
+ if (tbucket == argbucket) {
+ *argkey = key;
+ *argdata = data;
+ return (true);
+ }
+
+ bucket = tbucket;
+ if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+ return (false);
+ }
+}
+
+JEMALLOC_INLINE bool
+ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
+{
+ size_t hash1, hash2, bucket;
+ const void *key = *argkey;
+ const void *data = *argdata;
+
+ ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+
+ /* Try to insert in primary bucket. */
+ bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+ return (false);
+
+ /* Try to insert in secondary bucket. */
+ bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+ return (false);
+
+ /*
+ * Try to find a place for this item via iterative eviction/relocation.
+ */
+ return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
+}
+
+/*
+ * Try to rebuild the hash table from scratch by inserting all items from the
+ * old table into the new.
+ */
+JEMALLOC_INLINE bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
+{
+ size_t count, i, nins;
+ const void *key, *data;
+
+ count = ckh->count;
+ ckh->count = 0;
+ for (i = nins = 0; nins < count; i++) {
+ if (aTab[i].key != NULL) {
+ key = aTab[i].key;
+ data = aTab[i].data;
+ if (ckh_try_insert(ckh, &key, &data)) {
+ ckh->count = count;
+ return (true);
+ }
+ nins++;
+ }
+ }
+
+ return (false);
+}
+
+static bool
+ckh_grow(ckh_t *ckh)
+{
+ bool ret;
+ ckhc_t *tab, *ttab;
+ size_t lg_curcells;
+ unsigned lg_prevbuckets;
+
+#ifdef CKH_COUNT
+ ckh->ngrows++;
+#endif
+
+ /*
+ * It is possible (though unlikely, given well behaved hashes) that the
+ * table will have to be doubled more than once in order to create a
+ * usable table.
+ */
+ lg_prevbuckets = ckh->lg_curbuckets;
+ lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
+ while (true) {
+ size_t usize;
+
+ lg_curcells++;
+ usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (usize == 0) {
+ ret = true;
+ goto label_return;
+ }
+ tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ if (tab == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ /* Swap in new table. */
+ ttab = ckh->tab;
+ ckh->tab = tab;
+ tab = ttab;
+ ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
+
+ if (ckh_rebuild(ckh, tab) == false) {
+ idalloc(tab);
+ break;
+ }
+
+ /* Rebuilding failed, so back out partially rebuilt table. */
+ idalloc(ckh->tab);
+ ckh->tab = tab;
+ ckh->lg_curbuckets = lg_prevbuckets;
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+static void
+ckh_shrink(ckh_t *ckh)
+{
+ ckhc_t *tab, *ttab;
+ size_t lg_curcells, usize;
+ unsigned lg_prevbuckets;
+
+ /*
+ * It is possible (though unlikely, given well behaved hashes) that the
+ * table rebuild will fail.
+ */
+ lg_prevbuckets = ckh->lg_curbuckets;
+ lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
+ usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (usize == 0)
+ return;
+ tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ if (tab == NULL) {
+ /*
+ * An OOM error isn't worth propagating, since it doesn't
+ * prevent this or future operations from proceeding.
+ */
+ return;
+ }
+ /* Swap in new table. */
+ ttab = ckh->tab;
+ ckh->tab = tab;
+ tab = ttab;
+ ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
+
+ if (ckh_rebuild(ckh, tab) == false) {
+ idalloc(tab);
+#ifdef CKH_COUNT
+ ckh->nshrinks++;
+#endif
+ return;
+ }
+
+ /* Rebuilding failed, so back out partially rebuilt table. */
+ idalloc(ckh->tab);
+ ckh->tab = tab;
+ ckh->lg_curbuckets = lg_prevbuckets;
+#ifdef CKH_COUNT
+ ckh->nshrinkfails++;
+#endif
+}
+
+bool
+ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
+{
+ bool ret;
+ size_t mincells, usize;
+ unsigned lg_mincells;
+
+ assert(minitems > 0);
+ assert(hash != NULL);
+ assert(keycomp != NULL);
+
+#ifdef CKH_COUNT
+ ckh->ngrows = 0;
+ ckh->nshrinks = 0;
+ ckh->nshrinkfails = 0;
+ ckh->ninserts = 0;
+ ckh->nrelocs = 0;
+#endif
+ ckh->prng_state = 42; /* Value doesn't really matter. */
+ ckh->count = 0;
+
+ /*
+ * Find the minimum power of 2 that is large enough to fit minitems
+ * entries.  We are using (2+,2) cuckoo hashing, which has an expected
+ * maximum load factor of at least ~0.86, so 0.75 is a conservative load
+ * factor that will typically allow minitems to fit without ever growing
+ * the table.
+ */
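+ /*
+ * E.g. (illustrative, assuming 4-cell buckets): minitems == 10 yields
+ * mincells == 16 below, hence lg_mincells == 4, i.e. 4 buckets of 4 cells.
+ */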
+ assert(LG_CKH_BUCKET_CELLS > 0);
+ mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
+ for (lg_mincells = LG_CKH_BUCKET_CELLS;
+ (ZU(1) << lg_mincells) < mincells;
+ lg_mincells++)
+ ; /* Do nothing. */
+ ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
+ ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
+ ckh->hash = hash;
+ ckh->keycomp = keycomp;
+
+ usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+ if (usize == 0) {
+ ret = true;
+ goto label_return;
+ }
+ ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ if (ckh->tab == NULL) {
+ ret = true;
+ goto label_return;
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+void
+ckh_delete(ckh_t *ckh)
+{
+
+ assert(ckh != NULL);
+
+#ifdef CKH_VERBOSE
+ malloc_printf(
+ "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
+ " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
+ " nrelocs: %"PRIu64"\n", __func__, ckh,
+ (unsigned long long)ckh->ngrows,
+ (unsigned long long)ckh->nshrinks,
+ (unsigned long long)ckh->nshrinkfails,
+ (unsigned long long)ckh->ninserts,
+ (unsigned long long)ckh->nrelocs);
+#endif
+
+ idalloc(ckh->tab);
+#ifdef JEMALLOC_DEBUG
+ memset(ckh, 0x5a, sizeof(ckh_t));
+#endif
+}
+
+size_t
+ckh_count(ckh_t *ckh)
+{
+
+ assert(ckh != NULL);
+
+ return (ckh->count);
+}
+
+bool
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
+{
+ size_t i, ncells;
+
+ for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
+ LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
+ if (ckh->tab[i].key != NULL) {
+ if (key != NULL)
+ *key = (void *)ckh->tab[i].key;
+ if (data != NULL)
+ *data = (void *)ckh->tab[i].data;
+ *tabind = i + 1;
+ return (false);
+ }
+ }
+
+ return (true);
+}
+
+bool
+ckh_insert(ckh_t *ckh, const void *key, const void *data)
+{
+ bool ret;
+
+ assert(ckh != NULL);
+ assert(ckh_search(ckh, key, NULL, NULL));
+
+#ifdef CKH_COUNT
+ ckh->ninserts++;
+#endif
+
+ while (ckh_try_insert(ckh, &key, &data)) {
+ if (ckh_grow(ckh)) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+bool
+ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+{
+ size_t cell;
+
+ assert(ckh != NULL);
+
+ cell = ckh_isearch(ckh, searchkey);
+ if (cell != SIZE_T_MAX) {
+ if (key != NULL)
+ *key = (void *)ckh->tab[cell].key;
+ if (data != NULL)
+ *data = (void *)ckh->tab[cell].data;
+ ckh->tab[cell].key = NULL;
+ ckh->tab[cell].data = NULL; /* Not necessary. */
+
+ ckh->count--;
+ /* Try to halve the table if it is less than 1/4 full. */
+ if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+ + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
+ > ckh->lg_minbuckets) {
+ /* Ignore error due to OOM. */
+ ckh_shrink(ckh);
+ }
+
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
+{
+ size_t cell;
+
+ assert(ckh != NULL);
+
+ cell = ckh_isearch(ckh, searchkey);
+ if (cell != SIZE_T_MAX) {
+ if (key != NULL)
+ *key = (void *)ckh->tab[cell].key;
+ if (data != NULL)
+ *data = (void *)ckh->tab[cell].data;
+ return (false);
+ }
+
+ return (true);
+}
+
+void
+ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+{
+ size_t ret1, ret2;
+ uint64_t h;
+
+ assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
+ assert(hash1 != NULL);
+ assert(hash2 != NULL);
+
+ h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
+ if (minbits <= 32) {
+ /*
+ * Avoid doing multiple hashes, since a single hash provides
+ * enough bits.
+ */
+ ret1 = h & ZU(0xffffffffU);
+ ret2 = h >> 32;
+ } else {
+ ret1 = h;
+ ret2 = hash(key, strlen((const char *)key),
+ UINT64_C(0x8432a476666bbc13));
+ }
+
+ *hash1 = ret1;
+ *hash2 = ret2;
+}
+
+bool
+ckh_string_keycomp(const void *k1, const void *k2)
+{
+
+ assert(k1 != NULL);
+ assert(k2 != NULL);
+
+ return (strcmp((char *)k1, (char *)k2) ? false : true);
+}
+
+void
+ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
+ size_t *hash2)
+{
+ size_t ret1, ret2;
+ uint64_t h;
+ union {
+ const void *v;
+ uint64_t i;
+ } u;
+
+ assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
+ assert(hash1 != NULL);
+ assert(hash2 != NULL);
+
+ assert(sizeof(u.v) == sizeof(u.i));
+#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
+ u.i = 0;
+#endif
+ u.v = key;
+ h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
+ if (minbits <= 32) {
+ /*
+ * Avoid doing multiple hashes, since a single hash provides
+ * enough bits.
+ */
+ ret1 = h & ZU(0xffffffffU);
+ ret2 = h >> 32;
+ } else {
+ assert(SIZEOF_PTR == 8);
+ ret1 = h;
+ ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
+ }
+
+ *hash1 = ret1;
+ *hash2 = ret2;
+}
+
+bool
+ckh_pointer_keycomp(const void *k1, const void *k2)
+{
+
+ return ((k1 == k2) ? true : false);
+}
diff --git a/contrib/jemalloc/src/ctl.c b/contrib/jemalloc/src/ctl.c
new file mode 100644
index 0000000..a6a02cc
--- /dev/null
+++ b/contrib/jemalloc/src/ctl.c
@@ -0,0 +1,1385 @@
+#define JEMALLOC_CTL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+/*
+ * ctl_mtx protects the following:
+ * - ctl_stats.*
+ * - opt_prof_active
+ */
+static malloc_mutex_t ctl_mtx;
+static bool ctl_initialized;
+static uint64_t ctl_epoch;
+static ctl_stats_t ctl_stats;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+#define CTL_PROTO(n) \
+static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen);
+
+#define INDEX_PROTO(n) \
+const ctl_node_t *n##_index(const size_t *mib, size_t miblen, \
+ size_t i);
+
+static bool ctl_arena_init(ctl_arena_stats_t *astats);
+static void ctl_arena_clear(ctl_arena_stats_t *astats);
+static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
+ arena_t *arena);
+static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
+ ctl_arena_stats_t *astats);
+static void ctl_arena_refresh(arena_t *arena, unsigned i);
+static void ctl_refresh(void);
+static bool ctl_init(void);
+static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
+ size_t *mibp, size_t *depthp);
+
+CTL_PROTO(version)
+CTL_PROTO(epoch)
+CTL_PROTO(thread_tcache_enabled)
+CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_arena)
+CTL_PROTO(thread_allocated)
+CTL_PROTO(thread_allocatedp)
+CTL_PROTO(thread_deallocated)
+CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(config_debug)
+CTL_PROTO(config_dss)
+CTL_PROTO(config_fill)
+CTL_PROTO(config_lazy_lock)
+CTL_PROTO(config_munmap)
+CTL_PROTO(config_prof)
+CTL_PROTO(config_prof_libgcc)
+CTL_PROTO(config_prof_libunwind)
+CTL_PROTO(config_stats)
+CTL_PROTO(config_tcache)
+CTL_PROTO(config_tls)
+CTL_PROTO(config_utrace)
+CTL_PROTO(config_valgrind)
+CTL_PROTO(config_xmalloc)
+CTL_PROTO(opt_abort)
+CTL_PROTO(opt_lg_chunk)
+CTL_PROTO(opt_narenas)
+CTL_PROTO(opt_lg_dirty_mult)
+CTL_PROTO(opt_stats_print)
+CTL_PROTO(opt_junk)
+CTL_PROTO(opt_zero)
+CTL_PROTO(opt_quarantine)
+CTL_PROTO(opt_redzone)
+CTL_PROTO(opt_utrace)
+CTL_PROTO(opt_valgrind)
+CTL_PROTO(opt_xmalloc)
+CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_lg_tcache_max)
+CTL_PROTO(opt_prof)
+CTL_PROTO(opt_prof_prefix)
+CTL_PROTO(opt_prof_active)
+CTL_PROTO(opt_lg_prof_sample)
+CTL_PROTO(opt_lg_prof_interval)
+CTL_PROTO(opt_prof_gdump)
+CTL_PROTO(opt_prof_leak)
+CTL_PROTO(opt_prof_accum)
+CTL_PROTO(arenas_bin_i_size)
+CTL_PROTO(arenas_bin_i_nregs)
+CTL_PROTO(arenas_bin_i_run_size)
+INDEX_PROTO(arenas_bin_i)
+CTL_PROTO(arenas_lrun_i_size)
+INDEX_PROTO(arenas_lrun_i)
+CTL_PROTO(arenas_narenas)
+CTL_PROTO(arenas_initialized)
+CTL_PROTO(arenas_quantum)
+CTL_PROTO(arenas_page)
+CTL_PROTO(arenas_tcache_max)
+CTL_PROTO(arenas_nbins)
+CTL_PROTO(arenas_nhbins)
+CTL_PROTO(arenas_nlruns)
+CTL_PROTO(arenas_purge)
+CTL_PROTO(prof_active)
+CTL_PROTO(prof_dump)
+CTL_PROTO(prof_interval)
+CTL_PROTO(stats_chunks_current)
+CTL_PROTO(stats_chunks_total)
+CTL_PROTO(stats_chunks_high)
+CTL_PROTO(stats_huge_allocated)
+CTL_PROTO(stats_huge_nmalloc)
+CTL_PROTO(stats_huge_ndalloc)
+CTL_PROTO(stats_arenas_i_small_allocated)
+CTL_PROTO(stats_arenas_i_small_nmalloc)
+CTL_PROTO(stats_arenas_i_small_ndalloc)
+CTL_PROTO(stats_arenas_i_small_nrequests)
+CTL_PROTO(stats_arenas_i_large_allocated)
+CTL_PROTO(stats_arenas_i_large_nmalloc)
+CTL_PROTO(stats_arenas_i_large_ndalloc)
+CTL_PROTO(stats_arenas_i_large_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_allocated)
+CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
+CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
+CTL_PROTO(stats_arenas_i_bins_j_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_nfills)
+CTL_PROTO(stats_arenas_i_bins_j_nflushes)
+CTL_PROTO(stats_arenas_i_bins_j_nruns)
+CTL_PROTO(stats_arenas_i_bins_j_nreruns)
+CTL_PROTO(stats_arenas_i_bins_j_curruns)
+INDEX_PROTO(stats_arenas_i_bins_j)
+CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
+CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
+CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
+CTL_PROTO(stats_arenas_i_lruns_j_curruns)
+INDEX_PROTO(stats_arenas_i_lruns_j)
+CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_pactive)
+CTL_PROTO(stats_arenas_i_pdirty)
+CTL_PROTO(stats_arenas_i_mapped)
+CTL_PROTO(stats_arenas_i_npurge)
+CTL_PROTO(stats_arenas_i_nmadvise)
+CTL_PROTO(stats_arenas_i_purged)
+INDEX_PROTO(stats_arenas_i)
+CTL_PROTO(stats_cactive)
+CTL_PROTO(stats_allocated)
+CTL_PROTO(stats_active)
+CTL_PROTO(stats_mapped)
+
+/******************************************************************************/
+/* mallctl tree. */
+
+/* Maximum tree depth. */
+#define CTL_MAX_DEPTH 6
+
+#define NAME(n) true, {.named = {n
+#define CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t), c##_node}}, NULL
+#define CTL(c) 0, NULL}}, c##_ctl
+
+/*
+ * Only handles internal indexed nodes, since there are currently no external
+ * ones.
+ */
+#define INDEX(i) false, {.indexed = {i##_index}}, NULL
+
+static const ctl_node_t tcache_node[] = {
+ {NAME("enabled"), CTL(thread_tcache_enabled)},
+ {NAME("flush"), CTL(thread_tcache_flush)}
+};
+
+static const ctl_node_t thread_node[] = {
+ {NAME("arena"), CTL(thread_arena)},
+ {NAME("allocated"), CTL(thread_allocated)},
+ {NAME("allocatedp"), CTL(thread_allocatedp)},
+ {NAME("deallocated"), CTL(thread_deallocated)},
+ {NAME("deallocatedp"), CTL(thread_deallocatedp)},
+ {NAME("tcache"), CHILD(tcache)}
+};
+
+static const ctl_node_t config_node[] = {
+ {NAME("debug"), CTL(config_debug)},
+ {NAME("dss"), CTL(config_dss)},
+ {NAME("fill"), CTL(config_fill)},
+ {NAME("lazy_lock"), CTL(config_lazy_lock)},
+ {NAME("munmap"), CTL(config_munmap)},
+ {NAME("prof"), CTL(config_prof)},
+ {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
+ {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
+ {NAME("stats"), CTL(config_stats)},
+ {NAME("tcache"), CTL(config_tcache)},
+ {NAME("tls"), CTL(config_tls)},
+ {NAME("utrace"), CTL(config_utrace)},
+ {NAME("valgrind"), CTL(config_valgrind)},
+ {NAME("xmalloc"), CTL(config_xmalloc)}
+};
+
+static const ctl_node_t opt_node[] = {
+ {NAME("abort"), CTL(opt_abort)},
+ {NAME("lg_chunk"), CTL(opt_lg_chunk)},
+ {NAME("narenas"), CTL(opt_narenas)},
+ {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
+ {NAME("stats_print"), CTL(opt_stats_print)},
+ {NAME("junk"), CTL(opt_junk)},
+ {NAME("zero"), CTL(opt_zero)},
+ {NAME("quarantine"), CTL(opt_quarantine)},
+ {NAME("redzone"), CTL(opt_redzone)},
+ {NAME("utrace"), CTL(opt_utrace)},
+ {NAME("valgrind"), CTL(opt_valgrind)},
+ {NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("tcache"), CTL(opt_tcache)},
+ {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
+ {NAME("prof"), CTL(opt_prof)},
+ {NAME("prof_prefix"), CTL(opt_prof_prefix)},
+ {NAME("prof_active"), CTL(opt_prof_active)},
+ {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
+ {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
+ {NAME("prof_gdump"), CTL(opt_prof_gdump)},
+ {NAME("prof_leak"), CTL(opt_prof_leak)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)}
+};
+
+static const ctl_node_t arenas_bin_i_node[] = {
+ {NAME("size"), CTL(arenas_bin_i_size)},
+ {NAME("nregs"), CTL(arenas_bin_i_nregs)},
+ {NAME("run_size"), CTL(arenas_bin_i_run_size)}
+};
+static const ctl_node_t super_arenas_bin_i_node[] = {
+ {NAME(""), CHILD(arenas_bin_i)}
+};
+
+static const ctl_node_t arenas_bin_node[] = {
+ {INDEX(arenas_bin_i)}
+};
+
+static const ctl_node_t arenas_lrun_i_node[] = {
+ {NAME("size"), CTL(arenas_lrun_i_size)}
+};
+static const ctl_node_t super_arenas_lrun_i_node[] = {
+ {NAME(""), CHILD(arenas_lrun_i)}
+};
+
+static const ctl_node_t arenas_lrun_node[] = {
+ {INDEX(arenas_lrun_i)}
+};
+
+static const ctl_node_t arenas_node[] = {
+ {NAME("narenas"), CTL(arenas_narenas)},
+ {NAME("initialized"), CTL(arenas_initialized)},
+ {NAME("quantum"), CTL(arenas_quantum)},
+ {NAME("page"), CTL(arenas_page)},
+ {NAME("tcache_max"), CTL(arenas_tcache_max)},
+ {NAME("nbins"), CTL(arenas_nbins)},
+ {NAME("nhbins"), CTL(arenas_nhbins)},
+ {NAME("bin"), CHILD(arenas_bin)},
+ {NAME("nlruns"), CTL(arenas_nlruns)},
+ {NAME("lrun"), CHILD(arenas_lrun)},
+ {NAME("purge"), CTL(arenas_purge)}
+};
+
+static const ctl_node_t prof_node[] = {
+ {NAME("active"), CTL(prof_active)},
+ {NAME("dump"), CTL(prof_dump)},
+ {NAME("interval"), CTL(prof_interval)}
+};
+
+static const ctl_node_t stats_chunks_node[] = {
+ {NAME("current"), CTL(stats_chunks_current)},
+ {NAME("total"), CTL(stats_chunks_total)},
+ {NAME("high"), CTL(stats_chunks_high)}
+};
+
+static const ctl_node_t stats_huge_node[] = {
+ {NAME("allocated"), CTL(stats_huge_allocated)},
+ {NAME("nmalloc"), CTL(stats_huge_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_huge_ndalloc)}
+};
+
+static const ctl_node_t stats_arenas_i_small_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
+};
+
+static const ctl_node_t stats_arenas_i_large_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
+};
+
+static const ctl_node_t stats_arenas_i_bins_j_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
+ {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
+ {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
+ {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
+ {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
+ {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
+};
+static const ctl_node_t super_stats_arenas_i_bins_j_node[] = {
+ {NAME(""), CHILD(stats_arenas_i_bins_j)}
+};
+
+static const ctl_node_t stats_arenas_i_bins_node[] = {
+ {INDEX(stats_arenas_i_bins_j)}
+};
+
+static const ctl_node_t stats_arenas_i_lruns_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
+ {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
+};
+static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = {
+ {NAME(""), CHILD(stats_arenas_i_lruns_j)}
+};
+
+static const ctl_node_t stats_arenas_i_lruns_node[] = {
+ {INDEX(stats_arenas_i_lruns_j)}
+};
+
+static const ctl_node_t stats_arenas_i_node[] = {
+ {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("pactive"), CTL(stats_arenas_i_pactive)},
+ {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
+ {NAME("mapped"), CTL(stats_arenas_i_mapped)},
+ {NAME("npurge"), CTL(stats_arenas_i_npurge)},
+ {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
+ {NAME("purged"), CTL(stats_arenas_i_purged)},
+ {NAME("small"), CHILD(stats_arenas_i_small)},
+ {NAME("large"), CHILD(stats_arenas_i_large)},
+ {NAME("bins"), CHILD(stats_arenas_i_bins)},
+ {NAME("lruns"), CHILD(stats_arenas_i_lruns)}
+};
+static const ctl_node_t super_stats_arenas_i_node[] = {
+ {NAME(""), CHILD(stats_arenas_i)}
+};
+
+static const ctl_node_t stats_arenas_node[] = {
+ {INDEX(stats_arenas_i)}
+};
+
+static const ctl_node_t stats_node[] = {
+ {NAME("cactive"), CTL(stats_cactive)},
+ {NAME("allocated"), CTL(stats_allocated)},
+ {NAME("active"), CTL(stats_active)},
+ {NAME("mapped"), CTL(stats_mapped)},
+ {NAME("chunks"), CHILD(stats_chunks)},
+ {NAME("huge"), CHILD(stats_huge)},
+ {NAME("arenas"), CHILD(stats_arenas)}
+};
+
+static const ctl_node_t root_node[] = {
+ {NAME("version"), CTL(version)},
+ {NAME("epoch"), CTL(epoch)},
+ {NAME("thread"), CHILD(thread)},
+ {NAME("config"), CHILD(config)},
+ {NAME("opt"), CHILD(opt)},
+ {NAME("arenas"), CHILD(arenas)},
+ {NAME("prof"), CHILD(prof)},
+ {NAME("stats"), CHILD(stats)}
+};
+static const ctl_node_t super_root_node[] = {
+ {NAME(""), CHILD(root)}
+};
+
+#undef NAME
+#undef CHILD
+#undef CTL
+#undef INDEX
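
The arrays above are what the public mallctl() interface walks: each NAME()/CTL() entry is a named leaf bound to a *_ctl() handler, and each CHILD() entry links in a subtree. A minimal caller-side sketch follows; it assumes the FreeBSD <malloc_np.h> declarations (a standalone jemalloc build would include <jemalloc/jemalloc.h> instead) and uses only names defined in this tree.

    #include <stdio.h>
    #include <malloc_np.h>    /* Assumed FreeBSD header for mallctl(). */

    int
    main(void)
    {
        const char *version;
        unsigned narenas;
        size_t len;

        /* "version" resolves to the CTL(version) leaf of root_node. */
        len = sizeof(version);
        if (mallctl("version", &version, &len, NULL, 0) == 0)
            printf("jemalloc version: %s\n", version);

        /* "arenas.narenas" walks root_node -> arenas_node -> a leaf. */
        len = sizeof(narenas);
        if (mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0)
            printf("narenas: %u\n", narenas);

        return (0);
    }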
+
+/******************************************************************************/
+
+static bool
+ctl_arena_init(ctl_arena_stats_t *astats)
+{
+
+ if (astats->lstats == NULL) {
+ astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
+ sizeof(malloc_large_stats_t));
+ if (astats->lstats == NULL)
+ return (true);
+ }
+
+ return (false);
+}
+
+static void
+ctl_arena_clear(ctl_arena_stats_t *astats)
+{
+
+ astats->pactive = 0;
+ astats->pdirty = 0;
+ if (config_stats) {
+ memset(&astats->astats, 0, sizeof(arena_stats_t));
+ astats->allocated_small = 0;
+ astats->nmalloc_small = 0;
+ astats->ndalloc_small = 0;
+ astats->nrequests_small = 0;
+ memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
+ memset(astats->lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ }
+}
+
+static void
+ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
+{
+ unsigned i;
+
+ arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
+ &cstats->astats, cstats->bstats, cstats->lstats);
+
+ for (i = 0; i < NBINS; i++) {
+ cstats->allocated_small += cstats->bstats[i].allocated;
+ cstats->nmalloc_small += cstats->bstats[i].nmalloc;
+ cstats->ndalloc_small += cstats->bstats[i].ndalloc;
+ cstats->nrequests_small += cstats->bstats[i].nrequests;
+ }
+}
+
+static void
+ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
+{
+ unsigned i;
+
+ sstats->pactive += astats->pactive;
+ sstats->pdirty += astats->pdirty;
+
+ sstats->astats.mapped += astats->astats.mapped;
+ sstats->astats.npurge += astats->astats.npurge;
+ sstats->astats.nmadvise += astats->astats.nmadvise;
+ sstats->astats.purged += astats->astats.purged;
+
+ sstats->allocated_small += astats->allocated_small;
+ sstats->nmalloc_small += astats->nmalloc_small;
+ sstats->ndalloc_small += astats->ndalloc_small;
+ sstats->nrequests_small += astats->nrequests_small;
+
+ sstats->astats.allocated_large += astats->astats.allocated_large;
+ sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+ sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+ sstats->astats.nrequests_large += astats->astats.nrequests_large;
+
+ for (i = 0; i < nlclasses; i++) {
+ sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+ sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+ sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
+ sstats->lstats[i].curruns += astats->lstats[i].curruns;
+ }
+
+ for (i = 0; i < NBINS; i++) {
+ sstats->bstats[i].allocated += astats->bstats[i].allocated;
+ sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+ sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+ sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
+ if (config_tcache) {
+ sstats->bstats[i].nfills += astats->bstats[i].nfills;
+ sstats->bstats[i].nflushes +=
+ astats->bstats[i].nflushes;
+ }
+ sstats->bstats[i].nruns += astats->bstats[i].nruns;
+ sstats->bstats[i].reruns += astats->bstats[i].reruns;
+ sstats->bstats[i].curruns += astats->bstats[i].curruns;
+ }
+}
+
+static void
+ctl_arena_refresh(arena_t *arena, unsigned i)
+{
+ ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
+ ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];
+
+ ctl_arena_clear(astats);
+
+ sstats->nthreads += astats->nthreads;
+ if (config_stats) {
+ ctl_arena_stats_amerge(astats, arena);
+ /* Merge into sum stats as well. */
+ ctl_arena_stats_smerge(sstats, astats);
+ } else {
+ astats->pactive += arena->nactive;
+ astats->pdirty += arena->ndirty;
+ /* Merge into sum stats as well. */
+ sstats->pactive += arena->nactive;
+ sstats->pdirty += arena->ndirty;
+ }
+}
+
+static void
+ctl_refresh(void)
+{
+ unsigned i;
+ arena_t *tarenas[narenas];
+
+ if (config_stats) {
+ malloc_mutex_lock(&chunks_mtx);
+ ctl_stats.chunks.current = stats_chunks.curchunks;
+ ctl_stats.chunks.total = stats_chunks.nchunks;
+ ctl_stats.chunks.high = stats_chunks.highchunks;
+ malloc_mutex_unlock(&chunks_mtx);
+
+ malloc_mutex_lock(&huge_mtx);
+ ctl_stats.huge.allocated = huge_allocated;
+ ctl_stats.huge.nmalloc = huge_nmalloc;
+ ctl_stats.huge.ndalloc = huge_ndalloc;
+ malloc_mutex_unlock(&huge_mtx);
+ }
+
+ /*
+	 * Clear sum stats, since ctl_arena_refresh() will merge into them.
+ */
+ ctl_stats.arenas[narenas].nthreads = 0;
+ ctl_arena_clear(&ctl_stats.arenas[narenas]);
+
+ malloc_mutex_lock(&arenas_lock);
+ memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
+ for (i = 0; i < narenas; i++) {
+ if (arenas[i] != NULL)
+ ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
+ else
+ ctl_stats.arenas[i].nthreads = 0;
+ }
+ malloc_mutex_unlock(&arenas_lock);
+ for (i = 0; i < narenas; i++) {
+ bool initialized = (tarenas[i] != NULL);
+
+ ctl_stats.arenas[i].initialized = initialized;
+ if (initialized)
+ ctl_arena_refresh(tarenas[i], i);
+ }
+
+ if (config_stats) {
+ ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
+ + ctl_stats.arenas[narenas].astats.allocated_large
+ + ctl_stats.huge.allocated;
+ ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
+ LG_PAGE) + ctl_stats.huge.allocated;
+ ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+ }
+
+ ctl_epoch++;
+}
+
+static bool
+ctl_init(void)
+{
+ bool ret;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (ctl_initialized == false) {
+ /*
+ * Allocate space for one extra arena stats element, which
+ * contains summed stats across all arenas.
+ */
+ ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
+ (narenas + 1) * sizeof(ctl_arena_stats_t));
+ if (ctl_stats.arenas == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ memset(ctl_stats.arenas, 0, (narenas + 1) *
+ sizeof(ctl_arena_stats_t));
+
+ /*
+ * Initialize all stats structures, regardless of whether they
+ * ever get used. Lazy initialization would allow errors to
+ * cause inconsistent state to be viewable by the application.
+ */
+ if (config_stats) {
+ unsigned i;
+ for (i = 0; i <= narenas; i++) {
+ if (ctl_arena_init(&ctl_stats.arenas[i])) {
+ ret = true;
+ goto label_return;
+ }
+ }
+ }
+ ctl_stats.arenas[narenas].initialized = true;
+
+ ctl_epoch = 0;
+ ctl_refresh();
+ ctl_initialized = true;
+ }
+
+ ret = false;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static int
+ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
+ size_t *depthp)
+{
+ int ret;
+ const char *elm, *tdot, *dot;
+ size_t elen, i, j;
+ const ctl_node_t *node;
+
+ elm = name;
+ /* Equivalent to strchrnul(). */
+ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
+ elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
+ if (elen == 0) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ node = super_root_node;
+ for (i = 0; i < *depthp; i++) {
+ assert(node->named);
+ assert(node->u.named.nchildren > 0);
+ if (node->u.named.children[0].named) {
+ const ctl_node_t *pnode = node;
+
+ /* Children are named. */
+ for (j = 0; j < node->u.named.nchildren; j++) {
+ const ctl_node_t *child =
+ &node->u.named.children[j];
+ if (strlen(child->u.named.name) == elen
+ && strncmp(elm, child->u.named.name,
+ elen) == 0) {
+ node = child;
+ if (nodesp != NULL)
+ nodesp[i] = node;
+ mibp[i] = j;
+ break;
+ }
+ }
+ if (node == pnode) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ } else {
+ uintmax_t index;
+ const ctl_node_t *inode;
+
+ /* Children are indexed. */
+ index = malloc_strtoumax(elm, NULL, 10);
+ if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ inode = &node->u.named.children[0];
+ node = inode->u.indexed.index(mibp, *depthp,
+ (size_t)index);
+ if (node == NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ if (nodesp != NULL)
+ nodesp[i] = node;
+ mibp[i] = (size_t)index;
+ }
+
+ if (node->ctl != NULL) {
+ /* Terminal node. */
+ if (*dot != '\0') {
+ /*
+ * The name contains more elements than are
+ * in this path through the tree.
+ */
+ ret = ENOENT;
+ goto label_return;
+ }
+ /* Complete lookup successful. */
+ *depthp = i + 1;
+ break;
+ }
+
+ /* Update elm. */
+ if (*dot == '\0') {
+ /* No more elements. */
+ ret = ENOENT;
+ goto label_return;
+ }
+ elm = &dot[1];
+ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
+ strchr(elm, '\0');
+ elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
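
ctl_lookup() consumes one dotted component per iteration, matching named children by string compare and indexed children by parsing the component as a decimal index. Callers that hit the same node repeatedly can pay the string walk once by translating the name to a MIB and then substituting the index, as in this sketch (same <malloc_np.h> assumption as above; the bin count of 4 is arbitrary):

    #include <stdio.h>
    #include <malloc_np.h>

    int
    main(void)
    {
        size_t mib[4], miblen, sz, reg_size;
        unsigned i;

        /* Resolve the name once; mib[2] is the indexed component. */
        miblen = sizeof(mib) / sizeof(mib[0]);
        if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0)
            return (1);

        for (i = 0; i < 4; i++) {
            mib[2] = i;    /* Re-point the index without re-parsing. */
            sz = sizeof(reg_size);
            if (mallctlbymib(mib, miblen, &reg_size, &sz, NULL, 0) == 0)
                printf("bin %u region size: %zu\n", i, reg_size);
        }
        return (0);
    }

Reusing the MIB avoids repeating the per-component string matching seen in the named-children branch above.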
+
+int
+ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen)
+{
+ int ret;
+ size_t depth;
+ ctl_node_t const *nodes[CTL_MAX_DEPTH];
+ size_t mib[CTL_MAX_DEPTH];
+
+ if (ctl_initialized == false && ctl_init()) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ depth = CTL_MAX_DEPTH;
+ ret = ctl_lookup(name, nodes, mib, &depth);
+ if (ret != 0)
+ goto label_return;
+
+ if (nodes[depth-1]->ctl == NULL) {
+ /* The name refers to a partial path through the ctl tree. */
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
+label_return:
+	return (ret);
+}
+
+int
+ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
+{
+ int ret;
+
+ if (ctl_initialized == false && ctl_init()) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookup(name, NULL, mibp, miblenp);
+label_return:
+	return (ret);
+}
+
+int
+ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ const ctl_node_t *node;
+ size_t i;
+
+ if (ctl_initialized == false && ctl_init()) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ /* Iterate down the tree. */
+ node = super_root_node;
+ for (i = 0; i < miblen; i++) {
+ if (node->u.named.children[0].named) {
+ /* Children are named. */
+ if (node->u.named.nchildren <= mib[i]) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ node = &node->u.named.children[mib[i]];
+ } else {
+ const ctl_node_t *inode;
+
+ /* Indexed element. */
+ inode = &node->u.named.children[0];
+ node = inode->u.indexed.index(mib, miblen, mib[i]);
+ if (node == NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ }
+ }
+
+ /* Call the ctl function. */
+ if (node->ctl == NULL) {
+ /* Partial MIB. */
+ ret = ENOENT;
+ goto label_return;
+ }
+ ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
+
+label_return:
+	return (ret);
+}
+
+bool
+ctl_boot(void)
+{
+
+ if (malloc_mutex_init(&ctl_mtx))
+ return (true);
+
+ ctl_initialized = false;
+
+ return (false);
+}
+
+/******************************************************************************/
+/* *_ctl() functions. */
+
+#define READONLY() do { \
+ if (newp != NULL || newlen != 0) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define WRITEONLY() do { \
+ if (oldp != NULL || oldlenp != NULL) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define VOID() do { \
+ READONLY(); \
+ WRITEONLY(); \
+} while (0)
+
+#define READ(v, t) do { \
+ if (oldp != NULL && oldlenp != NULL) { \
+ if (*oldlenp != sizeof(t)) { \
+ size_t copylen = (sizeof(t) <= *oldlenp) \
+ ? sizeof(t) : *oldlenp; \
+ memcpy(oldp, (void *)&v, copylen); \
+ ret = EINVAL; \
+ goto label_return; \
+ } else \
+ *(t *)oldp = v; \
+ } \
+} while (0)
+
+#define WRITE(v, t) do { \
+ if (newp != NULL) { \
+ if (newlen != sizeof(t)) { \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+ v = *(t *)newp; \
+ } \
+} while (0)
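
READ() and WRITE() define the calling convention for every leaf: oldp/oldlenp return the current value, newp/newlen supply a new one, and the length must equal the size of the node's type; on a mismatched read, as many bytes as fit are copied and EINVAL is returned. A sketch of both outcomes against the read-only boolean "opt.abort" node (same header assumption as above):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <malloc_np.h>

    int
    main(void)
    {
        bool abort_on_error;
        uint64_t wrong_width;
        size_t len;
        int err;

        /* Matching length: READ() stores the bool and returns 0. */
        len = sizeof(abort_on_error);
        err = mallctl("opt.abort", &abort_on_error, &len, NULL, 0);
        printf("opt.abort: %d (err %d)\n", (int)abort_on_error, err);

        /* Mismatched length: partial copy, then EINVAL. */
        len = sizeof(wrong_width);
        err = mallctl("opt.abort", &wrong_width, &len, NULL, 0);
        printf("mismatched read: %s\n", err == EINVAL ? "EINVAL" : "ok");

        return (0);
    }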
+
+/*
+ * There's a lot of code duplication in the following macros due to limitations
+ * in how nested cpp macros are expanded.
+ */
+#define CTL_RO_CLGEN(c, l, n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if ((c) == false) \
+ return (ENOENT); \
+ if (l) \
+ malloc_mutex_lock(&ctl_mtx); \
+ READONLY(); \
+ oldval = v; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ if (l) \
+ malloc_mutex_unlock(&ctl_mtx); \
+ return (ret); \
+}
+
+#define CTL_RO_CGEN(c, n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if ((c) == false) \
+ return (ENOENT); \
+ malloc_mutex_lock(&ctl_mtx); \
+ READONLY(); \
+ oldval = v; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ malloc_mutex_unlock(&ctl_mtx); \
+ return (ret); \
+}
+
+#define CTL_RO_GEN(n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ malloc_mutex_lock(&ctl_mtx); \
+ READONLY(); \
+ oldval = v; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ malloc_mutex_unlock(&ctl_mtx); \
+ return (ret); \
+}
+
+/*
+ * ctl_mtx is not acquired, under the assumption that no pertinent data will
+ * mutate during the call.
+ */
+#define CTL_RO_NL_CGEN(c, n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if ((c) == false) \
+ return (ENOENT); \
+ READONLY(); \
+ oldval = v; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+#define CTL_RO_NL_GEN(n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ READONLY(); \
+ oldval = v; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+#define CTL_RO_BOOL_CONFIG_GEN(n) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ bool oldval; \
+ \
+ READONLY(); \
+ oldval = n; \
+ READ(oldval, bool); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
+
+static int
+epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ uint64_t newval;
+
+ malloc_mutex_lock(&ctl_mtx);
+ newval = 0;
+ WRITE(newval, uint64_t);
+ if (newval != 0)
+ ctl_refresh();
+ READ(ctl_epoch, uint64_t);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
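
All of the "stats.*" values are snapshots filled in by ctl_refresh(); they change only when a caller writes a nonzero value to "epoch". The usual pattern, sketched under the same header assumption and requiring an allocator built with config.stats:

    #include <stdint.h>
    #include <stdio.h>
    #include <malloc_np.h>

    int
    main(void)
    {
        uint64_t epoch = 1;
        size_t allocated = 0, active = 0, mapped = 0, sz;

        /* Trigger ctl_refresh() so the cached stats are current. */
        sz = sizeof(epoch);
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        sz = sizeof(size_t);
        mallctl("stats.allocated", &allocated, &sz, NULL, 0);
        mallctl("stats.active", &active, &sz, NULL, 0);
        mallctl("stats.mapped", &mapped, &sz, NULL, 0);
        printf("allocated %zu, active %zu, mapped %zu\n",
            allocated, active, mapped);
        return (0);
    }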
+
+static int
+thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (config_tcache == false)
+ return (ENOENT);
+
+ oldval = tcache_enabled_get();
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ tcache_enabled_set(*(bool *)newp);
+ }
+ READ(oldval, bool);
+
+	ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (config_tcache == false)
+ return (ENOENT);
+
+ VOID();
+
+ tcache_flush();
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ unsigned newind, oldind;
+
+ newind = oldind = choose_arena(NULL)->ind;
+ WRITE(newind, unsigned);
+ READ(oldind, unsigned);
+ if (newind != oldind) {
+ arena_t *arena;
+
+ if (newind >= narenas) {
+ /* New arena index is out of range. */
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ /* Initialize arena if necessary. */
+ malloc_mutex_lock(&arenas_lock);
+ if ((arena = arenas[newind]) == NULL && (arena =
+ arenas_extend(newind)) == NULL) {
+ malloc_mutex_unlock(&arenas_lock);
+ ret = EAGAIN;
+ goto label_return;
+ }
+ assert(arena == arenas[newind]);
+ arenas[oldind]->nthreads--;
+ arenas[newind]->nthreads++;
+ malloc_mutex_unlock(&arenas_lock);
+
+ /* Set new arena association. */
+ if (config_tcache) {
+ tcache_t *tcache;
+ if ((uintptr_t)(tcache = *tcache_tsd_get()) >
+ (uintptr_t)TCACHE_STATE_MAX) {
+ tcache_arena_dissociate(tcache);
+ tcache_arena_associate(tcache, arena);
+ }
+ }
+ arenas_tsd_set(&arena);
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
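
The "thread.arena" node is read-write: reading returns the calling thread's current arena index, and writing rebinds the thread, lazily initializing the target arena and migrating the tcache association as shown above. Sketch (same header assumption):

    #include <stdio.h>
    #include <malloc_np.h>

    int
    main(void)
    {
        unsigned old_ind, new_ind = 0;
        size_t sz = sizeof(unsigned);

        /* Read the current binding and rebind this thread to arena 0. */
        if (mallctl("thread.arena", &old_ind, &sz, &new_ind,
            sizeof(new_ind)) == 0)
            printf("moved from arena %u to arena %u\n", old_ind, new_ind);
        return (0);
    }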
+
+CTL_RO_NL_CGEN(config_stats, thread_allocated,
+ thread_allocated_tsd_get()->allocated, uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
+ &thread_allocated_tsd_get()->allocated, uint64_t *)
+CTL_RO_NL_CGEN(config_stats, thread_deallocated,
+ thread_allocated_tsd_get()->deallocated, uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
+ &thread_allocated_tsd_get()->deallocated, uint64_t *)
+
+/******************************************************************************/
+
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_dss)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_munmap)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
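
These read-only nodes expose the build-time configuration, so an application can probe for optional subsystems before relying on them (nodes gated on a missing feature return ENOENT). A small feature-detection sketch (same header assumption):

    #include <stdbool.h>
    #include <stdio.h>
    #include <malloc_np.h>

    static bool
    feature(const char *name)
    {
        bool on = false;
        size_t sz = sizeof(on);

        (void)mallctl(name, &on, &sz, NULL, 0);
        return (on);
    }

    int
    main(void)
    {
        printf("stats: %d, tcache: %d, prof: %d\n",
            feature("config.stats"), feature("config.tcache"),
            feature("config.prof"));
        return (0);
    }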
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
+CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
+CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+const ctl_node_t *
+arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > NBINS)
+ return (NULL);
+ return (super_arenas_bin_i_node);
+}
+
+CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
+const ctl_node_t *
+arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > nlclasses)
+ return (NULL);
+ return (super_arenas_lrun_i_node);
+}
+
+CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
+
+static int
+arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned nread, i;
+
+ malloc_mutex_lock(&ctl_mtx);
+ READONLY();
+ if (*oldlenp != narenas * sizeof(bool)) {
+ ret = EINVAL;
+ nread = (*oldlenp < narenas * sizeof(bool))
+ ? (*oldlenp / sizeof(bool)) : narenas;
+ } else {
+ ret = 0;
+ nread = narenas;
+ }
+
+ for (i = 0; i < nread; i++)
+ ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
+
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
+CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
+CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
+CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
+
+static int
+arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena;
+
+ WRITEONLY();
+ arena = UINT_MAX;
+ WRITE(arena, unsigned);
+ if (newp != NULL && arena >= narenas) {
+ ret = EFAULT;
+ goto label_return;
+ } else {
+ arena_t *tarenas[narenas];
+
+ malloc_mutex_lock(&arenas_lock);
+ memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
+ malloc_mutex_unlock(&arenas_lock);
+
+ if (arena == UINT_MAX) {
+ unsigned i;
+ for (i = 0; i < narenas; i++) {
+ if (tarenas[i] != NULL)
+ arena_purge_all(tarenas[i]);
+ }
+ } else {
+ assert(arena < narenas);
+ if (tarenas[arena] != NULL)
+ arena_purge_all(tarenas[arena]);
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
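
Per the WRITEONLY()/WRITE() handling above, writing an arena index to "arenas.purge" purges that arena's unused dirty pages, writing an out-of-range index fails with EFAULT, and supplying no new value at all purges every initialized arena. Sketch (same header assumption):

    #include <malloc_np.h>

    int
    main(void)
    {
        unsigned arena = 0;

        /* Purge dirty pages in arena 0 only. */
        mallctl("arenas.purge", NULL, NULL, &arena, sizeof(arena));

        /* Omitting the new value purges all initialized arenas. */
        mallctl("arenas.purge", NULL, NULL, NULL, 0);
        return (0);
    }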
+
+/******************************************************************************/
+
+static int
+prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (config_prof == false)
+ return (ENOENT);
+
+ malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
+ oldval = opt_prof_active;
+ if (newp != NULL) {
+ /*
+ * The memory barriers will tend to make opt_prof_active
+ * propagate faster on systems with weak memory ordering.
+ */
+ mb_write();
+ WRITE(opt_prof_active, bool);
+ mb_write();
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static int
+prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ const char *filename = NULL;
+
+ if (config_prof == false)
+ return (ENOENT);
+
+ WRITEONLY();
+ WRITE(filename, const char *);
+
+ if (prof_mdump(filename)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
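
The "prof.dump" node is write-only: the new value is a filename, or NULL to fall back to the opt.prof_prefix naming scheme used by prof_mdump(). It exists only when the allocator was built with profiling (config.prof) and produces output only if profiling was enabled at run time. Sketch with a hypothetical output path (same header assumption):

    #include <stdio.h>
    #include <malloc_np.h>

    int
    main(void)
    {
        /* Hypothetical path; NULL would select the default pattern. */
        const char *filename = "/tmp/jeprof.out";

        if (mallctl("prof.dump", NULL, NULL, &filename,
            sizeof(filename)) != 0)
            fprintf(stderr, "prof.dump failed (profiling disabled?)\n");
        return (0);
    }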
+
+CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
+
+/******************************************************************************/
+
+CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
+ size_t)
+CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
+CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
+ ctl_stats.arenas[mib[2]].allocated_small, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
+ ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
+ ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
+ ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
+ ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
+ ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
+ ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
+
+const ctl_node_t *
+stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
+{
+
+ if (j > NBINS)
+ return (NULL);
+ return (super_stats_arenas_i_bins_j_node);
+}
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
+
+const ctl_node_t *
+stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
+{
+
+ if (j > nlclasses)
+ return (NULL);
+ return (super_stats_arenas_i_lruns_j_node);
+}
+
+CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+ ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+ ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+ ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+ ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+
+const ctl_node_t *
+stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+	const ctl_node_t *ret;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (ctl_stats.arenas[i].initialized == false) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = super_stats_arenas_i_node;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
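
Per-arena statistics hang off the indexed "stats.arenas.<i>" subtree defined above; stats_arenas_i_index() returns NULL for arenas that are not initialized, so callers typically bound the loop with "arenas.narenas" and tolerate lookup failures. Sketch (same header assumption):

    #include <stdint.h>
    #include <stdio.h>
    #include <malloc_np.h>

    int
    main(void)
    {
        uint64_t epoch = 1;
        unsigned narenas = 0, i;
        size_t sz;
        char name[64];

        /* Refresh the stats snapshot, then discover the arena count. */
        mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
        sz = sizeof(narenas);
        mallctl("arenas.narenas", &narenas, &sz, NULL, 0);

        for (i = 0; i < narenas; i++) {
            size_t mapped = 0;

            snprintf(name, sizeof(name), "stats.arenas.%u.mapped", i);
            sz = sizeof(mapped);
            if (mallctl(name, &mapped, &sz, NULL, 0) == 0)
                printf("arena %u: %zu bytes mapped\n", i, mapped);
        }
        return (0);
    }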
diff --git a/contrib/jemalloc/src/extent.c b/contrib/jemalloc/src/extent.c
new file mode 100644
index 0000000..8c09b48
--- /dev/null
+++ b/contrib/jemalloc/src/extent.c
@@ -0,0 +1,39 @@
+#define JEMALLOC_EXTENT_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+static inline int
+extent_szad_comp(extent_node_t *a, extent_node_t *b)
+{
+ int ret;
+ size_t a_size = a->size;
+ size_t b_size = b->size;
+
+ ret = (a_size > b_size) - (a_size < b_size);
+ if (ret == 0) {
+ uintptr_t a_addr = (uintptr_t)a->addr;
+ uintptr_t b_addr = (uintptr_t)b->addr;
+
+ ret = (a_addr > b_addr) - (a_addr < b_addr);
+ }
+
+ return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
+ extent_szad_comp)
+
+static inline int
+extent_ad_comp(extent_node_t *a, extent_node_t *b)
+{
+ uintptr_t a_addr = (uintptr_t)a->addr;
+ uintptr_t b_addr = (uintptr_t)b->addr;
+
+ return ((a_addr > b_addr) - (a_addr < b_addr));
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
+ extent_ad_comp)
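
The two trees order the same extent nodes differently: the szad tree sorts by size with address as the tie-breaker, so a best-fit search lands on the smallest adequate extent at the lowest address, while the ad tree sorts by address alone so neighboring extents can be found and coalesced. The standalone sketch below mirrors the szad comparison with qsort(3) purely to illustrate the ordering; it is not the red-black tree itself.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors extent_szad_comp(): size-major, address-minor ordering. */
    struct ext { uintptr_t addr; size_t size; };

    static int
    szad_comp(const void *ap, const void *bp)
    {
        const struct ext *a = ap, *b = bp;
        int ret = (a->size > b->size) - (a->size < b->size);

        if (ret == 0)
            ret = (a->addr > b->addr) - (a->addr < b->addr);
        return (ret);
    }

    int
    main(void)
    {
        struct ext v[] = {
            {0x300000, 8192}, {0x100000, 4096}, {0x200000, 4096}
        };
        size_t i, n = sizeof(v) / sizeof(v[0]);

        qsort(v, n, sizeof(v[0]), szad_comp);
        for (i = 0; i < n; i++)
            printf("size %zu @ %#lx\n", v[i].size,
                (unsigned long)v[i].addr);
        return (0);
    }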
diff --git a/contrib/jemalloc/src/hash.c b/contrib/jemalloc/src/hash.c
new file mode 100644
index 0000000..cfa4da0
--- /dev/null
+++ b/contrib/jemalloc/src/hash.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_HASH_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/contrib/jemalloc/src/huge.c b/contrib/jemalloc/src/huge.c
new file mode 100644
index 0000000..daf0c62
--- /dev/null
+++ b/contrib/jemalloc/src/huge.c
@@ -0,0 +1,306 @@
+#define JEMALLOC_HUGE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+uint64_t huge_nmalloc;
+uint64_t huge_ndalloc;
+size_t huge_allocated;
+
+malloc_mutex_t huge_mtx;
+
+/******************************************************************************/
+
+/* Tree of chunks that are stand-alone huge allocations. */
+static extent_tree_t huge;
+
+void *
+huge_malloc(size_t size, bool zero)
+{
+
+ return (huge_palloc(size, chunksize, zero));
+}
+
+void *
+huge_palloc(size_t size, size_t alignment, bool zero)
+{
+ void *ret;
+ size_t csize;
+ extent_node_t *node;
+
+ /* Allocate one or more contiguous chunks for this request. */
+
+ csize = CHUNK_CEILING(size);
+ if (csize == 0) {
+ /* size is large enough to cause size_t wrap-around. */
+ return (NULL);
+ }
+
+ /* Allocate an extent node with which to track the chunk. */
+ node = base_node_alloc();
+ if (node == NULL)
+ return (NULL);
+
+ ret = chunk_alloc(csize, alignment, false, &zero);
+ if (ret == NULL) {
+ base_node_dealloc(node);
+ return (NULL);
+ }
+
+ /* Insert node into huge. */
+ node->addr = ret;
+ node->size = csize;
+
+ malloc_mutex_lock(&huge_mtx);
+ extent_tree_ad_insert(&huge, node);
+ if (config_stats) {
+ stats_cactive_add(csize);
+ huge_nmalloc++;
+ huge_allocated += csize;
+ }
+ malloc_mutex_unlock(&huge_mtx);
+
+ if (config_fill && zero == false) {
+ if (opt_junk)
+ memset(ret, 0xa5, csize);
+ else if (opt_zero)
+ memset(ret, 0, csize);
+ }
+
+ return (ret);
+}
+
+void *
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
+{
+
+ /*
+ * Avoid moving the allocation if the size class can be left the same.
+ */
+ if (oldsize > arena_maxclass
+ && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
+ && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
+ assert(CHUNK_CEILING(oldsize) == oldsize);
+ if (config_fill && opt_junk && size < oldsize) {
+ memset((void *)((uintptr_t)ptr + size), 0x5a,
+ oldsize - size);
+ }
+ return (ptr);
+ }
+
+ /* Reallocation would require a move. */
+ return (NULL);
+}
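
A huge allocation can be resized in place only while the old and requested sizes round up to a compatible number of chunks. The helper below is an illustration only: it assumes a 4 MiB chunk size (the real value comes from opt.lg_chunk) and ignores the oldsize > arena_maxclass precondition checked above.

    #include <stdio.h>

    /* Illustrative only: assume 4 MiB chunks. */
    #define CHUNKSZ  ((size_t)4 << 20)
    #define CEIL(s)  (((s) + CHUNKSZ - 1) & ~(CHUNKSZ - 1))

    static const char *
    huge_resize_path(size_t oldsize, size_t size, size_t extra)
    {
        if (CEIL(oldsize) >= CEIL(size) &&
            CEIL(oldsize) <= CEIL(size + extra))
            return ("in place");
        return ("move (allocate + copy)");
    }

    int
    main(void)
    {
        /* Shrinking within the same chunk ceiling stays in place... */
        printf("8M -> 5M: %s\n", huge_resize_path(8 << 20, 5 << 20, 0));
        /* ...growing past the current chunk ceiling forces a move. */
        printf("8M -> 9M: %s\n", huge_resize_path(8 << 20, 9 << 20, 0));
        return (0);
    }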
+
+void *
+huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero)
+{
+ void *ret;
+ size_t copysize;
+
+ /* Try to avoid moving the allocation. */
+ ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
+ if (ret != NULL)
+ return (ret);
+
+ /*
+ * size and oldsize are different enough that we need to use a
+ * different size class. In that case, fall back to allocating new
+ * space and copying.
+ */
+ if (alignment > chunksize)
+ ret = huge_palloc(size + extra, alignment, zero);
+ else
+ ret = huge_malloc(size + extra, zero);
+
+ if (ret == NULL) {
+ if (extra == 0)
+ return (NULL);
+ /* Try again, this time without extra. */
+ if (alignment > chunksize)
+ ret = huge_palloc(size, alignment, zero);
+ else
+ ret = huge_malloc(size, zero);
+
+ if (ret == NULL)
+ return (NULL);
+ }
+
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+
+ /*
+ * Use mremap(2) if this is a huge-->huge reallocation, and neither the
+ * source nor the destination are in dss.
+ */
+#ifdef JEMALLOC_MREMAP_FIXED
+ if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
+ == false && chunk_in_dss(ret) == false))) {
+ size_t newsize = huge_salloc(ret);
+
+ /*
+ * Remove ptr from the tree of huge allocations before
+ * performing the remap operation, in order to avoid the
+ * possibility of another thread acquiring that mapping before
+ * this one removes it from the tree.
+ */
+ huge_dalloc(ptr, false);
+ if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
+ ret) == MAP_FAILED) {
+ /*
+ * Assuming no chunk management bugs in the allocator,
+ * the only documented way an error can occur here is
+ * if the application changed the map type for a
+ * portion of the old allocation. This is firmly in
+ * undefined behavior territory, so write a diagnostic
+ * message, and optionally abort.
+ */
+ char buf[BUFERROR_BUF];
+
+ buferror(errno, buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in mremap(): %s\n",
+ buf);
+ if (opt_abort)
+ abort();
+ memcpy(ret, ptr, copysize);
+ chunk_dealloc_mmap(ptr, oldsize);
+ }
+ } else
+#endif
+ {
+ memcpy(ret, ptr, copysize);
+ iqalloc(ptr);
+ }
+ return (ret);
+}
+
+void
+huge_dalloc(void *ptr, bool unmap)
+{
+ extent_node_t *node, key;
+
+ malloc_mutex_lock(&huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = ptr;
+ node = extent_tree_ad_search(&huge, &key);
+ assert(node != NULL);
+ assert(node->addr == ptr);
+ extent_tree_ad_remove(&huge, node);
+
+ if (config_stats) {
+ stats_cactive_sub(node->size);
+ huge_ndalloc++;
+ huge_allocated -= node->size;
+ }
+
+ malloc_mutex_unlock(&huge_mtx);
+
+ if (unmap && config_fill && config_dss && opt_junk)
+ memset(node->addr, 0x5a, node->size);
+
+ chunk_dealloc(node->addr, node->size, unmap);
+
+ base_node_dealloc(node);
+}
+
+size_t
+huge_salloc(const void *ptr)
+{
+ size_t ret;
+ extent_node_t *node, key;
+
+ malloc_mutex_lock(&huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = __DECONST(void *, ptr);
+ node = extent_tree_ad_search(&huge, &key);
+ assert(node != NULL);
+
+ ret = node->size;
+
+ malloc_mutex_unlock(&huge_mtx);
+
+ return (ret);
+}
+
+prof_ctx_t *
+huge_prof_ctx_get(const void *ptr)
+{
+ prof_ctx_t *ret;
+ extent_node_t *node, key;
+
+ malloc_mutex_lock(&huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = __DECONST(void *, ptr);
+ node = extent_tree_ad_search(&huge, &key);
+ assert(node != NULL);
+
+ ret = node->prof_ctx;
+
+ malloc_mutex_unlock(&huge_mtx);
+
+ return (ret);
+}
+
+void
+huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+{
+ extent_node_t *node, key;
+
+ malloc_mutex_lock(&huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = __DECONST(void *, ptr);
+ node = extent_tree_ad_search(&huge, &key);
+ assert(node != NULL);
+
+ node->prof_ctx = ctx;
+
+ malloc_mutex_unlock(&huge_mtx);
+}
+
+bool
+huge_boot(void)
+{
+
+ /* Initialize chunks data. */
+ if (malloc_mutex_init(&huge_mtx))
+ return (true);
+ extent_tree_ad_new(&huge);
+
+ if (config_stats) {
+ huge_nmalloc = 0;
+ huge_ndalloc = 0;
+ huge_allocated = 0;
+ }
+
+ return (false);
+}
+
+void
+huge_prefork(void)
+{
+
+ malloc_mutex_prefork(&huge_mtx);
+}
+
+void
+huge_postfork_parent(void)
+{
+
+ malloc_mutex_postfork_parent(&huge_mtx);
+}
+
+void
+huge_postfork_child(void)
+{
+
+ malloc_mutex_postfork_child(&huge_mtx);
+}
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
new file mode 100644
index 0000000..73fad29
--- /dev/null
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -0,0 +1,1733 @@
+#define JEMALLOC_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+malloc_tsd_data(, arenas, arena_t *, NULL)
+malloc_tsd_data(, thread_allocated, thread_allocated_t,
+ THREAD_ALLOCATED_INITIALIZER)
+
+const char *__malloc_options_1_0;
+__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
+
+/* Runtime configuration options. */
+const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
+#ifdef JEMALLOC_DEBUG
+bool opt_abort = true;
+# ifdef JEMALLOC_FILL
+bool opt_junk = true;
+# else
+bool opt_junk = false;
+# endif
+#else
+bool opt_abort = false;
+bool opt_junk = false;
+#endif
+size_t opt_quarantine = ZU(0);
+bool opt_redzone = false;
+bool opt_utrace = false;
+bool opt_valgrind = false;
+bool opt_xmalloc = false;
+bool opt_zero = false;
+size_t opt_narenas = 0;
+
+unsigned ncpus;
+
+malloc_mutex_t arenas_lock;
+arena_t **arenas;
+unsigned narenas;
+
+/* Set to true once the allocator has been initialized. */
+static bool malloc_initialized = false;
+
+#ifdef JEMALLOC_THREADED_INIT
+/* Used to let the initializing thread recursively allocate. */
+# define NO_INITIALIZER ((unsigned long)0)
+# define INITIALIZER pthread_self()
+# define IS_INITIALIZER (malloc_initializer == pthread_self())
+static pthread_t malloc_initializer = NO_INITIALIZER;
+#else
+# define NO_INITIALIZER false
+# define INITIALIZER true
+# define IS_INITIALIZER malloc_initializer
+static bool malloc_initializer = NO_INITIALIZER;
+#endif
+
+/* Used to avoid initialization races. */
+static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
+
+typedef struct {
+ void *p; /* Input pointer (as in realloc(p, s)). */
+ size_t s; /* Request size. */
+ void *r; /* Result pointer. */
+} malloc_utrace_t;
+
+#ifdef JEMALLOC_UTRACE
+# define UTRACE(a, b, c) do { \
+ if (opt_utrace) { \
+ malloc_utrace_t ut; \
+ ut.p = (a); \
+ ut.s = (b); \
+ ut.r = (c); \
+ utrace(&ut, sizeof(ut)); \
+ } \
+} while (0)
+#else
+# define UTRACE(a, b, c)
+#endif
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void stats_print_atexit(void);
+static unsigned malloc_ncpus(void);
+static bool malloc_conf_next(char const **opts_p, char const **k_p,
+ size_t *klen_p, char const **v_p, size_t *vlen_p);
+static void malloc_conf_error(const char *msg, const char *k, size_t klen,
+ const char *v, size_t vlen);
+static void malloc_conf_init(void);
+static bool malloc_init_hard(void);
+static int imemalign(void **memptr, size_t alignment, size_t size,
+ size_t min_alignment);
+
+/******************************************************************************/
+/*
+ * Begin miscellaneous support functions.
+ */
+
+/* Create a new arena and insert it into the arenas array at index ind. */
+arena_t *
+arenas_extend(unsigned ind)
+{
+ arena_t *ret;
+
+ ret = (arena_t *)base_alloc(sizeof(arena_t));
+ if (ret != NULL && arena_new(ret, ind) == false) {
+ arenas[ind] = ret;
+ return (ret);
+ }
+ /* Only reached if there is an OOM error. */
+
+ /*
+ * OOM here is quite inconvenient to propagate, since dealing with it
+ * would require a check for failure in the fast path. Instead, punt
+ * by using arenas[0]. In practice, this is an extremely unlikely
+ * failure.
+ */
+ malloc_write("<jemalloc>: Error initializing arena\n");
+ if (opt_abort)
+ abort();
+
+ return (arenas[0]);
+}
+
+/* Slow path, called only by choose_arena(). */
+arena_t *
+choose_arena_hard(void)
+{
+ arena_t *ret;
+
+ if (narenas > 1) {
+ unsigned i, choose, first_null;
+
+ choose = 0;
+ first_null = narenas;
+ malloc_mutex_lock(&arenas_lock);
+ assert(arenas[0] != NULL);
+ for (i = 1; i < narenas; i++) {
+ if (arenas[i] != NULL) {
+ /*
+ * Choose the first arena that has the lowest
+ * number of threads assigned to it.
+ */
+ if (arenas[i]->nthreads <
+ arenas[choose]->nthreads)
+ choose = i;
+ } else if (first_null == narenas) {
+ /*
+ * Record the index of the first uninitialized
+ * arena, in case all extant arenas are in use.
+ *
+ * NB: It is possible for there to be
+ * discontinuities in terms of initialized
+ * versus uninitialized arenas, due to the
+ * "thread.arena" mallctl.
+ */
+ first_null = i;
+ }
+ }
+
+ if (arenas[choose]->nthreads == 0 || first_null == narenas) {
+ /*
+ * Use an unloaded arena, or the least loaded arena if
+ * all arenas are already initialized.
+ */
+ ret = arenas[choose];
+ } else {
+ /* Initialize a new arena. */
+ ret = arenas_extend(first_null);
+ }
+ ret->nthreads++;
+ malloc_mutex_unlock(&arenas_lock);
+ } else {
+ ret = arenas[0];
+ malloc_mutex_lock(&arenas_lock);
+ ret->nthreads++;
+ malloc_mutex_unlock(&arenas_lock);
+ }
+
+ arenas_tsd_set(&ret);
+
+ return (ret);
+}
+
+static void
+stats_print_atexit(void)
+{
+
+ if (config_tcache && config_stats) {
+ unsigned i;
+
+ /*
+ * Merge stats from extant threads. This is racy, since
+ * individual threads do not lock when recording tcache stats
+ * events. As a consequence, the final stats may be slightly
+ * out of date by the time they are reported, if other threads
+ * continue to allocate.
+ */
+ for (i = 0; i < narenas; i++) {
+ arena_t *arena = arenas[i];
+ if (arena != NULL) {
+ tcache_t *tcache;
+
+ /*
+ * tcache_stats_merge() locks bins, so if any
+ * code is introduced that acquires both arena
+ * and bin locks in the opposite order,
+ * deadlocks may result.
+ */
+ malloc_mutex_lock(&arena->lock);
+ ql_foreach(tcache, &arena->tcache_ql, link) {
+ tcache_stats_merge(tcache, arena);
+ }
+ malloc_mutex_unlock(&arena->lock);
+ }
+ }
+ }
+ je_malloc_stats_print(NULL, NULL, NULL);
+}
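
opt.stats_print arranges for this report at exit, but an application can emit the same report on demand with malloc_stats_print(), optionally routing it through its own write callback. Sketch (header assumption as in the earlier examples):

    #include <stdio.h>
    #include <stdlib.h>
    #include <malloc_np.h>

    static void
    write_cb(void *opaque, const char *s)
    {
        fputs(s, (FILE *)opaque);
    }

    int
    main(void)
    {
        void *p = malloc(4096);

        /* Emit the full report to stderr through a caller-supplied sink. */
        malloc_stats_print(write_cb, stderr, NULL);
        free(p);
        return (0);
    }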
+
+/*
+ * End miscellaneous support functions.
+ */
+/******************************************************************************/
+/*
+ * Begin initialization functions.
+ */
+
+static unsigned
+malloc_ncpus(void)
+{
+ unsigned ret;
+ long result;
+
+ result = sysconf(_SC_NPROCESSORS_ONLN);
+	if (result == -1) {
+		/* Error; assume one CPU. */
+		ret = 1;
+	} else
+		ret = (unsigned)result;
+
+ return (ret);
+}
+
+void
+arenas_cleanup(void *arg)
+{
+ arena_t *arena = *(arena_t **)arg;
+
+ malloc_mutex_lock(&arenas_lock);
+ arena->nthreads--;
+ malloc_mutex_unlock(&arenas_lock);
+}
+
+static inline bool
+malloc_init(void)
+{
+
+ if (malloc_initialized == false)
+ return (malloc_init_hard());
+
+ return (false);
+}
+
+static bool
+malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
+ char const **v_p, size_t *vlen_p)
+{
+ bool accept;
+ const char *opts = *opts_p;
+
+ *k_p = opts;
+
+ for (accept = false; accept == false;) {
+ switch (*opts) {
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
+ case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
+ case 'Y': case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
+ case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
+ case 's': case 't': case 'u': case 'v': case 'w': case 'x':
+ case 'y': case 'z':
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ case '_':
+ opts++;
+ break;
+ case ':':
+ opts++;
+ *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
+ *v_p = opts;
+ accept = true;
+ break;
+ case '\0':
+ if (opts != *opts_p) {
+ malloc_write("<jemalloc>: Conf string ends "
+ "with key\n");
+ }
+ return (true);
+ default:
+ malloc_write("<jemalloc>: Malformed conf string\n");
+ return (true);
+ }
+ }
+
+ for (accept = false; accept == false;) {
+ switch (*opts) {
+ case ',':
+ opts++;
+ /*
+ * Look ahead one character here, because the next time
+ * this function is called, it will assume that end of
+ * input has been cleanly reached if no input remains,
+ * but we have optimistically already consumed the
+ * comma if one exists.
+ */
+ if (*opts == '\0') {
+ malloc_write("<jemalloc>: Conf string ends "
+ "with comma\n");
+ }
+ *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ case '\0':
+ *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ default:
+ opts++;
+ break;
+ }
+ }
+
+ *opts_p = opts;
+ return (false);
+}
+
+static void
+malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
+ size_t vlen)
+{
+
+ malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
+ (int)vlen, v);
+}
+
+static void
+malloc_conf_init(void)
+{
+ unsigned i;
+ char buf[PATH_MAX + 1];
+ const char *opts, *k, *v;
+ size_t klen, vlen;
+
+ for (i = 0; i < 3; i++) {
+ /* Get runtime configuration. */
+ switch (i) {
+ case 0:
+ if (je_malloc_conf != NULL) {
+ /*
+ * Use options that were compiled into the
+ * program.
+ */
+ opts = je_malloc_conf;
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ case 1: {
+ int linklen;
+ const char *linkname =
+#ifdef JEMALLOC_PREFIX
+ "/etc/"JEMALLOC_PREFIX"malloc.conf"
+#else
+ "/etc/malloc.conf"
+#endif
+ ;
+
+ if ((linklen = readlink(linkname, buf,
+ sizeof(buf) - 1)) != -1) {
+ /*
+ * Use the contents of the "/etc/malloc.conf"
+ * symbolic link's name.
+ */
+ buf[linklen] = '\0';
+ opts = buf;
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ } case 2: {
+ const char *envname =
+#ifdef JEMALLOC_PREFIX
+ JEMALLOC_CPREFIX"MALLOC_CONF"
+#else
+ "MALLOC_CONF"
+#endif
+ ;
+
+ if (issetugid() == 0 && (opts = getenv(envname)) !=
+ NULL) {
+ /*
+ * Do nothing; opts is already initialized to
+ * the value of the MALLOC_CONF environment
+ * variable.
+ */
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ } default:
+ /* NOTREACHED */
+ assert(false);
+ buf[0] = '\0';
+ opts = buf;
+ }
+
+ while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
+ &vlen) == false) {
+#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
+ if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+ klen) == 0) { \
+ if (strncmp("true", v, vlen) == 0 && \
+ vlen == sizeof("true")-1) \
+ o = true; \
+ else if (strncmp("false", v, vlen) == \
+ 0 && vlen == sizeof("false")-1) \
+ o = false; \
+ else { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } \
+ hit = true; \
+ } else \
+ hit = false;
+#define CONF_HANDLE_BOOL(o, n) { \
+ bool hit; \
+ CONF_HANDLE_BOOL_HIT(o, n, hit); \
+ if (hit) \
+ continue; \
+}
+#define CONF_HANDLE_SIZE_T(o, n, min, max) \
+ if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+ klen) == 0) { \
+ uintmax_t um; \
+ char *end; \
+ \
+ errno = 0; \
+ um = malloc_strtoumax(v, &end, 0); \
+ if (errno != 0 || (uintptr_t)end - \
+ (uintptr_t)v != vlen) { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } else if (um < min || um > max) { \
+ malloc_conf_error( \
+ "Out-of-range conf value", \
+ k, klen, v, vlen); \
+ } else \
+ o = um; \
+ continue; \
+ }
+#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
+ if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+ klen) == 0) { \
+ long l; \
+ char *end; \
+ \
+ errno = 0; \
+ l = strtol(v, &end, 0); \
+ if (errno != 0 || (uintptr_t)end - \
+ (uintptr_t)v != vlen) { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } else if (l < (ssize_t)min || l > \
+ (ssize_t)max) { \
+ malloc_conf_error( \
+ "Out-of-range conf value", \
+ k, klen, v, vlen); \
+ } else \
+ o = l; \
+ continue; \
+ }
+#define CONF_HANDLE_CHAR_P(o, n, d) \
+ if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+ klen) == 0) { \
+ size_t cpylen = (vlen <= \
+ sizeof(o)-1) ? vlen : \
+ sizeof(o)-1; \
+ strncpy(o, v, cpylen); \
+ o[cpylen] = '\0'; \
+ continue; \
+ }
+
+ CONF_HANDLE_BOOL(opt_abort, abort)
+ /*
+ * Chunks always require at least one header page, plus
+ * one data page in the absence of redzones, or three
+ * pages in the presence of redzones. In order to
+ * simplify options processing, fix the limit based on
+ * config_fill.
+ */
+ CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE +
+ (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
+ CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
+ -1, (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_stats_print, stats_print)
+ if (config_fill) {
+ CONF_HANDLE_BOOL(opt_junk, junk)
+ CONF_HANDLE_SIZE_T(opt_quarantine, quarantine,
+ 0, SIZE_T_MAX)
+ CONF_HANDLE_BOOL(opt_redzone, redzone)
+ CONF_HANDLE_BOOL(opt_zero, zero)
+ }
+ if (config_utrace) {
+ CONF_HANDLE_BOOL(opt_utrace, utrace)
+ }
+ if (config_valgrind) {
+ bool hit;
+ CONF_HANDLE_BOOL_HIT(opt_valgrind,
+ valgrind, hit)
+ if (config_fill && opt_valgrind && hit) {
+ opt_junk = false;
+ opt_zero = false;
+ if (opt_quarantine == 0) {
+ opt_quarantine =
+ JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+ }
+ opt_redzone = true;
+ }
+ if (hit)
+ continue;
+ }
+ if (config_xmalloc) {
+ CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
+ }
+ if (config_tcache) {
+ CONF_HANDLE_BOOL(opt_tcache, tcache)
+ CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
+ lg_tcache_max, -1,
+ (sizeof(size_t) << 3) - 1)
+ }
+ if (config_prof) {
+ CONF_HANDLE_BOOL(opt_prof, prof)
+ CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
+ "jeprof")
+ CONF_HANDLE_BOOL(opt_prof_active, prof_active)
+ CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
+ lg_prof_sample, 0,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
+ CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
+ lg_prof_interval, -1,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
+ CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
+ }
+ malloc_conf_error("Invalid conf pair", k, klen, v,
+ vlen);
+#undef CONF_HANDLE_BOOL
+#undef CONF_HANDLE_SIZE_T
+#undef CONF_HANDLE_SSIZE_T
+#undef CONF_HANDLE_CHAR_P
+ }
+ }
+}
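
The options string parsed here is a comma-separated list of key:value pairs, gathered from the compiled-in malloc_conf string, then the name of the /etc/malloc.conf symbolic link, then the MALLOC_CONF environment variable (later sources override earlier ones, and the environment is ignored for setugid programs). The sketch below shows the compile-time form and assumes a standalone jemalloc build where the exported symbol is plainly named malloc_conf; the visible name can differ when jemalloc is embedded behind a symbol prefix, as in libc.

    #include <stdlib.h>

    /*
     * Assumed standalone-jemalloc symbol name; the string uses the same
     * key:value grammar accepted by malloc_conf_next() above.
     */
    const char *malloc_conf = "narenas:2,lg_chunk:24,stats_print:true";

    int
    main(void)
    {
        void *p = malloc(1);  /* First use runs malloc_conf_init(). */

        free(p);
        return (0);           /* stats_print:true reports at exit. */
    }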
+
+static bool
+malloc_init_hard(void)
+{
+ arena_t *init_arenas[1];
+
+ malloc_mutex_lock(&init_lock);
+ if (malloc_initialized || IS_INITIALIZER) {
+ /*
+ * Another thread initialized the allocator before this one
+ * acquired init_lock, or this thread is the initializing
+ * thread, and it is recursively allocating.
+ */
+ malloc_mutex_unlock(&init_lock);
+ return (false);
+ }
+#ifdef JEMALLOC_THREADED_INIT
+ if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
+ /* Busy-wait until the initializing thread completes. */
+ do {
+ malloc_mutex_unlock(&init_lock);
+ CPU_SPINWAIT;
+ malloc_mutex_lock(&init_lock);
+ } while (malloc_initialized == false);
+ malloc_mutex_unlock(&init_lock);
+ return (false);
+ }
+#endif
+ malloc_initializer = INITIALIZER;
+
+ malloc_tsd_boot();
+ if (config_prof)
+ prof_boot0();
+
+ malloc_conf_init();
+
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
+ /* Register fork handlers. */
+ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+ jemalloc_postfork_child) != 0) {
+ malloc_write("<jemalloc>: Error in pthread_atfork()\n");
+ if (opt_abort)
+ abort();
+ }
+#endif
+
+ if (opt_stats_print) {
+ /* Print statistics at exit. */
+ if (atexit(stats_print_atexit) != 0) {
+ malloc_write("<jemalloc>: Error in atexit()\n");
+ if (opt_abort)
+ abort();
+ }
+ }
+
+ if (base_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (chunk_boot0()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (ctl_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (config_prof)
+ prof_boot1();
+
+ arena_boot();
+
+ if (config_tcache && tcache_boot0()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (huge_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (malloc_mutex_init(&arenas_lock))
+ return (true);
+
+ /*
+ * Create enough scaffolding to allow recursive allocation in
+ * malloc_ncpus().
+ */
+ narenas = 1;
+ arenas = init_arenas;
+ memset(arenas, 0, sizeof(arena_t *) * narenas);
+
+ /*
+ * Initialize one arena here. The rest are lazily created in
+ * choose_arena_hard().
+ */
+ arenas_extend(0);
+ if (arenas[0] == NULL) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ /* Initialize allocation counters before any allocations can occur. */
+ if (config_stats && thread_allocated_tsd_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (arenas_tsd_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (config_tcache && tcache_boot1()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (config_fill && quarantine_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (config_prof && prof_boot2()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ /* Get number of CPUs. */
+ malloc_mutex_unlock(&init_lock);
+ ncpus = malloc_ncpus();
+ malloc_mutex_lock(&init_lock);
+
+ if (chunk_boot1()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (mutex_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (opt_narenas == 0) {
+ /*
+ * For SMP systems, create more than one arena per CPU by
+ * default.
+ */
+ if (ncpus > 1)
+ opt_narenas = ncpus << 2;
+ else
+ opt_narenas = 1;
+ }
+ narenas = opt_narenas;
+ /*
+ * Make sure that the arenas array can be allocated. In practice, this
+ * limit is enough to allow the allocator to function, but the ctl
+ * machinery will fail to allocate memory at far lower limits.
+ */
+ if (narenas > chunksize / sizeof(arena_t *)) {
+ narenas = chunksize / sizeof(arena_t *);
+ malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
+ narenas);
+ }
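+ /*
+ * Worked example (values assumed for illustration): on an 8-CPU
+ * system with opt_narenas left at its default of 0, narenas becomes
+ * 8 << 2 == 32. Assuming the default 4 MiB chunk size and 8-byte
+ * pointers, the clamp above only takes effect beyond
+ * 4 MiB / 8 == 524288 arenas.
+ */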
+
+ /* Allocate and initialize arenas. */
+ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+ if (arenas == NULL) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+ /*
+ * Zero the array. In practice, this should always be pre-zeroed,
+ * since it was just mmap()ed, but let's be sure.
+ */
+ memset(arenas, 0, sizeof(arena_t *) * narenas);
+ /* Copy the pointer to the one arena that was already initialized. */
+ arenas[0] = init_arenas[0];
+
+ malloc_initialized = true;
+ malloc_mutex_unlock(&init_lock);
+ return (false);
+}
+
+/*
+ * End initialization functions.
+ */
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
+
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_malloc(size_t size)
+{
+ void *ret;
+ size_t usize;
+ prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+
+ if (malloc_init()) {
+ ret = NULL;
+ goto label_oom;
+ }
+
+ if (size == 0)
+ size = 1;
+
+ if (config_prof && opt_prof) {
+ usize = s2u(size);
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL) {
+ ret = NULL;
+ goto label_oom;
+ }
+ if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
+ SMALL_MAXCLASS) {
+ ret = imalloc(SMALL_MAXCLASS+1);
+ if (ret != NULL)
+ arena_prof_promoted(ret, usize);
+ } else
+ ret = imalloc(size);
+ } else {
+ if (config_stats || (config_valgrind && opt_valgrind))
+ usize = s2u(size);
+ ret = imalloc(size);
+ }
+
+label_oom:
+ if (ret == NULL) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in malloc(): "
+ "out of memory\n");
+ abort();
+ }
+ errno = ENOMEM;
+ }
+ if (config_prof && opt_prof && ret != NULL)
+ prof_malloc(ret, usize, cnt);
+ if (config_stats && ret != NULL) {
+ assert(usize == isalloc(ret, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
+ }
+ UTRACE(0, size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
+ return (ret);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+#ifdef JEMALLOC_PROF
+/*
+ * Avoid any uncertainty as to how many backtrace frames to ignore in
+ * PROF_ALLOC_PREP().
+ */
+JEMALLOC_ATTR(noinline)
+#endif
+static int
+imemalign(void **memptr, size_t alignment, size_t size,
+ size_t min_alignment)
+{
+ int ret;
+ size_t usize;
+ void *result;
+ prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+
+ assert(min_alignment != 0);
+
+ if (malloc_init())
+ result = NULL;
+ else {
+ if (size == 0)
+ size = 1;
+
+ /* Make sure that alignment is a large enough power of 2. */
+ if (((alignment - 1) & alignment) != 0
+ || (alignment < min_alignment)) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error allocating "
+ "aligned memory: invalid alignment\n");
+ abort();
+ }
+ result = NULL;
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ usize = sa2u(size, alignment);
+ if (usize == 0) {
+ result = NULL;
+ ret = ENOMEM;
+ goto label_return;
+ }
+
+ if (config_prof && opt_prof) {
+ PROF_ALLOC_PREP(2, usize, cnt);
+ if (cnt == NULL) {
+ result = NULL;
+ ret = EINVAL;
+ } else {
+ if (prof_promote && (uintptr_t)cnt !=
+ (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
+ assert(sa2u(SMALL_MAXCLASS+1,
+ alignment) != 0);
+ result = ipalloc(sa2u(SMALL_MAXCLASS+1,
+ alignment), alignment, false);
+ if (result != NULL) {
+ arena_prof_promoted(result,
+ usize);
+ }
+ } else {
+ result = ipalloc(usize, alignment,
+ false);
+ }
+ }
+ } else
+ result = ipalloc(usize, alignment, false);
+ }
+
+ if (result == NULL) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error allocating aligned "
+ "memory: out of memory\n");
+ abort();
+ }
+ ret = ENOMEM;
+ goto label_return;
+ }
+
+ *memptr = result;
+ ret = 0;
+
+label_return:
+ if (config_stats && result != NULL) {
+ assert(usize == isalloc(result, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
+ }
+ if (config_prof && opt_prof && result != NULL)
+ prof_malloc(result, usize, cnt);
+ UTRACE(0, size, result);
+ return (ret);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+ int ret = imemalign(memptr, alignment, size, sizeof(void *));
+ JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
+ config_prof), false);
+ return (ret);
+}
+
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_aligned_alloc(size_t alignment, size_t size)
+{
+ void *ret;
+ int err;
+
+ if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
+ ret = NULL;
+ errno = err;
+ }
+ JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
+ false);
+ return (ret);
+}
+
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_calloc(size_t num, size_t size)
+{
+ void *ret;
+ size_t num_size;
+ size_t usize;
+ prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+
+ if (malloc_init()) {
+ num_size = 0;
+ ret = NULL;
+ goto label_return;
+ }
+
+ num_size = num * size;
+ if (num_size == 0) {
+ if (num == 0 || size == 0)
+ num_size = 1;
+ else {
+ ret = NULL;
+ goto label_return;
+ }
+ /*
+ * Try to avoid division here. We know that it isn't possible to
+ * overflow during multiplication if neither operand uses any of the
+ * most significant half of the bits in a size_t.
+ */
+ } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
+ && (num_size / size != num)) {
+ /* size_t overflow. */
+ ret = NULL;
+ goto label_return;
+ }
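+ /*
+ * For illustration: on a 64-bit system the mask above is
+ * SIZE_T_MAX << 32, so if both num and size are below 2^32 (e.g.
+ * num == 2^20 and size == 2^20, giving num_size == 2^40), the
+ * multiplication cannot wrap and the division is skipped; only when
+ * an operand uses the upper half of size_t is num_size / size
+ * compared against num.
+ */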
+
+ if (config_prof && opt_prof) {
+ usize = s2u(num_size);
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL) {
+ ret = NULL;
+ goto label_return;
+ }
+ if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
+ <= SMALL_MAXCLASS) {
+ ret = icalloc(SMALL_MAXCLASS+1);
+ if (ret != NULL)
+ arena_prof_promoted(ret, usize);
+ } else
+ ret = icalloc(num_size);
+ } else {
+ if (config_stats || (config_valgrind && opt_valgrind))
+ usize = s2u(num_size);
+ ret = icalloc(num_size);
+ }
+
+label_return:
+ if (ret == NULL) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in calloc(): out of "
+ "memory\n");
+ abort();
+ }
+ errno = ENOMEM;
+ }
+
+ if (config_prof && opt_prof && ret != NULL)
+ prof_malloc(ret, usize, cnt);
+ if (config_stats && ret != NULL) {
+ assert(usize == isalloc(ret, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
+ }
+ UTRACE(0, num_size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
+ return (ret);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_realloc(void *ptr, size_t size)
+{
+ void *ret;
+ size_t usize;
+ size_t old_size = 0;
+ size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+ prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
+
+ if (size == 0) {
+ if (ptr != NULL) {
+ /* realloc(ptr, 0) is equivalent to free(ptr). */
+ if (config_prof) {
+ old_size = isalloc(ptr, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(ptr);
+ } else if (config_stats) {
+ old_size = isalloc(ptr, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(ptr, false);
+ old_rzsize = u2rz(old_size);
+ }
+ if (config_prof && opt_prof) {
+ old_ctx = prof_ctx_get(ptr);
+ cnt = NULL;
+ }
+ iqalloc(ptr);
+ ret = NULL;
+ goto label_return;
+ } else
+ size = 1;
+ }
+
+ if (ptr != NULL) {
+ assert(malloc_initialized || IS_INITIALIZER);
+
+ if (config_prof) {
+ old_size = isalloc(ptr, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(ptr);
+ } else if (config_stats) {
+ old_size = isalloc(ptr, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(ptr, false);
+ old_rzsize = u2rz(old_size);
+ }
+ if (config_prof && opt_prof) {
+ usize = s2u(size);
+ old_ctx = prof_ctx_get(ptr);
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL) {
+ old_ctx = NULL;
+ ret = NULL;
+ goto label_oom;
+ }
+ if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
+ usize <= SMALL_MAXCLASS) {
+ ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
+ false, false);
+ if (ret != NULL)
+ arena_prof_promoted(ret, usize);
+ else
+ old_ctx = NULL;
+ } else {
+ ret = iralloc(ptr, size, 0, 0, false, false);
+ if (ret == NULL)
+ old_ctx = NULL;
+ }
+ } else {
+ if (config_stats || (config_valgrind && opt_valgrind))
+ usize = s2u(size);
+ ret = iralloc(ptr, size, 0, 0, false, false);
+ }
+
+label_oom:
+ if (ret == NULL) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in realloc(): "
+ "out of memory\n");
+ abort();
+ }
+ errno = ENOMEM;
+ }
+ } else {
+ /* realloc(NULL, size) is equivalent to malloc(size). */
+ if (config_prof && opt_prof)
+ old_ctx = NULL;
+ if (malloc_init()) {
+ if (config_prof && opt_prof)
+ cnt = NULL;
+ ret = NULL;
+ } else {
+ if (config_prof && opt_prof) {
+ usize = s2u(size);
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL)
+ ret = NULL;
+ else {
+ if (prof_promote && (uintptr_t)cnt !=
+ (uintptr_t)1U && usize <=
+ SMALL_MAXCLASS) {
+ ret = imalloc(SMALL_MAXCLASS+1);
+ if (ret != NULL) {
+ arena_prof_promoted(ret,
+ usize);
+ }
+ } else
+ ret = imalloc(size);
+ }
+ } else {
+ if (config_stats || (config_valgrind &&
+ opt_valgrind))
+ usize = s2u(size);
+ ret = imalloc(size);
+ }
+ }
+
+ if (ret == NULL) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in realloc(): "
+ "out of memory\n");
+ abort();
+ }
+ errno = ENOMEM;
+ }
+ }
+
+label_return:
+ if (config_prof && opt_prof)
+ prof_realloc(ret, usize, cnt, old_size, old_ctx);
+ if (config_stats && ret != NULL) {
+ thread_allocated_t *ta;
+ assert(usize == isalloc(ret, config_prof));
+ ta = thread_allocated_tsd_get();
+ ta->allocated += usize;
+ ta->deallocated += old_size;
+ }
+ UTRACE(ptr, size, ret);
+ JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
+ return (ret);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+void
+je_free(void *ptr)
+{
+
+ UTRACE(ptr, 0, 0);
+ if (ptr != NULL) {
+ size_t usize;
+ size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(malloc_initialized || IS_INITIALIZER);
+
+ if (config_prof && opt_prof) {
+ usize = isalloc(ptr, config_prof);
+ prof_free(ptr, usize);
+ } else if (config_stats || config_valgrind)
+ usize = isalloc(ptr, config_prof);
+ if (config_stats)
+ thread_allocated_tsd_get()->deallocated += usize;
+ if (config_valgrind && opt_valgrind)
+ rzsize = p2rz(ptr);
+ iqalloc(ptr);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+ }
+}
+
+/*
+ * End malloc(3)-compatible functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard override functions.
+ */
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_memalign(size_t alignment, size_t size)
+{
+ void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ imemalign(&ret, alignment, size, 1);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
+ return (ret);
+}
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_valloc(size_t size)
+{
+ void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ imemalign(&ret, PAGE, size, 1);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
+ return (ret);
+}
+#endif
+
+/*
+ * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
+ * #define je_malloc malloc
+ */
+#define malloc_is_malloc 1
+#define is_malloc_(a) malloc_is_ ## a
+#define is_malloc(a) is_malloc_(a)
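+/*
+ * For illustration: when jemalloc_defs.h contains "#define je_malloc malloc",
+ * the macro argument expands to malloc, so is_malloc(je_malloc) ->
+ * is_malloc_(malloc) -> malloc_is_malloc -> 1 and the hook definitions below
+ * are compiled in. Otherwise the token becomes malloc_is_je_malloc, which is
+ * undefined and evaluates to 0 in the #if, so the hooks are omitted.
+ */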
+
+#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
+/*
+ * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
+ * to inconsistently reference libc's malloc(3)-compatible functions
+ * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
+ *
+ * These definitions interpose hooks in glibc. The functions are actually
+ * passed an extra argument for the caller return address, which will be
+ * ignored.
+ */
+JEMALLOC_ATTR(visibility("default"))
+void (* const __free_hook)(void *ptr) = je_free;
+
+JEMALLOC_ATTR(visibility("default"))
+void *(* const __malloc_hook)(size_t size) = je_malloc;
+
+JEMALLOC_ATTR(visibility("default"))
+void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;
+
+JEMALLOC_ATTR(visibility("default"))
+void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
+#endif
+
+/*
+ * End non-standard override functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard functions.
+ */
+
+JEMALLOC_ATTR(visibility("default"))
+size_t
+je_malloc_usable_size(const void *ptr)
+{
+ size_t ret;
+
+ assert(malloc_initialized || IS_INITIALIZER);
+
+ if (config_ivsalloc)
+ ret = ivsalloc(ptr, config_prof);
+ else
+ ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+
+ return (ret);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+void
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
+{
+
+ stats_print(write_cb, cbopaque, opts);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_byname(name, oldp, oldlenp, newp, newlen));
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_nametomib(name, mibp, miblenp));
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
+}
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * Begin experimental functions.
+ */
+#ifdef JEMALLOC_EXPERIMENTAL
+
+JEMALLOC_INLINE void *
+iallocm(size_t usize, size_t alignment, bool zero)
+{
+
+ assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
+ alignment)));
+
+ if (alignment != 0)
+ return (ipalloc(usize, alignment, zero));
+ else if (zero)
+ return (icalloc(usize));
+ else
+ return (imalloc(usize));
+}
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+{
+ void *p;
+ size_t usize;
+ size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ & (SIZE_T_MAX-1));
+ bool zero = flags & ALLOCM_ZERO;
+ prof_thr_cnt_t *cnt;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ if (malloc_init())
+ goto label_oom;
+
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ if (usize == 0)
+ goto label_oom;
+
+ if (config_prof && opt_prof) {
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL)
+ goto label_oom;
+ if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
+ SMALL_MAXCLASS) {
+ size_t usize_promoted = (alignment == 0) ?
+ s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
+ alignment);
+ assert(usize_promoted != 0);
+ p = iallocm(usize_promoted, alignment, zero);
+ if (p == NULL)
+ goto label_oom;
+ arena_prof_promoted(p, usize);
+ } else {
+ p = iallocm(usize, alignment, zero);
+ if (p == NULL)
+ goto label_oom;
+ }
+ prof_malloc(p, usize, cnt);
+ } else {
+ p = iallocm(usize, alignment, zero);
+ if (p == NULL)
+ goto label_oom;
+ }
+ if (rsize != NULL)
+ *rsize = usize;
+
+ *ptr = p;
+ if (config_stats) {
+ assert(usize == isalloc(p, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
+ }
+ UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
+ return (ALLOCM_SUCCESS);
+label_oom:
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in allocm(): "
+ "out of memory\n");
+ abort();
+ }
+ *ptr = NULL;
+ UTRACE(0, size, 0);
+ return (ALLOCM_ERR_OOM);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+{
+ void *p, *q;
+ size_t usize;
+ size_t old_size;
+ size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ & (SIZE_T_MAX-1));
+ bool zero = flags & ALLOCM_ZERO;
+ bool no_move = flags & ALLOCM_NO_MOVE;
+ prof_thr_cnt_t *cnt;
+
+ assert(ptr != NULL);
+ assert(*ptr != NULL);
+ assert(size != 0);
+ assert(SIZE_T_MAX - size >= extra);
+ assert(malloc_initialized || IS_INITIALIZER);
+
+ p = *ptr;
+ if (config_prof && opt_prof) {
+ /*
+ * usize isn't knowable before iralloc() returns when extra is
+ * non-zero. Therefore, compute its maximum possible value and
+ * use that in PROF_ALLOC_PREP() to decide whether to capture a
+ * backtrace. prof_realloc() will use the actual usize to
+ * decide whether to sample.
+ */
+ size_t max_usize = (alignment == 0) ? s2u(size+extra) :
+ sa2u(size+extra, alignment);
+ prof_ctx_t *old_ctx = prof_ctx_get(p);
+ old_size = isalloc(p, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(p);
+ PROF_ALLOC_PREP(1, max_usize, cnt);
+ if (cnt == NULL)
+ goto label_oom;
+ /*
+ * Use minimum usize to determine whether promotion may happen.
+ */
+ if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
+ && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
+ <= SMALL_MAXCLASS) {
+ q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
+ alignment, zero, no_move);
+ if (q == NULL)
+ goto label_err;
+ if (max_usize < PAGE) {
+ usize = max_usize;
+ arena_prof_promoted(q, usize);
+ } else
+ usize = isalloc(q, config_prof);
+ } else {
+ q = iralloc(p, size, extra, alignment, zero, no_move);
+ if (q == NULL)
+ goto label_err;
+ usize = isalloc(q, config_prof);
+ }
+ prof_realloc(q, usize, cnt, old_size, old_ctx);
+ if (rsize != NULL)
+ *rsize = usize;
+ } else {
+ if (config_stats) {
+ old_size = isalloc(p, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(p, false);
+ old_rzsize = u2rz(old_size);
+ }
+ q = iralloc(p, size, extra, alignment, zero, no_move);
+ if (q == NULL)
+ goto label_err;
+ if (config_stats)
+ usize = isalloc(q, config_prof);
+ if (rsize != NULL) {
+ if (config_stats == false)
+ usize = isalloc(q, config_prof);
+ *rsize = usize;
+ }
+ }
+
+ *ptr = q;
+ if (config_stats) {
+ thread_allocated_t *ta;
+ ta = thread_allocated_tsd_get();
+ ta->allocated += usize;
+ ta->deallocated += old_size;
+ }
+ UTRACE(p, size, q);
+ JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
+ return (ALLOCM_SUCCESS);
+label_err:
+ if (no_move) {
+ UTRACE(p, size, q);
+ return (ALLOCM_ERR_NOT_MOVED);
+ }
+label_oom:
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in rallocm(): "
+ "out of memory\n");
+ abort();
+ }
+ UTRACE(p, size, 0);
+ return (ALLOCM_ERR_OOM);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_sallocm(const void *ptr, size_t *rsize, int flags)
+{
+ size_t sz;
+
+ assert(malloc_initialized || IS_INITIALIZER);
+
+ if (config_ivsalloc)
+ sz = ivsalloc(ptr, config_prof);
+ else {
+ assert(ptr != NULL);
+ sz = isalloc(ptr, config_prof);
+ }
+ assert(rsize != NULL);
+ *rsize = sz;
+
+ return (ALLOCM_SUCCESS);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_dallocm(void *ptr, int flags)
+{
+ size_t usize;
+ size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(ptr != NULL);
+ assert(malloc_initialized || IS_INITIALIZER);
+
+ UTRACE(ptr, 0, 0);
+ if (config_stats || config_valgrind)
+ usize = isalloc(ptr, config_prof);
+ if (config_prof && opt_prof) {
+ if (config_stats == false && config_valgrind == false)
+ usize = isalloc(ptr, config_prof);
+ prof_free(ptr, usize);
+ }
+ if (config_stats)
+ thread_allocated_tsd_get()->deallocated += usize;
+ if (config_valgrind && opt_valgrind)
+ rzsize = p2rz(ptr);
+ iqalloc(ptr);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+
+ return (ALLOCM_SUCCESS);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+je_nallocm(size_t *rsize, size_t size, int flags)
+{
+ size_t usize;
+ size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ & (SIZE_T_MAX-1));
+
+ assert(size != 0);
+
+ if (malloc_init())
+ return (ALLOCM_ERR_OOM);
+
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ if (usize == 0)
+ return (ALLOCM_ERR_OOM);
+
+ if (rsize != NULL)
+ *rsize = usize;
+ return (ALLOCM_SUCCESS);
+}
+
+#endif
+/*
+ * End experimental functions.
+ */
+/******************************************************************************/
+/*
+ * The following functions are used by threading libraries for protection of
+ * malloc during fork().
+ */
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+void
+jemalloc_prefork(void)
+#else
+void
+_malloc_prefork(void)
+#endif
+{
+ unsigned i;
+
+ /* Acquire all mutexes in a safe order. */
+ malloc_mutex_prefork(&arenas_lock);
+ for (i = 0; i < narenas; i++) {
+ if (arenas[i] != NULL)
+ arena_prefork(arenas[i]);
+ }
+ base_prefork();
+ huge_prefork();
+ chunk_dss_prefork();
+}
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+void
+jemalloc_postfork_parent(void)
+#else
+void
+_malloc_postfork(void)
+#endif
+{
+ unsigned i;
+
+ /* Release all mutexes, now that fork() has completed. */
+ chunk_dss_postfork_parent();
+ huge_postfork_parent();
+ base_postfork_parent();
+ for (i = 0; i < narenas; i++) {
+ if (arenas[i] != NULL)
+ arena_postfork_parent(arenas[i]);
+ }
+ malloc_mutex_postfork_parent(&arenas_lock);
+}
+
+void
+jemalloc_postfork_child(void)
+{
+ unsigned i;
+
+ /* Release all mutexes, now that fork() has completed. */
+ chunk_dss_postfork_child();
+ huge_postfork_child();
+ base_postfork_child();
+ for (i = 0; i < narenas; i++) {
+ if (arenas[i] != NULL)
+ arena_postfork_child(arenas[i]);
+ }
+ malloc_mutex_postfork_child(&arenas_lock);
+}
+
+/******************************************************************************/
+/*
+ * The following functions are used for TLS allocation/deallocation in static
+ * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
+ * is that these avoid accessing TLS variables.
+ */
+
+static void *
+a0alloc(size_t size, bool zero)
+{
+
+ if (malloc_init())
+ return (NULL);
+
+ if (size == 0)
+ size = 1;
+
+ if (size <= arena_maxclass)
+ return (arena_malloc(arenas[0], size, zero, false));
+ else
+ return (huge_malloc(size, zero));
+}
+
+void *
+a0malloc(size_t size)
+{
+
+ return (a0alloc(size, false));
+}
+
+void *
+a0calloc(size_t num, size_t size)
+{
+
+ return (a0alloc(num * size, true));
+}
+
+void
+a0free(void *ptr)
+{
+ arena_chunk_t *chunk;
+
+ if (ptr == NULL)
+ return;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk != ptr)
+ arena_dalloc(chunk->arena, chunk, ptr, false);
+ else
+ huge_dalloc(ptr, true);
+}
+
+/******************************************************************************/
diff --git a/contrib/jemalloc/src/mb.c b/contrib/jemalloc/src/mb.c
new file mode 100644
index 0000000..dc2c0a2
--- /dev/null
+++ b/contrib/jemalloc/src/mb.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_MB_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/contrib/jemalloc/src/mutex.c b/contrib/jemalloc/src/mutex.c
new file mode 100644
index 0000000..7be5fc9
--- /dev/null
+++ b/contrib/jemalloc/src/mutex.c
@@ -0,0 +1,153 @@
+#define JEMALLOC_MUTEX_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#ifdef JEMALLOC_LAZY_LOCK
+#include <dlfcn.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+#ifdef JEMALLOC_LAZY_LOCK
+bool isthreaded = false;
+#endif
+#ifdef JEMALLOC_MUTEX_INIT_CB
+static bool postpone_init = true;
+static malloc_mutex_t *postponed_mutexes = NULL;
+#endif
+
+#ifdef JEMALLOC_LAZY_LOCK
+static void pthread_create_once(void);
+#endif
+
+/******************************************************************************/
+/*
+ * We intercept pthread_create() calls in order to toggle isthreaded if the
+ * process goes multi-threaded.
+ */
+
+#ifdef JEMALLOC_LAZY_LOCK
+static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
+ void *(*)(void *), void *__restrict);
+
+static void
+pthread_create_once(void)
+{
+
+ pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
+ if (pthread_create_fptr == NULL) {
+ malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+ "\"pthread_create\")\n");
+ abort();
+ }
+
+ isthreaded = true;
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+pthread_create(pthread_t *__restrict thread,
+ const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
+ void *__restrict arg)
+{
+ static pthread_once_t once_control = PTHREAD_ONCE_INIT;
+
+ pthread_once(&once_control, pthread_create_once);
+
+ return (pthread_create_fptr(thread, attr, start_routine, arg));
+}
+#endif
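+/*
+ * Note on the lazy-lock scheme: while isthreaded remains false, the
+ * malloc_mutex_lock()/malloc_mutex_unlock() fast paths skip the underlying
+ * pthread lock, so a process that never calls pthread_create() pays no
+ * locking overhead; the dlsym(RTLD_NEXT) lookup above forwards to the real
+ * pthread_create() once threading actually starts.
+ */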
+
+/******************************************************************************/
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t));
+
+__weak_reference(_pthread_mutex_init_calloc_cb_stub,
+ _pthread_mutex_init_calloc_cb);
+
+int
+_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t))
+{
+
+ return (0);
+}
+#endif
+
+bool
+malloc_mutex_init(malloc_mutex_t *mutex)
+{
+#ifdef JEMALLOC_OSSPIN
+ mutex->lock = 0;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+ if (postpone_init) {
+ mutex->postponed_next = postponed_mutexes;
+ postponed_mutexes = mutex;
+ } else {
+ if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
+ 0)
+ return (true);
+ }
+#else
+ pthread_mutexattr_t attr;
+
+ if (pthread_mutexattr_init(&attr) != 0)
+ return (true);
+ pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
+ if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return (true);
+ }
+ pthread_mutexattr_destroy(&attr);
+
+#endif
+ return (false);
+}
+
+void
+malloc_mutex_prefork(malloc_mutex_t *mutex)
+{
+
+ malloc_mutex_lock(mutex);
+}
+
+void
+malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
+{
+
+ malloc_mutex_unlock(mutex);
+}
+
+void
+malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ malloc_mutex_unlock(mutex);
+#else
+ if (malloc_mutex_init(mutex)) {
+ malloc_printf("<jemalloc>: Error re-initializing mutex in "
+ "child\n");
+ if (opt_abort)
+ abort();
+ }
+#endif
+}
+
+bool
+mutex_boot(void)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ postpone_init = false;
+ while (postponed_mutexes != NULL) {
+ if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
+ base_calloc) != 0)
+ return (true);
+ postponed_mutexes = postponed_mutexes->postponed_next;
+ }
+#endif
+ return (false);
+}
diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
new file mode 100644
index 0000000..b509aae
--- /dev/null
+++ b/contrib/jemalloc/src/prof.c
@@ -0,0 +1,1243 @@
+#define JEMALLOC_PROF_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+/******************************************************************************/
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+#endif
+
+#ifdef JEMALLOC_PROF_LIBGCC
+#include <unwind.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)
+
+bool opt_prof = false;
+bool opt_prof_active = true;
+size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
+ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
+bool opt_prof_gdump = false;
+bool opt_prof_leak = false;
+bool opt_prof_accum = true;
+char opt_prof_prefix[PATH_MAX + 1];
+
+uint64_t prof_interval;
+bool prof_promote;
+
+/*
+ * Table of mutexes that are shared among ctx's. These are leaf locks, so
+ * there is no problem with using them for more than one ctx at the same time.
+ * The primary motivation for this sharing, though, is that ctx's are ephemeral,
+ * and destroying mutexes causes complications for systems that allocate when
+ * creating/destroying mutexes.
+ */
+static malloc_mutex_t *ctx_locks;
+static unsigned cum_ctxs; /* Atomic counter. */
+
+/*
+ * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
+ * structure that knows about all backtraces currently captured.
+ */
+static ckh_t bt2ctx;
+static malloc_mutex_t bt2ctx_mtx;
+
+static malloc_mutex_t prof_dump_seq_mtx;
+static uint64_t prof_dump_seq;
+static uint64_t prof_dump_iseq;
+static uint64_t prof_dump_mseq;
+static uint64_t prof_dump_useq;
+
+/*
+ * This buffer is rather large for stack allocation, so use a single buffer for
+ * all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since
+ * it must be locked anyway during dumping.
+ */
+static char prof_dump_buf[PROF_DUMP_BUFSIZE];
+static unsigned prof_dump_buf_end;
+static int prof_dump_fd;
+
+/* Do not dump any profiles until bootstrapping is complete. */
+static bool prof_booted = false;
+
+static malloc_mutex_t enq_mtx;
+static bool enq;
+static bool enq_idump;
+static bool enq_gdump;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static prof_bt_t *bt_dup(prof_bt_t *bt);
+static void bt_destroy(prof_bt_t *bt);
+#ifdef JEMALLOC_PROF_LIBGCC
+static _Unwind_Reason_Code prof_unwind_init_callback(
+ struct _Unwind_Context *context, void *arg);
+static _Unwind_Reason_Code prof_unwind_callback(
+ struct _Unwind_Context *context, void *arg);
+#endif
+static bool prof_flush(bool propagate_err);
+static bool prof_write(bool propagate_err, const char *s);
+static bool prof_printf(bool propagate_err, const char *format, ...)
+ JEMALLOC_ATTR(format(printf, 2, 3));
+static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
+ size_t *leak_nctx);
+static void prof_ctx_destroy(prof_ctx_t *ctx);
+static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
+static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx,
+ prof_bt_t *bt);
+static bool prof_dump_maps(bool propagate_err);
+static bool prof_dump(bool propagate_err, const char *filename,
+ bool leakcheck);
+static void prof_dump_filename(char *filename, char v, int64_t vseq);
+static void prof_fdump(void);
+static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1,
+ size_t *hash2);
+static bool prof_bt_keycomp(const void *k1, const void *k2);
+static malloc_mutex_t *prof_ctx_mutex_choose(void);
+
+/******************************************************************************/
+
+void
+bt_init(prof_bt_t *bt, void **vec)
+{
+
+ cassert(config_prof);
+
+ bt->vec = vec;
+ bt->len = 0;
+}
+
+static void
+bt_destroy(prof_bt_t *bt)
+{
+
+ cassert(config_prof);
+
+ idalloc(bt);
+}
+
+static prof_bt_t *
+bt_dup(prof_bt_t *bt)
+{
+ prof_bt_t *ret;
+
+ cassert(config_prof);
+
+ /*
+ * Create a single allocation that has space for vec immediately
+ * following the prof_bt_t structure. The backtraces that get
+ * stored in the backtrace caches are copied from stack-allocated
+ * temporary variables, so size is known at creation time. Making this
+ * a contiguous object improves cache locality.
+ */
+ ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
+ (bt->len * sizeof(void *)));
+ if (ret == NULL)
+ return (NULL);
+ ret->vec = (void **)((uintptr_t)ret +
+ QUANTUM_CEILING(sizeof(prof_bt_t)));
+ memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
+ ret->len = bt->len;
+
+ return (ret);
+}
+
+static inline void
+prof_enter(void)
+{
+
+ cassert(config_prof);
+
+ malloc_mutex_lock(&enq_mtx);
+ enq = true;
+ malloc_mutex_unlock(&enq_mtx);
+
+ malloc_mutex_lock(&bt2ctx_mtx);
+}
+
+static inline void
+prof_leave(void)
+{
+ bool idump, gdump;
+
+ cassert(config_prof);
+
+ malloc_mutex_unlock(&bt2ctx_mtx);
+
+ malloc_mutex_lock(&enq_mtx);
+ enq = false;
+ idump = enq_idump;
+ enq_idump = false;
+ gdump = enq_gdump;
+ enq_gdump = false;
+ malloc_mutex_unlock(&enq_mtx);
+
+ if (idump)
+ prof_idump();
+ if (gdump)
+ prof_gdump();
+}
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+ unw_context_t uc;
+ unw_cursor_t cursor;
+ unsigned i;
+ int err;
+
+ cassert(config_prof);
+ assert(bt->len == 0);
+ assert(bt->vec != NULL);
+
+ unw_getcontext(&uc);
+ unw_init_local(&cursor, &uc);
+
+ /* Throw away (nignore+1) stack frames, if that many exist. */
+ for (i = 0; i < nignore + 1; i++) {
+ err = unw_step(&cursor);
+ if (err <= 0)
+ return;
+ }
+
+ /*
+ * Iterate over stack frames until there are no more, or until no space
+ * remains in bt.
+ */
+ for (i = 0; i < PROF_BT_MAX; i++) {
+ unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
+ bt->len++;
+ err = unw_step(&cursor);
+ if (err <= 0)
+ break;
+ }
+}
+#elif (defined(JEMALLOC_PROF_LIBGCC))
+static _Unwind_Reason_Code
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
+{
+
+ cassert(config_prof);
+
+ return (_URC_NO_REASON);
+}
+
+static _Unwind_Reason_Code
+prof_unwind_callback(struct _Unwind_Context *context, void *arg)
+{
+ prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+
+ cassert(config_prof);
+
+ if (data->nignore > 0)
+ data->nignore--;
+ else {
+ data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
+ data->bt->len++;
+ if (data->bt->len == data->max)
+ return (_URC_END_OF_STACK);
+ }
+
+ return (_URC_NO_REASON);
+}
+
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+ prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};
+
+ cassert(config_prof);
+
+ _Unwind_Backtrace(prof_unwind_callback, &data);
+}
+#elif (defined(JEMALLOC_PROF_GCC))
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+#define BT_FRAME(i) \
+ if ((i) < nignore + PROF_BT_MAX) { \
+ void *p; \
+ if (__builtin_frame_address(i) == 0) \
+ return; \
+ p = __builtin_return_address(i); \
+ if (p == NULL) \
+ return; \
+ if (i >= nignore) { \
+ bt->vec[(i) - nignore] = p; \
+ bt->len = (i) - nignore + 1; \
+ } \
+ } else \
+ return;
+
+ cassert(config_prof);
+ assert(nignore <= 3);
+
+ BT_FRAME(0)
+ BT_FRAME(1)
+ BT_FRAME(2)
+ BT_FRAME(3)
+ BT_FRAME(4)
+ BT_FRAME(5)
+ BT_FRAME(6)
+ BT_FRAME(7)
+ BT_FRAME(8)
+ BT_FRAME(9)
+
+ BT_FRAME(10)
+ BT_FRAME(11)
+ BT_FRAME(12)
+ BT_FRAME(13)
+ BT_FRAME(14)
+ BT_FRAME(15)
+ BT_FRAME(16)
+ BT_FRAME(17)
+ BT_FRAME(18)
+ BT_FRAME(19)
+
+ BT_FRAME(20)
+ BT_FRAME(21)
+ BT_FRAME(22)
+ BT_FRAME(23)
+ BT_FRAME(24)
+ BT_FRAME(25)
+ BT_FRAME(26)
+ BT_FRAME(27)
+ BT_FRAME(28)
+ BT_FRAME(29)
+
+ BT_FRAME(30)
+ BT_FRAME(31)
+ BT_FRAME(32)
+ BT_FRAME(33)
+ BT_FRAME(34)
+ BT_FRAME(35)
+ BT_FRAME(36)
+ BT_FRAME(37)
+ BT_FRAME(38)
+ BT_FRAME(39)
+
+ BT_FRAME(40)
+ BT_FRAME(41)
+ BT_FRAME(42)
+ BT_FRAME(43)
+ BT_FRAME(44)
+ BT_FRAME(45)
+ BT_FRAME(46)
+ BT_FRAME(47)
+ BT_FRAME(48)
+ BT_FRAME(49)
+
+ BT_FRAME(50)
+ BT_FRAME(51)
+ BT_FRAME(52)
+ BT_FRAME(53)
+ BT_FRAME(54)
+ BT_FRAME(55)
+ BT_FRAME(56)
+ BT_FRAME(57)
+ BT_FRAME(58)
+ BT_FRAME(59)
+
+ BT_FRAME(60)
+ BT_FRAME(61)
+ BT_FRAME(62)
+ BT_FRAME(63)
+ BT_FRAME(64)
+ BT_FRAME(65)
+ BT_FRAME(66)
+ BT_FRAME(67)
+ BT_FRAME(68)
+ BT_FRAME(69)
+
+ BT_FRAME(70)
+ BT_FRAME(71)
+ BT_FRAME(72)
+ BT_FRAME(73)
+ BT_FRAME(74)
+ BT_FRAME(75)
+ BT_FRAME(76)
+ BT_FRAME(77)
+ BT_FRAME(78)
+ BT_FRAME(79)
+
+ BT_FRAME(80)
+ BT_FRAME(81)
+ BT_FRAME(82)
+ BT_FRAME(83)
+ BT_FRAME(84)
+ BT_FRAME(85)
+ BT_FRAME(86)
+ BT_FRAME(87)
+ BT_FRAME(88)
+ BT_FRAME(89)
+
+ BT_FRAME(90)
+ BT_FRAME(91)
+ BT_FRAME(92)
+ BT_FRAME(93)
+ BT_FRAME(94)
+ BT_FRAME(95)
+ BT_FRAME(96)
+ BT_FRAME(97)
+ BT_FRAME(98)
+ BT_FRAME(99)
+
+ BT_FRAME(100)
+ BT_FRAME(101)
+ BT_FRAME(102)
+ BT_FRAME(103)
+ BT_FRAME(104)
+ BT_FRAME(105)
+ BT_FRAME(106)
+ BT_FRAME(107)
+ BT_FRAME(108)
+ BT_FRAME(109)
+
+ BT_FRAME(110)
+ BT_FRAME(111)
+ BT_FRAME(112)
+ BT_FRAME(113)
+ BT_FRAME(114)
+ BT_FRAME(115)
+ BT_FRAME(116)
+ BT_FRAME(117)
+ BT_FRAME(118)
+ BT_FRAME(119)
+
+ BT_FRAME(120)
+ BT_FRAME(121)
+ BT_FRAME(122)
+ BT_FRAME(123)
+ BT_FRAME(124)
+ BT_FRAME(125)
+ BT_FRAME(126)
+ BT_FRAME(127)
+
+ /* Extras to compensate for nignore. */
+ BT_FRAME(128)
+ BT_FRAME(129)
+ BT_FRAME(130)
+#undef BT_FRAME
+}
+#else
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+
+ cassert(config_prof);
+ assert(false);
+}
+#endif
+
+prof_thr_cnt_t *
+prof_lookup(prof_bt_t *bt)
+{
+ union {
+ prof_thr_cnt_t *p;
+ void *v;
+ } ret;
+ prof_tdata_t *prof_tdata;
+
+ cassert(config_prof);
+
+ prof_tdata = *prof_tdata_tsd_get();
+ if (prof_tdata == NULL) {
+ prof_tdata = prof_tdata_init();
+ if (prof_tdata == NULL)
+ return (NULL);
+ }
+
+ if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
+ union {
+ prof_bt_t *p;
+ void *v;
+ } btkey;
+ union {
+ prof_ctx_t *p;
+ void *v;
+ } ctx;
+ bool new_ctx;
+
+ /*
+ * This thread's cache lacks bt. Look for it in the global
+ * cache.
+ */
+ prof_enter();
+ if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
+ /* bt has never been seen before. Insert it. */
+ ctx.v = imalloc(sizeof(prof_ctx_t));
+ if (ctx.v == NULL) {
+ prof_leave();
+ return (NULL);
+ }
+ btkey.p = bt_dup(bt);
+ if (btkey.v == NULL) {
+ prof_leave();
+ idalloc(ctx.v);
+ return (NULL);
+ }
+ ctx.p->bt = btkey.p;
+ ctx.p->lock = prof_ctx_mutex_choose();
+ memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
+ ql_new(&ctx.p->cnts_ql);
+ if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
+ /* OOM. */
+ prof_leave();
+ idalloc(btkey.v);
+ idalloc(ctx.v);
+ return (NULL);
+ }
+ /*
+ * Artificially raise curobjs, in order to avoid a race
+ * condition with prof_ctx_merge()/prof_ctx_destroy().
+ *
+ * No locking is necessary for ctx here because no other
+ * threads have had the opportunity to fetch it from
+ * bt2ctx yet.
+ */
+ ctx.p->cnt_merged.curobjs++;
+ new_ctx = true;
+ } else {
+ /*
+ * Artificially raise curobjs, in order to avoid a race
+ * condition with prof_ctx_merge()/prof_ctx_destroy().
+ */
+ malloc_mutex_lock(ctx.p->lock);
+ ctx.p->cnt_merged.curobjs++;
+ malloc_mutex_unlock(ctx.p->lock);
+ new_ctx = false;
+ }
+ prof_leave();
+
+ /* Link a prof_thr_cnt_t into ctx for this thread. */
+ if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
+ assert(ckh_count(&prof_tdata->bt2cnt) > 0);
+ /*
+ * Flush the least recently used cnt in order to keep
+ * bt2cnt from becoming too large.
+ */
+ ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
+ assert(ret.v != NULL);
+ if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
+ NULL, NULL))
+ assert(false);
+ ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
+ prof_ctx_merge(ret.p->ctx, ret.p);
+ /* ret can now be re-used. */
+ } else {
+ assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
+ /* Allocate and partially initialize a new cnt. */
+ ret.v = imalloc(sizeof(prof_thr_cnt_t));
+ if (ret.p == NULL) {
+ if (new_ctx)
+ prof_ctx_destroy(ctx.p);
+ return (NULL);
+ }
+ ql_elm_new(ret.p, cnts_link);
+ ql_elm_new(ret.p, lru_link);
+ }
+ /* Finish initializing ret. */
+ ret.p->ctx = ctx.p;
+ ret.p->epoch = 0;
+ memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
+ if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
+ if (new_ctx)
+ prof_ctx_destroy(ctx.p);
+ idalloc(ret.v);
+ return (NULL);
+ }
+ ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+ malloc_mutex_lock(ctx.p->lock);
+ ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
+ ctx.p->cnt_merged.curobjs--;
+ malloc_mutex_unlock(ctx.p->lock);
+ } else {
+ /* Move ret to the front of the LRU. */
+ ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
+ ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+ }
+
+ return (ret.p);
+}
+
+static bool
+prof_flush(bool propagate_err)
+{
+ bool ret = false;
+ ssize_t err;
+
+ cassert(config_prof);
+
+ err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
+ if (err == -1) {
+ if (propagate_err == false) {
+ malloc_write("<jemalloc>: write() failed during heap "
+ "profile flush\n");
+ if (opt_abort)
+ abort();
+ }
+ ret = true;
+ }
+ prof_dump_buf_end = 0;
+
+ return (ret);
+}
+
+static bool
+prof_write(bool propagate_err, const char *s)
+{
+ unsigned i, slen, n;
+
+ cassert(config_prof);
+
+ i = 0;
+ slen = strlen(s);
+ while (i < slen) {
+ /* Flush the buffer if it is full. */
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
+ if (prof_flush(propagate_err) && propagate_err)
+ return (true);
+
+ if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
+ /* Finish writing. */
+ n = slen - i;
+ } else {
+ /* Write as much of s as will fit. */
+ n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
+ }
+ memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
+ prof_dump_buf_end += n;
+ i += n;
+ }
+
+ return (false);
+}
+
+JEMALLOC_ATTR(format(printf, 2, 3))
+static bool
+prof_printf(bool propagate_err, const char *format, ...)
+{
+ bool ret;
+ va_list ap;
+ char buf[PROF_PRINTF_BUFSIZE];
+
+ va_start(ap, format);
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ ret = prof_write(propagate_err, buf);
+
+ return (ret);
+}
+
+static void
+prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
+{
+ prof_thr_cnt_t *thr_cnt;
+ prof_cnt_t tcnt;
+
+ cassert(config_prof);
+
+ malloc_mutex_lock(ctx->lock);
+
+ memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
+ ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
+ volatile unsigned *epoch = &thr_cnt->epoch;
+
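+ /*
+ * The owning thread bumps epoch to an odd value before
+ * updating cnts and back to an even value afterwards, so
+ * the loop below re-reads until it observes the same even
+ * epoch before and after the copy, yielding a consistent
+ * snapshot without locking the per-thread counter.
+ */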
+ while (true) {
+ unsigned epoch0 = *epoch;
+
+ /* Make sure epoch is even. */
+ if (epoch0 & 1U)
+ continue;
+
+ memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
+
+ /* Terminate if epoch didn't change while reading. */
+ if (*epoch == epoch0)
+ break;
+ }
+
+ ctx->cnt_summed.curobjs += tcnt.curobjs;
+ ctx->cnt_summed.curbytes += tcnt.curbytes;
+ if (opt_prof_accum) {
+ ctx->cnt_summed.accumobjs += tcnt.accumobjs;
+ ctx->cnt_summed.accumbytes += tcnt.accumbytes;
+ }
+ }
+
+ if (ctx->cnt_summed.curobjs != 0)
+ (*leak_nctx)++;
+
+ /* Add to cnt_all. */
+ cnt_all->curobjs += ctx->cnt_summed.curobjs;
+ cnt_all->curbytes += ctx->cnt_summed.curbytes;
+ if (opt_prof_accum) {
+ cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
+ cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
+ }
+
+ malloc_mutex_unlock(ctx->lock);
+}
+
+static void
+prof_ctx_destroy(prof_ctx_t *ctx)
+{
+
+ cassert(config_prof);
+
+ /*
+ * Check that ctx is still unused by any thread cache before destroying
+ * it. prof_lookup() artificially raises ctx->cnt_merged.curobjs in
+ * order to avoid a race condition with this function, as does
+ * prof_ctx_merge() in order to avoid a race between the main body of
+ * prof_ctx_merge() and entry into this function.
+ */
+ prof_enter();
+ malloc_mutex_lock(ctx->lock);
+ if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 1) {
+ assert(ctx->cnt_merged.curbytes == 0);
+ assert(ctx->cnt_merged.accumobjs == 0);
+ assert(ctx->cnt_merged.accumbytes == 0);
+ /* Remove ctx from bt2ctx. */
+ if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+ assert(false);
+ prof_leave();
+ /* Destroy ctx. */
+ malloc_mutex_unlock(ctx->lock);
+ bt_destroy(ctx->bt);
+ idalloc(ctx);
+ } else {
+ /*
+ * Compensate for increment in prof_ctx_merge() or
+ * prof_lookup().
+ */
+ ctx->cnt_merged.curobjs--;
+ malloc_mutex_unlock(ctx->lock);
+ prof_leave();
+ }
+}
+
+static void
+prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+{
+ bool destroy;
+
+ cassert(config_prof);
+
+ /* Merge cnt stats and detach from ctx. */
+ malloc_mutex_lock(ctx->lock);
+ ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
+ ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
+ ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
+ ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
+ ql_remove(&ctx->cnts_ql, cnt, cnts_link);
+ if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
+ ctx->cnt_merged.curobjs == 0) {
+ /*
+ * Artificially raise ctx->cnt_merged.curobjs in order to keep
+ * another thread from winning the race to destroy ctx while
+ * this one has ctx->lock dropped. Without this, it would be
+ * possible for another thread to:
+ *
+ * 1) Sample an allocation associated with ctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_ctx_destroy(ctx).
+ *
+ * The result would be that ctx no longer exists by the time
+ * this thread accesses it in prof_ctx_destroy().
+ */
+ ctx->cnt_merged.curobjs++;
+ destroy = true;
+ } else
+ destroy = false;
+ malloc_mutex_unlock(ctx->lock);
+ if (destroy)
+ prof_ctx_destroy(ctx);
+}
+
+static bool
+prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
+{
+ unsigned i;
+
+ cassert(config_prof);
+
+ if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
+ assert(ctx->cnt_summed.curbytes == 0);
+ assert(ctx->cnt_summed.accumobjs == 0);
+ assert(ctx->cnt_summed.accumbytes == 0);
+ return (false);
+ }
+
+ if (prof_printf(propagate_err, "%"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @",
+ ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
+ ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
+ return (true);
+
+ for (i = 0; i < bt->len; i++) {
+ if (prof_printf(propagate_err, " %#"PRIxPTR,
+ (uintptr_t)bt->vec[i]))
+ return (true);
+ }
+
+ if (prof_write(propagate_err, "\n"))
+ return (true);
+
+ return (false);
+}
+
+static bool
+prof_dump_maps(bool propagate_err)
+{
+ int mfd;
+ char filename[PATH_MAX + 1];
+
+ cassert(config_prof);
+
+ malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
+ (int)getpid());
+ mfd = open(filename, O_RDONLY);
+ if (mfd != -1) {
+ ssize_t nread;
+
+ if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
+ propagate_err)
+ return (true);
+ nread = 0;
+ do {
+ prof_dump_buf_end += nread;
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
+ /* Make space in prof_dump_buf before read(). */
+ if (prof_flush(propagate_err) && propagate_err)
+ return (true);
+ }
+ nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
+ PROF_DUMP_BUFSIZE - prof_dump_buf_end);
+ } while (nread > 0);
+ close(mfd);
+ } else
+ return (true);
+
+ return (false);
+}
+
+static bool
+prof_dump(bool propagate_err, const char *filename, bool leakcheck)
+{
+ prof_cnt_t cnt_all;
+ size_t tabind;
+ union {
+ prof_bt_t *p;
+ void *v;
+ } bt;
+ union {
+ prof_ctx_t *p;
+ void *v;
+ } ctx;
+ size_t leak_nctx;
+
+ cassert(config_prof);
+
+ prof_enter();
+ prof_dump_fd = creat(filename, 0644);
+ if (prof_dump_fd == -1) {
+ if (propagate_err == false) {
+ malloc_printf(
+ "<jemalloc>: creat(\"%s\"), 0644) failed\n",
+ filename);
+ if (opt_abort)
+ abort();
+ }
+ goto label_error;
+ }
+
+ /* Merge per thread profile stats, and sum them in cnt_all. */
+ memset(&cnt_all, 0, sizeof(prof_cnt_t));
+ leak_nctx = 0;
+ for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
+ prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);
+
+ /* Dump profile header. */
+ if (opt_lg_prof_sample == 0) {
+ if (prof_printf(propagate_err,
+ "heap profile: %"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
+ cnt_all.curobjs, cnt_all.curbytes,
+ cnt_all.accumobjs, cnt_all.accumbytes))
+ goto label_error;
+ } else {
+ if (prof_printf(propagate_err,
+ "heap profile: %"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
+ cnt_all.curobjs, cnt_all.curbytes,
+ cnt_all.accumobjs, cnt_all.accumbytes,
+ ((uint64_t)1U << opt_lg_prof_sample)))
+ goto label_error;
+ }
+
+ /* Dump per ctx profile stats. */
+ for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
+ == false;) {
+ if (prof_dump_ctx(propagate_err, ctx.p, bt.p))
+ goto label_error;
+ }
+
+ /* Dump /proc/<pid>/maps if possible. */
+ if (prof_dump_maps(propagate_err))
+ goto label_error;
+
+ if (prof_flush(propagate_err))
+ goto label_error;
+ close(prof_dump_fd);
+ prof_leave();
+
+ if (leakcheck && cnt_all.curbytes != 0) {
+ malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
+ PRId64" object%s, %zu context%s\n",
+ cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "",
+ cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "",
+ leak_nctx, (leak_nctx != 1) ? "s" : "");
+ malloc_printf(
+ "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+ filename);
+ }
+
+ return (false);
+label_error:
+ prof_leave();
+ return (true);
+}
+
+#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+static void
+prof_dump_filename(char *filename, char v, int64_t vseq)
+{
+
+ cassert(config_prof);
+
+ if (vseq != UINT64_C(0xffffffffffffffff)) {
+ /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"PRIu64".%c%"PRId64".heap",
+ opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
+ } else {
+ /* "<prefix>.<pid>.<seq>.<v>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"PRIu64".%c.heap",
+ opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
+ }
+}
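+/*
+ * For illustration (pid and sequence values assumed): with the default
+ * "jeprof" prefix, an interval dump might be named "jeprof.1234.0.i7.heap"
+ * (v == 'i', vseq == 7), whereas the final dump requested via atexit() passes
+ * the all-ones vseq and yields "jeprof.1234.0.f.heap".
+ */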
+
+static void
+prof_fdump(void)
+{
+ char filename[DUMP_FILENAME_BUFSIZE];
+
+ cassert(config_prof);
+
+ if (prof_booted == false)
+ return;
+
+ if (opt_prof_prefix[0] != '\0') {
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ prof_dump(false, filename, opt_prof_leak);
+ }
+}
+
+void
+prof_idump(void)
+{
+ char filename[PATH_MAX + 1];
+
+ cassert(config_prof);
+
+ if (prof_booted == false)
+ return;
+ malloc_mutex_lock(&enq_mtx);
+ if (enq) {
+ enq_idump = true;
+ malloc_mutex_unlock(&enq_mtx);
+ return;
+ }
+ malloc_mutex_unlock(&enq_mtx);
+
+ if (opt_prof_prefix[0] != '\0') {
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'i', prof_dump_iseq);
+ prof_dump_iseq++;
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ prof_dump(false, filename, false);
+ }
+}
+
+bool
+prof_mdump(const char *filename)
+{
+ char filename_buf[DUMP_FILENAME_BUFSIZE];
+
+ cassert(config_prof);
+
+ if (opt_prof == false || prof_booted == false)
+ return (true);
+
+ if (filename == NULL) {
+ /* No filename specified, so automatically generate one. */
+ if (opt_prof_prefix[0] == '\0')
+ return (true);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
+ prof_dump_mseq++;
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ filename = filename_buf;
+ }
+ return (prof_dump(true, filename, false));
+}
+
+void
+prof_gdump(void)
+{
+ char filename[DUMP_FILENAME_BUFSIZE];
+
+ cassert(config_prof);
+
+ if (prof_booted == false)
+ return;
+ malloc_mutex_lock(&enq_mtx);
+ if (enq) {
+ enq_gdump = true;
+ malloc_mutex_unlock(&enq_mtx);
+ return;
+ }
+ malloc_mutex_unlock(&enq_mtx);
+
+ if (opt_prof_prefix[0] != '\0') {
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'u', prof_dump_useq);
+ prof_dump_useq++;
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ prof_dump(false, filename, false);
+ }
+}
+
+static void
+prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+{
+ size_t ret1, ret2;
+ uint64_t h;
+ prof_bt_t *bt = (prof_bt_t *)key;
+
+ cassert(config_prof);
+ assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
+ assert(hash1 != NULL);
+ assert(hash2 != NULL);
+
+ h = hash(bt->vec, bt->len * sizeof(void *),
+ UINT64_C(0x94122f335b332aea));
+ if (minbits <= 32) {
+ /*
+ * Avoid doing multiple hashes, since a single hash provides
+ * enough bits.
+ */
+ ret1 = h & ZU(0xffffffffU);
+ ret2 = h >> 32;
+ } else {
+ ret1 = h;
+ ret2 = hash(bt->vec, bt->len * sizeof(void *),
+ UINT64_C(0x8432a476666bbc13));
+ }
+
+ *hash1 = ret1;
+ *hash2 = ret2;
+}
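+/*
+ * Two hash values are produced because ckh_t is jemalloc's cuckoo hash, in
+ * which every key maps to two candidate buckets. When minbits <= 32, a single
+ * 64-bit hash is split into both values; otherwise a second hash with a
+ * different seed is computed.
+ */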
+
+static bool
+prof_bt_keycomp(const void *k1, const void *k2)
+{
+ const prof_bt_t *bt1 = (prof_bt_t *)k1;
+ const prof_bt_t *bt2 = (prof_bt_t *)k2;
+
+ cassert(config_prof);
+
+ if (bt1->len != bt2->len)
+ return (false);
+ return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
+}
+
+static malloc_mutex_t *
+prof_ctx_mutex_choose(void)
+{
+ unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
+
+ return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
+}
+
+prof_tdata_t *
+prof_tdata_init(void)
+{
+ prof_tdata_t *prof_tdata;
+
+ cassert(config_prof);
+
+ /* Initialize an empty cache for this thread. */
+ prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
+ if (prof_tdata == NULL)
+ return (NULL);
+
+ if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
+ prof_bt_hash, prof_bt_keycomp)) {
+ idalloc(prof_tdata);
+ return (NULL);
+ }
+ ql_new(&prof_tdata->lru_ql);
+
+ prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
+ if (prof_tdata->vec == NULL) {
+ ckh_delete(&prof_tdata->bt2cnt);
+ idalloc(prof_tdata);
+ return (NULL);
+ }
+
+ prof_tdata->prng_state = 0;
+ prof_tdata->threshold = 0;
+ prof_tdata->accum = 0;
+
+ prof_tdata_tsd_set(&prof_tdata);
+
+ return (prof_tdata);
+}
+
+void
+prof_tdata_cleanup(void *arg)
+{
+ prof_thr_cnt_t *cnt;
+ prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;
+
+ cassert(config_prof);
+
+ /*
+ * Delete the hash table. All of its contents can still be iterated
+ * over via the LRU.
+ */
+ ckh_delete(&prof_tdata->bt2cnt);
+
+ /* Iteratively merge cnt's into the global stats and delete them. */
+ while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
+ ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
+ prof_ctx_merge(cnt->ctx, cnt);
+ idalloc(cnt);
+ }
+
+ idalloc(prof_tdata->vec);
+
+ idalloc(prof_tdata);
+ prof_tdata = NULL;
+ prof_tdata_tsd_set(&prof_tdata);
+}
+
+void
+prof_boot0(void)
+{
+
+ cassert(config_prof);
+
+ memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
+ sizeof(PROF_PREFIX_DEFAULT));
+}
+
+void
+prof_boot1(void)
+{
+
+ cassert(config_prof);
+
+ /*
+ * opt_prof and prof_promote must be in their final state before any
+ * arenas are initialized, so this function must be executed early.
+ */
+
+ if (opt_prof_leak && opt_prof == false) {
+ /*
+ * Enable opt_prof, but in such a way that profiles are never
+ * automatically dumped.
+ */
+ opt_prof = true;
+ opt_prof_gdump = false;
+ prof_interval = 0;
+ } else if (opt_prof) {
+ if (opt_lg_prof_interval >= 0) {
+ prof_interval = (((uint64_t)1U) <<
+ opt_lg_prof_interval);
+ } else
+ prof_interval = 0;
+ }
+
+ prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
+}
+
+bool
+prof_boot2(void)
+{
+
+ cassert(config_prof);
+
+ if (opt_prof) {
+ unsigned i;
+
+ if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp))
+ return (true);
+ if (malloc_mutex_init(&bt2ctx_mtx))
+ return (true);
+ if (prof_tdata_tsd_boot()) {
+ malloc_write(
+ "<jemalloc>: Error in pthread_key_create()\n");
+ abort();
+ }
+
+ if (malloc_mutex_init(&prof_dump_seq_mtx))
+ return (true);
+
+ if (malloc_mutex_init(&enq_mtx))
+ return (true);
+ enq = false;
+ enq_idump = false;
+ enq_gdump = false;
+
+ if (atexit(prof_fdump) != 0) {
+ malloc_write("<jemalloc>: Error in atexit()\n");
+ if (opt_abort)
+ abort();
+ }
+
+ ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
+ sizeof(malloc_mutex_t));
+ if (ctx_locks == NULL)
+ return (true);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ if (malloc_mutex_init(&ctx_locks[i]))
+ return (true);
+ }
+ }
+
+#ifdef JEMALLOC_PROF_LIBGCC
+ /*
+ * Cause the backtracing machinery to allocate its internal state
+ * before enabling profiling.
+ */
+ _Unwind_Backtrace(prof_unwind_init_callback, NULL);
+#endif
+
+ prof_booted = true;
+
+ return (false);
+}
+
+/******************************************************************************/
diff --git a/contrib/jemalloc/src/quarantine.c b/contrib/jemalloc/src/quarantine.c
new file mode 100644
index 0000000..89a25c6
--- /dev/null
+++ b/contrib/jemalloc/src/quarantine.c
@@ -0,0 +1,163 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+typedef struct quarantine_s quarantine_t;
+
+struct quarantine_s {
+ size_t curbytes;
+ size_t curobjs;
+ size_t first;
+#define LG_MAXOBJS_INIT 10
+ size_t lg_maxobjs;
+ void *objs[1]; /* Dynamically sized ring buffer. */
+};
+
+static void quarantine_cleanup(void *arg);
+
+malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
+malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
+ quarantine_cleanup)
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static quarantine_t *quarantine_init(size_t lg_maxobjs);
+static quarantine_t *quarantine_grow(quarantine_t *quarantine);
+static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
+
+/******************************************************************************/
+
+static quarantine_t *
+quarantine_init(size_t lg_maxobjs)
+{
+ quarantine_t *quarantine;
+
+ quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
+ ((ZU(1) << lg_maxobjs) * sizeof(void *)));
+ if (quarantine == NULL)
+ return (NULL);
+ quarantine->curbytes = 0;
+ quarantine->curobjs = 0;
+ quarantine->first = 0;
+ quarantine->lg_maxobjs = lg_maxobjs;
+
+ quarantine_tsd_set(&quarantine);
+
+ return (quarantine);
+}
+
+static quarantine_t *
+quarantine_grow(quarantine_t *quarantine)
+{
+ quarantine_t *ret;
+
+ ret = quarantine_init(quarantine->lg_maxobjs + 1);
+ if (ret == NULL)
+ return (quarantine);
+
+ ret->curbytes = quarantine->curbytes;
+ if (quarantine->first + quarantine->curobjs < (ZU(1) <<
+ quarantine->lg_maxobjs)) {
+ /* objs ring buffer data are contiguous. */
+ memcpy(ret->objs, &quarantine->objs[quarantine->first],
+ quarantine->curobjs * sizeof(void *));
+ ret->curobjs = quarantine->curobjs;
+ } else {
+ /* objs ring buffer data wrap around. */
+ size_t ncopy = (ZU(1) << quarantine->lg_maxobjs) -
+ quarantine->first;
+ memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy *
+ sizeof(void *));
+ ret->curobjs = ncopy;
+		if (quarantine->curobjs - ncopy != 0) {
+			memcpy(&ret->objs[ret->curobjs], quarantine->objs,
+			    (quarantine->curobjs - ncopy) * sizeof(void *));
+			ret->curobjs += quarantine->curobjs - ncopy;
+		}
+ }
+
+ return (ret);
+}
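+
+/*
+ * Worked example of the wrap-around copy above (illustrative sizes): with
+ * lg_maxobjs == 2 (4 slots), first == 3 and curobjs == 3, the live objects
+ * occupy slots {3, 0, 1}.  ncopy == 4 - 3 == 1, so slot 3 is copied to
+ * ret->objs[0] and slots {0, 1} are copied to ret->objs[1..2], leaving the
+ * new (8-slot) ring buffer linear, starting at index 0.
+ */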
+
+static void
+quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+{
+
+ while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
+ void *ptr = quarantine->objs[quarantine->first];
+ size_t usize = isalloc(ptr, config_prof);
+ idalloc(ptr);
+ quarantine->curbytes -= usize;
+ quarantine->curobjs--;
+ quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
+ quarantine->lg_maxobjs) - 1);
+ }
+}
+
+void
+quarantine(void *ptr)
+{
+ quarantine_t *quarantine;
+ size_t usize = isalloc(ptr, config_prof);
+
+ assert(config_fill);
+ assert(opt_quarantine);
+
+ quarantine = *quarantine_tsd_get();
+ if (quarantine == NULL && (quarantine =
+ quarantine_init(LG_MAXOBJS_INIT)) == NULL) {
+ idalloc(ptr);
+ return;
+ }
+ /*
+ * Drain one or more objects if the quarantine size limit would be
+ * exceeded by appending ptr.
+ */
+ if (quarantine->curbytes + usize > opt_quarantine) {
+ size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
+ - usize : 0;
+ quarantine_drain(quarantine, upper_bound);
+ }
+ /* Grow the quarantine ring buffer if it's full. */
+ if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
+ quarantine = quarantine_grow(quarantine);
+ /* quarantine_grow() must free a slot if it fails to grow. */
+ assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
+ /* Append ptr if its size doesn't exceed the quarantine size. */
+ if (quarantine->curbytes + usize <= opt_quarantine) {
+ size_t offset = (quarantine->first + quarantine->curobjs) &
+ ((ZU(1) << quarantine->lg_maxobjs) - 1);
+ quarantine->objs[offset] = ptr;
+ quarantine->curbytes += usize;
+ quarantine->curobjs++;
+ if (opt_junk)
+ memset(ptr, 0x5a, usize);
+ } else {
+ assert(quarantine->curbytes == 0);
+ idalloc(ptr);
+ }
+}
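+
+/*
+ * Example of the size-limit policy above (illustrative numbers): with
+ * opt_quarantine == 4096, curbytes == 4000 and usize == 512, the drain loop
+ * frees the oldest objects until curbytes <= 4096 - 512 == 3584, after which
+ * ptr is appended (and junk-filled with 0x5a if opt_junk is enabled).  An
+ * object larger than opt_quarantine is never enqueued; it is deallocated
+ * immediately instead.
+ */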
+
+static void
+quarantine_cleanup(void *arg)
+{
+ quarantine_t *quarantine = *(quarantine_t **)arg;
+
+ if (quarantine != NULL) {
+ quarantine_drain(quarantine, 0);
+ idalloc(quarantine);
+ }
+}
+
+bool
+quarantine_boot(void)
+{
+
+ assert(config_fill);
+
+ if (quarantine_tsd_boot())
+ return (true);
+
+ return (false);
+}
diff --git a/contrib/jemalloc/src/rtree.c b/contrib/jemalloc/src/rtree.c
new file mode 100644
index 0000000..eb0ff1e2
--- /dev/null
+++ b/contrib/jemalloc/src/rtree.c
@@ -0,0 +1,46 @@
+#define JEMALLOC_RTREE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+rtree_t *
+rtree_new(unsigned bits)
+{
+ rtree_t *ret;
+ unsigned bits_per_level, height, i;
+
+ bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
+ height = bits / bits_per_level;
+ if (height * bits_per_level != bits)
+ height++;
+ assert(height * bits_per_level >= bits);
+
+ ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) +
+ (sizeof(unsigned) * height));
+ if (ret == NULL)
+ return (NULL);
+ memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
+ height));
+
+ if (malloc_mutex_init(&ret->mutex)) {
+ /* Leak the rtree. */
+ return (NULL);
+ }
+ ret->height = height;
+ if (bits_per_level * height > bits)
+ ret->level2bits[0] = bits % bits_per_level;
+ else
+ ret->level2bits[0] = bits_per_level;
+ for (i = 1; i < height; i++)
+ ret->level2bits[i] = bits_per_level;
+
+ ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]);
+ if (ret->root == NULL) {
+ /*
+ * We leak the rtree here, since there's no generic base
+ * deallocation.
+ */
+ return (NULL);
+ }
+ memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
+
+ return (ret);
+}
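+
+/*
+ * Worked example (illustrative constants): if RTREE_NODESIZE / sizeof(void *)
+ * is 256, then bits_per_level == 8.  For bits == 20 this gives height == 3,
+ * with level2bits == {4, 8, 8}: the root level consumes the 4 leftover
+ * high-order key bits and each lower level consumes 8 bits.
+ */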
diff --git a/contrib/jemalloc/src/stats.c b/contrib/jemalloc/src/stats.c
new file mode 100644
index 0000000..4cad214
--- /dev/null
+++ b/contrib/jemalloc/src/stats.c
@@ -0,0 +1,550 @@
+#define JEMALLOC_STATS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#define CTL_GET(n, v, t) do { \
+ size_t sz = sizeof(t); \
+ xmallctl(n, v, &sz, NULL, 0); \
+} while (0)
+
+#define CTL_I_GET(n, v, t) do { \
+ size_t mib[6]; \
+ size_t miblen = sizeof(mib) / sizeof(size_t); \
+ size_t sz = sizeof(t); \
+ xmallctlnametomib(n, mib, &miblen); \
+ mib[2] = i; \
+ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+} while (0)
+
+#define CTL_J_GET(n, v, t) do { \
+ size_t mib[6]; \
+ size_t miblen = sizeof(mib) / sizeof(size_t); \
+ size_t sz = sizeof(t); \
+ xmallctlnametomib(n, mib, &miblen); \
+ mib[2] = j; \
+ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+} while (0)
+
+#define CTL_IJ_GET(n, v, t) do { \
+ size_t mib[6]; \
+ size_t miblen = sizeof(mib) / sizeof(size_t); \
+ size_t sz = sizeof(t); \
+ xmallctlnametomib(n, mib, &miblen); \
+ mib[2] = i; \
+ mib[4] = j; \
+ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+} while (0)
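+
+/*
+ * For example, CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t)
+ * converts the template name to a MIB, then overwrites the two placeholder
+ * components (mib[2] = arena index i, mib[4] = bin index j) before calling
+ * xmallctlbymib(), which is how the loops below address per-arena, per-bin
+ * statistics.
+ */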
+
+/******************************************************************************/
+/* Data. */
+
+bool opt_stats_print = false;
+
+size_t stats_cactive = 0;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i);
+static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i);
+static void stats_arena_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i, bool bins, bool large);
+
+/******************************************************************************/
+
+static void
+stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ unsigned i)
+{
+ size_t page;
+ bool config_tcache;
+ unsigned nbins, j, gap_start;
+
+ CTL_GET("arenas.page", &page, size_t);
+
+ CTL_GET("config.tcache", &config_tcache, bool);
+ if (config_tcache) {
+ malloc_cprintf(write_cb, cbopaque,
+ "bins: bin size regs pgs allocated nmalloc"
+ " ndalloc nrequests nfills nflushes"
+ " newruns reruns curruns\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "bins: bin size regs pgs allocated nmalloc"
+ " ndalloc newruns reruns curruns\n");
+ }
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
+ uint64_t nruns;
+
+ CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
+ if (nruns == 0) {
+ if (gap_start == UINT_MAX)
+ gap_start = j;
+ } else {
+ size_t reg_size, run_size, allocated;
+ uint32_t nregs;
+ uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t reruns;
+ size_t curruns;
+
+ if (gap_start != UINT_MAX) {
+ if (j > gap_start + 1) {
+ /* Gap of more than one size class. */
+ malloc_cprintf(write_cb, cbopaque,
+ "[%u..%u]\n", gap_start,
+ j - 1);
+ } else {
+ /* Gap of one size class. */
+ malloc_cprintf(write_cb, cbopaque,
+ "[%u]\n", gap_start);
+ }
+ gap_start = UINT_MAX;
+ }
+ CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
+ CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
+ CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
+ CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
+ &allocated, size_t);
+ CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
+ &nmalloc, uint64_t);
+ CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
+ &ndalloc, uint64_t);
+ if (config_tcache) {
+ CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
+ &nrequests, uint64_t);
+ CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
+ &nfills, uint64_t);
+ CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
+ &nflushes, uint64_t);
+ }
+ CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
+ uint64_t);
+ CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
+ size_t);
+ if (config_tcache) {
+ malloc_cprintf(write_cb, cbopaque,
+ "%13u %5zu %4u %3zu %12zu %12"PRIu64
+ " %12"PRIu64" %12"PRIu64" %12"PRIu64
+ " %12"PRIu64" %12"PRIu64" %12"PRIu64
+ " %12zu\n",
+ j, reg_size, nregs, run_size / page,
+ allocated, nmalloc, ndalloc, nrequests,
+ nfills, nflushes, nruns, reruns, curruns);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "%13u %5zu %4u %3zu %12zu %12"PRIu64
+ " %12"PRIu64" %12"PRIu64" %12"PRIu64
+ " %12zu\n",
+ j, reg_size, nregs, run_size / page,
+ allocated, nmalloc, ndalloc, nruns, reruns,
+ curruns);
+ }
+ }
+ }
+ if (gap_start != UINT_MAX) {
+ if (j > gap_start + 1) {
+ /* Gap of more than one size class. */
+ malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
+ gap_start, j - 1);
+ } else {
+ /* Gap of one size class. */
+ malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
+ }
+ }
+}
+
+static void
+stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ unsigned i)
+{
+ size_t page, nlruns, j;
+ ssize_t gap_start;
+
+ CTL_GET("arenas.page", &page, size_t);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "large: size pages nmalloc ndalloc nrequests"
+ " curruns\n");
+ CTL_GET("arenas.nlruns", &nlruns, size_t);
+ for (j = 0, gap_start = -1; j < nlruns; j++) {
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t run_size, curruns;
+
+ CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
+ uint64_t);
+ CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
+ uint64_t);
+ CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
+ uint64_t);
+ if (nrequests == 0) {
+ if (gap_start == -1)
+ gap_start = j;
+ } else {
+ CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
+ CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
+ size_t);
+ if (gap_start != -1) {
+ malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
+ j - gap_start);
+ gap_start = -1;
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
+ " %12zu\n",
+ run_size, run_size / page, nmalloc, ndalloc,
+ nrequests, curruns);
+ }
+ }
+ if (gap_start != -1)
+ malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
+}
+
+static void
+stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ unsigned i, bool bins, bool large)
+{
+ unsigned nthreads;
+ size_t page, pactive, pdirty, mapped;
+ uint64_t npurge, nmadvise, purged;
+ size_t small_allocated;
+ uint64_t small_nmalloc, small_ndalloc, small_nrequests;
+ size_t large_allocated;
+ uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+
+ CTL_GET("arenas.page", &page, size_t);
+
+ CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "assigned threads: %u\n", nthreads);
+ CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
+ CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
+ CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
+ CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
+ CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
+ " %"PRIu64" madvise%s, %"PRIu64" purged\n",
+ pactive, pdirty, npurge, npurge == 1 ? "" : "s",
+ nmadvise, nmadvise == 1 ? "" : "s", purged);
+
+ malloc_cprintf(write_cb, cbopaque,
+ " allocated nmalloc ndalloc nrequests\n");
+ CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
+ CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
+ CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
+ CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+ small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
+ CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
+ CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
+ CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
+ CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+ large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+ small_allocated + large_allocated,
+ small_nmalloc + large_nmalloc,
+ small_ndalloc + large_ndalloc,
+ small_nrequests + large_nrequests);
+ malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
+ CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
+ malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
+
+ if (bins)
+ stats_arena_bins_print(write_cb, cbopaque, i);
+ if (large)
+ stats_arena_lruns_print(write_cb, cbopaque, i);
+}
+
+void
+stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
+{
+ int err;
+ uint64_t epoch;
+ size_t u64sz;
+ bool general = true;
+ bool merged = true;
+ bool unmerged = true;
+ bool bins = true;
+ bool large = true;
+
+ /*
+ * Refresh stats, in case mallctl() was called by the application.
+ *
+ * Check for OOM here, since refreshing the ctl cache can trigger
+ * allocation. In practice, none of the subsequent mallctl()-related
+ * calls in this function will cause OOM if this one succeeds.
+	 */
+ epoch = 1;
+ u64sz = sizeof(uint64_t);
+ err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
+ if (err != 0) {
+ if (err == EAGAIN) {
+ malloc_write("<jemalloc>: Memory allocation failure in "
+ "mallctl(\"epoch\", ...)\n");
+ return;
+ }
+ malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+ "...)\n");
+ abort();
+ }
+
+ if (write_cb == NULL) {
+ /*
+ * The caller did not provide an alternate write_cb callback
+ * function, so use the default one. malloc_write() is an
+ * inline function, so use malloc_message() directly here.
+ */
+ write_cb = je_malloc_message;
+ cbopaque = NULL;
+ }
+
+ if (opts != NULL) {
+ unsigned i;
+
+ for (i = 0; opts[i] != '\0'; i++) {
+ switch (opts[i]) {
+ case 'g':
+ general = false;
+ break;
+ case 'm':
+ merged = false;
+ break;
+ case 'a':
+ unmerged = false;
+ break;
+ case 'b':
+ bins = false;
+ break;
+ case 'l':
+ large = false;
+ break;
+ default:;
+ }
+ }
+ }
+
+ write_cb(cbopaque, "___ Begin jemalloc statistics ___\n");
+ if (general) {
+ int err;
+ const char *cpv;
+ bool bv;
+ unsigned uv;
+ ssize_t ssv;
+ size_t sv, bsz, ssz, sssz, cpsz;
+
+ bsz = sizeof(bool);
+ ssz = sizeof(size_t);
+ sssz = sizeof(ssize_t);
+ cpsz = sizeof(const char *);
+
+ CTL_GET("version", &cpv, const char *);
+ malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+ CTL_GET("config.debug", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
+ bv ? "enabled" : "disabled");
+
+#define OPT_WRITE_BOOL(n) \
+ if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
+ == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %s\n", bv ? "true" : "false"); \
+ }
+#define OPT_WRITE_SIZE_T(n) \
+ if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
+ == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zu\n", sv); \
+ }
+#define OPT_WRITE_SSIZE_T(n) \
+ if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
+ == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zd\n", ssv); \
+ }
+#define OPT_WRITE_CHAR_P(n) \
+ if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
+ == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": \"%s\"\n", cpv); \
+ }
+
+ write_cb(cbopaque, "Run-time option settings:\n");
+ OPT_WRITE_BOOL(abort)
+ OPT_WRITE_SIZE_T(lg_chunk)
+ OPT_WRITE_SIZE_T(narenas)
+ OPT_WRITE_SSIZE_T(lg_dirty_mult)
+ OPT_WRITE_BOOL(stats_print)
+ OPT_WRITE_BOOL(junk)
+ OPT_WRITE_SIZE_T(quarantine)
+ OPT_WRITE_BOOL(redzone)
+ OPT_WRITE_BOOL(zero)
+ OPT_WRITE_BOOL(utrace)
+ OPT_WRITE_BOOL(valgrind)
+ OPT_WRITE_BOOL(xmalloc)
+ OPT_WRITE_BOOL(tcache)
+ OPT_WRITE_SSIZE_T(lg_tcache_max)
+ OPT_WRITE_BOOL(prof)
+ OPT_WRITE_CHAR_P(prof_prefix)
+ OPT_WRITE_BOOL(prof_active)
+ OPT_WRITE_SSIZE_T(lg_prof_sample)
+ OPT_WRITE_BOOL(prof_accum)
+ OPT_WRITE_SSIZE_T(lg_prof_interval)
+ OPT_WRITE_BOOL(prof_gdump)
+ OPT_WRITE_BOOL(prof_leak)
+
+#undef OPT_WRITE_BOOL
+#undef OPT_WRITE_SIZE_T
+#undef OPT_WRITE_SSIZE_T
+#undef OPT_WRITE_CHAR_P
+
+ malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
+
+ CTL_GET("arenas.narenas", &uv, unsigned);
+ malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);
+
+ malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
+ sizeof(void *));
+
+ CTL_GET("arenas.quantum", &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+
+ CTL_GET("arenas.page", &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
+
+ CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
+ if (ssv >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Min active:dirty page ratio per arena: %u:1\n",
+ (1U << ssv));
+ } else {
+ write_cb(cbopaque,
+ "Min active:dirty page ratio per arena: N/A\n");
+ }
+ if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
+ == 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Maximum thread-cached size class: %zu\n", sv);
+ }
+ if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
+ bv) {
+ CTL_GET("opt.lg_prof_sample", &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "Average profile sample interval: %"PRIu64
+ " (2^%zu)\n", (((uint64_t)1U) << sv), sv);
+
+ CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
+ if (ssv >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Average profile dump interval: %"PRIu64
+ " (2^%zd)\n",
+ (((uint64_t)1U) << ssv), ssv);
+ } else {
+ write_cb(cbopaque,
+ "Average profile dump interval: N/A\n");
+ }
+ }
+ CTL_GET("opt.lg_chunk", &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
+ (ZU(1) << sv), sv);
+ }
+
+ if (config_stats) {
+ size_t *cactive;
+ size_t allocated, active, mapped;
+ size_t chunks_current, chunks_high;
+ uint64_t chunks_total;
+ size_t huge_allocated;
+ uint64_t huge_nmalloc, huge_ndalloc;
+
+ CTL_GET("stats.cactive", &cactive, size_t *);
+ CTL_GET("stats.allocated", &allocated, size_t);
+ CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.mapped", &mapped, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "Allocated: %zu, active: %zu, mapped: %zu\n",
+ allocated, active, mapped);
+ malloc_cprintf(write_cb, cbopaque,
+ "Current active ceiling: %zu\n", atomic_read_z(cactive));
+
+ /* Print chunk stats. */
+ CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
+ CTL_GET("stats.chunks.high", &chunks_high, size_t);
+ CTL_GET("stats.chunks.current", &chunks_current, size_t);
+ malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
+ "highchunks curchunks\n");
+ malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n",
+ chunks_total, chunks_high, chunks_current);
+
+ /* Print huge stats. */
+ CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
+ CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
+ CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: nmalloc ndalloc allocated\n");
+ malloc_cprintf(write_cb, cbopaque,
+ " %12"PRIu64" %12"PRIu64" %12zu\n",
+ huge_nmalloc, huge_ndalloc, huge_allocated);
+
+ if (merged) {
+ unsigned narenas;
+
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ {
+ bool initialized[narenas];
+ size_t isz;
+ unsigned i, ninitialized;
+
+ isz = sizeof(initialized);
+ xmallctl("arenas.initialized", initialized,
+ &isz, NULL, 0);
+ for (i = ninitialized = 0; i < narenas; i++) {
+ if (initialized[i])
+ ninitialized++;
+ }
+
+ if (ninitialized > 1 || unmerged == false) {
+ /* Print merged arena stats. */
+ malloc_cprintf(write_cb, cbopaque,
+ "\nMerged arenas stats:\n");
+ stats_arena_print(write_cb, cbopaque,
+ narenas, bins, large);
+ }
+ }
+ }
+
+ if (unmerged) {
+ unsigned narenas;
+
+ /* Print stats for each arena. */
+
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ {
+ bool initialized[narenas];
+ size_t isz;
+ unsigned i;
+
+ isz = sizeof(initialized);
+ xmallctl("arenas.initialized", initialized,
+ &isz, NULL, 0);
+
+ for (i = 0; i < narenas; i++) {
+ if (initialized[i]) {
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\narenas[%u]:\n", i);
+ stats_arena_print(write_cb,
+ cbopaque, i, bins, large);
+ }
+ }
+ }
+ }
+ }
+	write_cb(cbopaque, "___ End jemalloc statistics ___\n");
+}
diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c
new file mode 100644
index 0000000..be26b59
--- /dev/null
+++ b/contrib/jemalloc/src/tcache.c
@@ -0,0 +1,435 @@
+#define JEMALLOC_TCACHE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+malloc_tsd_data(, tcache, tcache_t *, NULL)
+malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
+
+bool opt_tcache = true;
+ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
+
+tcache_bin_info_t *tcache_bin_info;
+static unsigned stack_nelms; /* Total stack elms per tcache. */
+
+size_t nhbins;
+size_t tcache_maxclass;
+
+/******************************************************************************/
+
+void *
+tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
+{
+ void *ret;
+
+ arena_tcache_fill_small(tcache->arena, tbin, binind,
+ config_prof ? tcache->prof_accumbytes : 0);
+ if (config_prof)
+ tcache->prof_accumbytes = 0;
+ ret = tcache_alloc_easy(tbin);
+
+ return (ret);
+}
+
+void
+tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+ tcache_t *tcache)
+{
+ void *ptr;
+ unsigned i, nflush, ndeferred;
+ bool merged_stats = false;
+
+ assert(binind < NBINS);
+ assert(rem <= tbin->ncached);
+
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+ /* Lock the arena bin associated with the first object. */
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ tbin->avail[0]);
+ arena_t *arena = chunk->arena;
+ arena_bin_t *bin = &arena->bins[binind];
+
+ if (config_prof && arena == tcache->arena) {
+ malloc_mutex_lock(&arena->lock);
+ arena_prof_accum(arena, tcache->prof_accumbytes);
+ malloc_mutex_unlock(&arena->lock);
+ tcache->prof_accumbytes = 0;
+ }
+
+ malloc_mutex_lock(&bin->lock);
+ if (config_stats && arena == tcache->arena) {
+ assert(merged_stats == false);
+ merged_stats = true;
+ bin->stats.nflushes++;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
+ ndeferred = 0;
+ for (i = 0; i < nflush; i++) {
+ ptr = tbin->avail[i];
+ assert(ptr != NULL);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk->arena == arena) {
+ size_t pageind = ((uintptr_t)ptr -
+ (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_t *mapelm =
+ &chunk->map[pageind-map_bias];
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ptr,
+ &arena_bin_info[binind], true);
+ }
+ arena_dalloc_bin(arena, chunk, ptr, mapelm);
+ } else {
+ /*
+ * This object was allocated via a different
+ * arena bin than the one that is currently
+ * locked. Stash the object, so that it can be
+ * handled in a future pass.
+ */
+ tbin->avail[ndeferred] = ptr;
+ ndeferred++;
+ }
+ }
+ malloc_mutex_unlock(&bin->lock);
+ }
+ if (config_stats && merged_stats == false) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ arena_bin_t *bin = &tcache->arena->bins[binind];
+ malloc_mutex_lock(&bin->lock);
+ bin->stats.nflushes++;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(&bin->lock);
+ }
+
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
+ tbin->ncached = rem;
+ if ((int)tbin->ncached < tbin->low_water)
+ tbin->low_water = tbin->ncached;
+}
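+
+/*
+ * Example of the deferred-flush passes above (illustrative): if avail holds
+ * objects from arenas {A, B, A, B}, the first pass locks A's bin, frees the
+ * two A objects, and stashes the B objects at avail[0..1]; the second pass
+ * (nflush == ndeferred == 2) then locks B's bin and frees the rest.  Each
+ * pass therefore acquires exactly one bin lock.
+ */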
+
+void
+tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+ tcache_t *tcache)
+{
+ void *ptr;
+ unsigned i, nflush, ndeferred;
+ bool merged_stats = false;
+
+ assert(binind < nhbins);
+ assert(rem <= tbin->ncached);
+
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+ /* Lock the arena associated with the first object. */
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ tbin->avail[0]);
+ arena_t *arena = chunk->arena;
+
+ malloc_mutex_lock(&arena->lock);
+ if ((config_prof || config_stats) && arena == tcache->arena) {
+ if (config_prof) {
+ arena_prof_accum(arena,
+ tcache->prof_accumbytes);
+ tcache->prof_accumbytes = 0;
+ }
+ if (config_stats) {
+ merged_stats = true;
+ arena->stats.nrequests_large +=
+ tbin->tstats.nrequests;
+ arena->stats.lstats[binind - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
+ }
+ ndeferred = 0;
+ for (i = 0; i < nflush; i++) {
+ ptr = tbin->avail[i];
+ assert(ptr != NULL);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk->arena == arena)
+ arena_dalloc_large(arena, chunk, ptr);
+ else {
+ /*
+ * This object was allocated via a different
+ * arena than the one that is currently locked.
+ * Stash the object, so that it can be handled
+ * in a future pass.
+ */
+ tbin->avail[ndeferred] = ptr;
+ ndeferred++;
+ }
+ }
+ malloc_mutex_unlock(&arena->lock);
+ }
+ if (config_stats && merged_stats == false) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ arena_t *arena = tcache->arena;
+ malloc_mutex_lock(&arena->lock);
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ arena->stats.lstats[binind - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(&arena->lock);
+ }
+
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
+ tbin->ncached = rem;
+ if ((int)tbin->ncached < tbin->low_water)
+ tbin->low_water = tbin->ncached;
+}
+
+void
+tcache_arena_associate(tcache_t *tcache, arena_t *arena)
+{
+
+ if (config_stats) {
+ /* Link into list of extant tcaches. */
+ malloc_mutex_lock(&arena->lock);
+ ql_elm_new(tcache, link);
+ ql_tail_insert(&arena->tcache_ql, tcache, link);
+ malloc_mutex_unlock(&arena->lock);
+ }
+ tcache->arena = arena;
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache)
+{
+
+ if (config_stats) {
+ /* Unlink from list of extant tcaches. */
+ malloc_mutex_lock(&tcache->arena->lock);
+ ql_remove(&tcache->arena->tcache_ql, tcache, link);
+ malloc_mutex_unlock(&tcache->arena->lock);
+ tcache_stats_merge(tcache, tcache->arena);
+ }
+}
+
+tcache_t *
+tcache_create(arena_t *arena)
+{
+ tcache_t *tcache;
+ size_t size, stack_offset;
+ unsigned i;
+
+ size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
+ /* Naturally align the pointer stacks. */
+ size = PTR_CEILING(size);
+ stack_offset = size;
+ size += stack_nelms * sizeof(void *);
+ /*
+ * Round up to the nearest multiple of the cacheline size, in order to
+ * avoid the possibility of false cacheline sharing.
+ *
+ * That this works relies on the same logic as in ipalloc(), but we
+ * cannot directly call ipalloc() here due to tcache bootstrapping
+ * issues.
+ */
+ size = (size + CACHELINE_MASK) & (-CACHELINE);
+
+ if (size <= SMALL_MAXCLASS)
+ tcache = (tcache_t *)arena_malloc_small(arena, size, true);
+ else if (size <= tcache_maxclass)
+ tcache = (tcache_t *)arena_malloc_large(arena, size, true);
+ else
+ tcache = (tcache_t *)icalloc(size);
+
+ if (tcache == NULL)
+ return (NULL);
+
+ tcache_arena_associate(tcache, arena);
+
+ assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
+ for (i = 0; i < nhbins; i++) {
+ tcache->tbins[i].lg_fill_div = 1;
+ tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
+ (uintptr_t)stack_offset);
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ }
+
+ tcache_tsd_set(&tcache);
+
+ return (tcache);
+}
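+
+/*
+ * Example of the rounding above (illustrative, CACHELINE == 64): a raw size
+ * of 1000 bytes becomes (1000 + 63) & ~63 == 1024; combined with the
+ * cacheline alignment noted in the ipalloc() comment, no tcache shares a
+ * cache line with a neighboring allocation.
+ */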
+
+void
+tcache_destroy(tcache_t *tcache)
+{
+ unsigned i;
+ size_t tcache_size;
+
+ tcache_arena_dissociate(tcache);
+
+ for (i = 0; i < NBINS; i++) {
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ tcache_bin_flush_small(tbin, i, 0, tcache);
+
+ if (config_stats && tbin->tstats.nrequests != 0) {
+ arena_t *arena = tcache->arena;
+ arena_bin_t *bin = &arena->bins[i];
+ malloc_mutex_lock(&bin->lock);
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ malloc_mutex_unlock(&bin->lock);
+ }
+ }
+
+ for (; i < nhbins; i++) {
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ tcache_bin_flush_large(tbin, i, 0, tcache);
+
+ if (config_stats && tbin->tstats.nrequests != 0) {
+ arena_t *arena = tcache->arena;
+ malloc_mutex_lock(&arena->lock);
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ arena->stats.lstats[i - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ malloc_mutex_unlock(&arena->lock);
+ }
+ }
+
+ if (config_prof && tcache->prof_accumbytes > 0) {
+ malloc_mutex_lock(&tcache->arena->lock);
+ arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
+ malloc_mutex_unlock(&tcache->arena->lock);
+ }
+
+ tcache_size = arena_salloc(tcache, false);
+ if (tcache_size <= SMALL_MAXCLASS) {
+ arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
+ arena_t *arena = chunk->arena;
+ size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
+ LG_PAGE;
+ arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
+ arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+ (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
+ LG_PAGE));
+ arena_bin_t *bin = run->bin;
+
+ malloc_mutex_lock(&bin->lock);
+ arena_dalloc_bin(arena, chunk, tcache, mapelm);
+ malloc_mutex_unlock(&bin->lock);
+ } else if (tcache_size <= tcache_maxclass) {
+ arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
+ arena_t *arena = chunk->arena;
+
+ malloc_mutex_lock(&arena->lock);
+ arena_dalloc_large(arena, chunk, tcache);
+ malloc_mutex_unlock(&arena->lock);
+ } else
+ idalloc(tcache);
+}
+
+void
+tcache_thread_cleanup(void *arg)
+{
+ tcache_t *tcache = *(tcache_t **)arg;
+
+ if (tcache == TCACHE_STATE_DISABLED) {
+ /* Do nothing. */
+ } else if (tcache == TCACHE_STATE_REINCARNATED) {
+ /*
+ * Another destructor called an allocator function after this
+ * destructor was called. Reset tcache to
+ * TCACHE_STATE_PURGATORY in order to receive another callback.
+ */
+ tcache = TCACHE_STATE_PURGATORY;
+ tcache_tsd_set(&tcache);
+ } else if (tcache == TCACHE_STATE_PURGATORY) {
+ /*
+ * The previous time this destructor was called, we set the key
+ * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
+ * cause re-creation of the tcache. This time, do nothing, so
+ * that the destructor will not be called again.
+ */
+ } else if (tcache != NULL) {
+ assert(tcache != TCACHE_STATE_PURGATORY);
+ tcache_destroy(tcache);
+ tcache = TCACHE_STATE_PURGATORY;
+ tcache_tsd_set(&tcache);
+ }
+}
+
+void
+tcache_stats_merge(tcache_t *tcache, arena_t *arena)
+{
+ unsigned i;
+
+ /* Merge and reset tcache stats. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ malloc_mutex_lock(&bin->lock);
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ malloc_mutex_unlock(&bin->lock);
+ tbin->tstats.nrequests = 0;
+ }
+
+ for (; i < nhbins; i++) {
+ malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ lstats->nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
+}
+
+bool
+tcache_boot0(void)
+{
+ unsigned i;
+
+ /*
+ * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
+ * known.
+ */
+ if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ tcache_maxclass = SMALL_MAXCLASS;
+ else if ((1U << opt_lg_tcache_max) > arena_maxclass)
+ tcache_maxclass = arena_maxclass;
+ else
+ tcache_maxclass = (1U << opt_lg_tcache_max);
+
+ nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
+
+ /* Initialize tcache_bin_info. */
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
+ sizeof(tcache_bin_info_t));
+ if (tcache_bin_info == NULL)
+ return (true);
+ stack_nelms = 0;
+ for (i = 0; i < NBINS; i++) {
+ if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
+ tcache_bin_info[i].ncached_max =
+ (arena_bin_info[i].nregs << 1);
+ } else {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MAX;
+ }
+ stack_nelms += tcache_bin_info[i].ncached_max;
+ }
+ for (; i < nhbins; i++) {
+ tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
+ stack_nelms += tcache_bin_info[i].ncached_max;
+ }
+
+ return (false);
+}
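+
+/*
+ * Example of the sizing above (illustrative): a small bin with nregs == 4
+ * gets ncached_max == 8 (nregs doubled); a bin whose doubled nregs exceeds
+ * TCACHE_NSLOTS_SMALL_MAX is capped at that limit, and every large size
+ * class gets TCACHE_NSLOTS_LARGE slots.  stack_nelms accumulates the total,
+ * which tcache_create() uses to size the contiguous per-tcache pointer
+ * stacks.
+ */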
+
+bool
+tcache_boot1(void)
+{
+
+ if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
+ return (true);
+
+ return (false);
+}
diff --git a/contrib/jemalloc/src/tsd.c b/contrib/jemalloc/src/tsd.c
new file mode 100644
index 0000000..0838dc8
--- /dev/null
+++ b/contrib/jemalloc/src/tsd.c
@@ -0,0 +1,72 @@
+#define JEMALLOC_TSD_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static unsigned ncleanups;
+static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+
+/******************************************************************************/
+
+void *
+malloc_tsd_malloc(size_t size)
+{
+
+ /* Avoid choose_arena() in order to dodge bootstrapping issues. */
+	return (arena_malloc(arenas[0], size, false, false));
+}
+
+void
+malloc_tsd_dalloc(void *wrapper)
+{
+
+ idalloc(wrapper);
+}
+
+void
+malloc_tsd_no_cleanup(void *arg)
+{
+
+ not_reached();
+}
+
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+void
+_malloc_thread_cleanup(void)
+{
+ bool pending[ncleanups], again;
+ unsigned i;
+
+ for (i = 0; i < ncleanups; i++)
+ pending[i] = true;
+
+ do {
+ again = false;
+ for (i = 0; i < ncleanups; i++) {
+ if (pending[i]) {
+ pending[i] = cleanups[i].f(cleanups[i].arg);
+ if (pending[i])
+ again = true;
+ }
+ }
+ } while (again);
+}
+#endif
+
+void
+malloc_tsd_cleanup_register(bool (*f)(void *), void *arg)
+{
+
+ assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
+ cleanups[ncleanups].f = f;
+ cleanups[ncleanups].arg = arg;
+ ncleanups++;
+}
+
+void
+malloc_tsd_boot(void)
+{
+
+ ncleanups = 0;
+}
diff --git a/contrib/jemalloc/src/util.c b/contrib/jemalloc/src/util.c
new file mode 100644
index 0000000..8b05042
--- /dev/null
+++ b/contrib/jemalloc/src/util.c
@@ -0,0 +1,635 @@
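+/*
+ * Define simple versions of assertion macros that won't recurse in case of
+ * assertion failures in malloc_*printf(), which would otherwise be reported
+ * via the very formatting code being asserted about.
+ */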
+#define assert(e) do { \
+ if (config_debug && !(e)) { \
+ malloc_write("<jemalloc>: Failed assertion\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Unreachable code reached\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Not implemented\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define JEMALLOC_UTIL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void wrtmessage(void *cbopaque, const char *s);
+#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
+static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
+ size_t *slen_p);
+#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
+#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
+#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
+static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
+ size_t *slen_p);
+
+/******************************************************************************/
+
+/* malloc_message() setup. */
+JEMALLOC_CATTR(visibility("hidden"), static)
+void
+wrtmessage(void *cbopaque, const char *s)
+{
+
+#ifdef SYS_write
+ /*
+ * Use syscall(2) rather than write(2) when possible in order to avoid
+ * the possibility of memory allocation within libc. This is necessary
+ * on FreeBSD; most operating systems do not have this problem though.
+ */
+ UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
+#else
+ UNUSED int result = write(STDERR_FILENO, s, strlen(s));
+#endif
+}
+
+void (*je_malloc_message)(void *, const char *s)
+ JEMALLOC_ATTR(visibility("default")) = wrtmessage;
+
+JEMALLOC_CATTR(visibility("hidden"), static)
+void
+wrtmessage_1_0(const char *s1, const char *s2, const char *s3,
+ const char *s4)
+{
+
+ wrtmessage(NULL, s1);
+ wrtmessage(NULL, s2);
+ wrtmessage(NULL, s3);
+ wrtmessage(NULL, s4);
+}
+
+void (*__malloc_message_1_0)(const char *s1, const char *s2, const char *s3,
+ const char *s4) = wrtmessage_1_0;
+__sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0);
+
+/*
+ * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
+ * provide a wrapper.
+ */
+int
+buferror(int errnum, char *buf, size_t buflen)
+{
+#ifdef _GNU_SOURCE
+	char *b = strerror_r(errnum, buf, buflen);
+ if (b != buf) {
+ strncpy(buf, b, buflen);
+ buf[buflen-1] = '\0';
+ }
+ return (0);
+#else
+	return (strerror_r(errnum, buf, buflen));
+#endif
+}
+
+uintmax_t
+malloc_strtoumax(const char *nptr, char **endptr, int base)
+{
+ uintmax_t ret, digit;
+ int b;
+ bool neg;
+ const char *p, *ns;
+
+ if (base < 0 || base == 1 || base > 36) {
+ errno = EINVAL;
+ return (UINTMAX_MAX);
+ }
+ b = base;
+
+ /* Swallow leading whitespace and get sign, if any. */
+ neg = false;
+ p = nptr;
+ while (true) {
+ switch (*p) {
+ case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
+ p++;
+ break;
+ case '-':
+ neg = true;
+ /* Fall through. */
+ case '+':
+ p++;
+ /* Fall through. */
+ default:
+ goto label_prefix;
+ }
+ }
+
+ /* Get prefix, if any. */
+ label_prefix:
+ /*
+ * Note where the first non-whitespace/sign character is so that it is
+ * possible to tell whether any digits are consumed (e.g., " 0" vs.
+ * " -x").
+ */
+ ns = p;
+ if (*p == '0') {
+ switch (p[1]) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7':
+ if (b == 0)
+ b = 8;
+ if (b == 8)
+ p++;
+ break;
+ case 'x':
+ switch (p[2]) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ if (b == 0)
+ b = 16;
+ if (b == 16)
+ p += 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ if (b == 0)
+ b = 10;
+
+ /* Convert. */
+ ret = 0;
+ while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
+ || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
+ || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
+ uintmax_t pret = ret;
+ ret *= b;
+ ret += digit;
+ if (ret < pret) {
+ /* Overflow. */
+ errno = ERANGE;
+ return (UINTMAX_MAX);
+ }
+ p++;
+ }
+ if (neg)
+ ret = -ret;
+
+ if (endptr != NULL) {
+ if (p == ns) {
+ /* No characters were converted. */
+ *endptr = (char *)nptr;
+ } else
+ *endptr = (char *)p;
+ }
+
+ return (ret);
+}
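+
+/*
+ * Examples (illustrative): with base == 0, "  0x1f" converts to 31 with
+ * *endptr pointing past the 'f', "0755" converts to 493 (octal), and " -x"
+ * consumes no digits, so *endptr is reset to nptr, matching strtoumax(3)
+ * behavior for the supported bases.
+ */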
+
+static char *
+u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
+{
+ unsigned i;
+
+ i = U2S_BUFSIZE - 1;
+ s[i] = '\0';
+ switch (base) {
+ case 10:
+ do {
+ i--;
+ s[i] = "0123456789"[x % (uint64_t)10];
+ x /= (uint64_t)10;
+ } while (x > 0);
+ break;
+ case 16: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEF"
+ : "0123456789abcdef";
+
+ do {
+ i--;
+ s[i] = digits[x & 0xf];
+ x >>= 4;
+ } while (x > 0);
+ break;
+ } default: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ : "0123456789abcdefghijklmnopqrstuvwxyz";
+
+ assert(base >= 2 && base <= 36);
+ do {
+ i--;
+ s[i] = digits[x % (uint64_t)base];
+ x /= (uint64_t)base;
+ } while (x > 0);
+ }}
+
+ *slen_p = U2S_BUFSIZE - 1 - i;
+ return (&s[i]);
+}
+
+static char *
+d2s(intmax_t x, char sign, char *s, size_t *slen_p)
+{
+ bool neg;
+
+ if ((neg = (x < 0)))
+ x = -x;
+ s = u2s(x, 10, false, s, slen_p);
+ if (neg)
+ sign = '-';
+ switch (sign) {
+ case '-':
+ if (neg == false)
+ break;
+ /* Fall through. */
+ case ' ':
+ case '+':
+ s--;
+ (*slen_p)++;
+ *s = sign;
+ break;
+ default: not_reached();
+ }
+ return (s);
+}
+
+static char *
+o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
+{
+
+ s = u2s(x, 8, false, s, slen_p);
+ if (alt_form && *s != '0') {
+ s--;
+ (*slen_p)++;
+ *s = '0';
+ }
+ return (s);
+}
+
+static char *
+x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
+{
+
+ s = u2s(x, 16, uppercase, s, slen_p);
+ if (alt_form) {
+ s -= 2;
+ (*slen_p) += 2;
+ memcpy(s, uppercase ? "0X" : "0x", 2);
+ }
+ return (s);
+}
+
+int
+malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+ int ret;
+ size_t i;
+ const char *f;
+ va_list tap;
+
+#define APPEND_C(c) do { \
+ if (i < size) \
+ str[i] = (c); \
+ i++; \
+} while (0)
+#define APPEND_S(s, slen) do { \
+ if (i < size) { \
+ size_t cpylen = (slen <= size - i) ? slen : size - i; \
+ memcpy(&str[i], s, cpylen); \
+ } \
+ i += slen; \
+} while (0)
+#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
+ /* Left padding. */ \
+ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
+ (size_t)width - slen : 0); \
+ if (left_justify == false && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) \
+ APPEND_C(' '); \
+ } \
+ /* Value. */ \
+ APPEND_S(s, slen); \
+ /* Right padding. */ \
+ if (left_justify && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) \
+ APPEND_C(' '); \
+ } \
+} while (0)
+#define GET_ARG_NUMERIC(val, len) do { \
+ switch (len) { \
+ case '?': \
+ val = va_arg(ap, int); \
+ break; \
+ case 'l': \
+ val = va_arg(ap, long); \
+ break; \
+ case 'q': \
+ val = va_arg(ap, long long); \
+ break; \
+ case 'j': \
+ val = va_arg(ap, intmax_t); \
+ break; \
+ case 't': \
+ val = va_arg(ap, ptrdiff_t); \
+ break; \
+ case 'z': \
+ val = va_arg(ap, ssize_t); \
+ break; \
+ case 'p': /* Synthetic; used for %p. */ \
+ val = va_arg(ap, uintptr_t); \
+ break; \
+ default: not_reached(); \
+ } \
+} while (0)
+
+ if (config_debug)
+ va_copy(tap, ap);
+
+ i = 0;
+ f = format;
+ while (true) {
+ switch (*f) {
+ case '\0': goto label_out;
+ case '%': {
+ bool alt_form = false;
+ bool zero_pad = false;
+ bool left_justify = false;
+ bool plus_space = false;
+ bool plus_plus = false;
+ int prec = -1;
+ int width = -1;
+ char len = '?';
+
+ f++;
+ if (*f == '%') {
+ /* %% */
+ APPEND_C(*f);
+ break;
+ }
+ /* Flags. */
+ while (true) {
+ switch (*f) {
+ case '#':
+ assert(alt_form == false);
+ alt_form = true;
+ break;
+ case '0':
+ assert(zero_pad == false);
+ zero_pad = true;
+ break;
+ case '-':
+ assert(left_justify == false);
+ left_justify = true;
+ break;
+ case ' ':
+ assert(plus_space == false);
+ plus_space = true;
+ break;
+ case '+':
+ assert(plus_plus == false);
+ plus_plus = true;
+ break;
+ default: goto label_width;
+ }
+ f++;
+ }
+ /* Width. */
+ label_width:
+ switch (*f) {
+ case '*':
+ width = va_arg(ap, int);
+ f++;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uwidth;
+ errno = 0;
+ uwidth = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uwidth != UINTMAX_MAX || errno !=
+ ERANGE);
+ width = (int)uwidth;
+ if (*f == '.') {
+ f++;
+ goto label_precision;
+ } else
+ goto label_length;
+ break;
+ } case '.':
+ f++;
+ goto label_precision;
+ default: goto label_length;
+ }
+ /* Precision. */
+ label_precision:
+ switch (*f) {
+ case '*':
+ prec = va_arg(ap, int);
+ f++;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uprec;
+ errno = 0;
+ uprec = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uprec != UINTMAX_MAX || errno != ERANGE);
+ prec = (int)uprec;
+ break;
+ }
+ default: break;
+ }
+ /* Length. */
+ label_length:
+ switch (*f) {
+ case 'l':
+ f++;
+ if (*f == 'l') {
+ len = 'q';
+ f++;
+ } else
+ len = 'l';
+ break;
+ case 'j':
+ len = 'j';
+ f++;
+ break;
+ case 't':
+ len = 't';
+ f++;
+ break;
+ case 'z':
+ len = 'z';
+ f++;
+ break;
+ default: break;
+ }
+ /* Conversion specifier. */
+ switch (*f) {
+ char *s;
+ size_t slen;
+ case 'd': case 'i': {
+ intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[D2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len);
+ s = d2s(val, (plus_plus ? '+' : (plus_space ?
+ ' ' : '-')), buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'o': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[O2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len);
+ s = o2s(val, alt_form, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'u': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[U2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len);
+ s = u2s(val, 10, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'x': case 'X': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len);
+ s = x2s(val, alt_form, *f == 'X', buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'c': {
+ unsigned char val;
+ char buf[2];
+
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ val = va_arg(ap, int);
+ buf[0] = val;
+ buf[1] = '\0';
+ APPEND_PADDED_S(buf, 1, width, left_justify);
+ f++;
+ break;
+ } case 's':
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ s = va_arg(ap, char *);
+ slen = (prec == -1) ? strlen(s) : prec;
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ case 'p': {
+ uintmax_t val;
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, 'p');
+ s = x2s(val, true, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ }
+ default: not_implemented();
+ }
+ break;
+ } default: {
+ APPEND_C(*f);
+ f++;
+ break;
+ }}
+ }
+ label_out:
+ if (i < size)
+ str[i] = '\0';
+ else
+ str[size - 1] = '\0';
+ ret = i;
+
+#undef APPEND_C
+#undef APPEND_S
+#undef APPEND_PADDED_S
+#undef GET_ARG_NUMERIC
+ return (ret);
+}
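+
+/*
+ * Example (illustrative): a format of "%-8zu|%#x" with arguments (size_t)42
+ * and 0x1f renders as "42      |0x1f".  Only the flags, width, precision,
+ * length modifiers, and conversion specifiers handled above are supported;
+ * anything else falls through to not_implemented(), which aborts only in
+ * debug builds.
+ */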
+
+JEMALLOC_ATTR(format(printf, 3, 4))
+int
+malloc_snprintf(char *str, size_t size, const char *format, ...)
+{
+ int ret;
+ va_list ap;
+
+ va_start(ap, format);
+ ret = malloc_vsnprintf(str, size, format, ap);
+ va_end(ap);
+
+ return (ret);
+}
+
+void
+malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, va_list ap)
+{
+ char buf[MALLOC_PRINTF_BUFSIZE];
+
+ if (write_cb == NULL) {
+ /*
+ * The caller did not provide an alternate write_cb callback
+ * function, so use the default one. malloc_write() is an
+ * inline function, so use malloc_message() directly here.
+ */
+ write_cb = je_malloc_message;
+ cbopaque = NULL;
+ }
+
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ write_cb(cbopaque, buf);
+}
+
+/*
+ * Print to a callback function in such a way as to (hopefully) avoid memory
+ * allocation.
+ */
+JEMALLOC_ATTR(format(printf, 3, 4))
+void
+malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(write_cb, cbopaque, format, ap);
+ va_end(ap);
+}
+
+/* Print to stderr in such a way as to avoid memory allocation. */
+JEMALLOC_ATTR(format(printf, 1, 2))
+void
+malloc_printf(const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+}
OpenPOWER on IntegriCloud