author     jasone <jasone@FreeBSD.org>  2015-08-18 00:21:25 +0000
committer  jasone <jasone@FreeBSD.org>  2015-08-18 00:21:25 +0000
commit     cbb776d46e1d7066efdee506c6096c75fb20f96e (patch)
tree       ec1bf01436900198985847d5fded5190c930cf79 /contrib/jemalloc/src
parent     c22443062b314e6f18aa5797d1b9dd3fb82e1157 (diff)
Update jemalloc to version 4.0.0.
Diffstat (limited to 'contrib/jemalloc/src')
-rw-r--r--  contrib/jemalloc/src/arena.c      | 2846
-rw-r--r--  contrib/jemalloc/src/base.c       |  178
-rw-r--r--  contrib/jemalloc/src/bitmap.c     |   18
-rw-r--r--  contrib/jemalloc/src/chunk.c      |  792
-rw-r--r--  contrib/jemalloc/src/chunk_dss.c  |   46
-rw-r--r--  contrib/jemalloc/src/chunk_mmap.c |  150
-rw-r--r--  contrib/jemalloc/src/ckh.c        |   61
-rw-r--r--  contrib/jemalloc/src/ctl.c        | 1085
-rw-r--r--  contrib/jemalloc/src/extent.c     |   40
-rw-r--r--  contrib/jemalloc/src/huge.c       |  560
-rw-r--r--  contrib/jemalloc/src/jemalloc.c   | 1981
-rw-r--r--  contrib/jemalloc/src/mutex.c      |   14
-rw-r--r--  contrib/jemalloc/src/pages.c      |  173
-rw-r--r--  contrib/jemalloc/src/prof.c       | 1797
-rw-r--r--  contrib/jemalloc/src/quarantine.c |  132
-rw-r--r--  contrib/jemalloc/src/rtree.c      |  138
-rw-r--r--  contrib/jemalloc/src/stats.c      |  479
-rw-r--r--  contrib/jemalloc/src/tcache.c     |  314
-rw-r--r--  contrib/jemalloc/src/tsd.c        |   59
-rw-r--r--  contrib/jemalloc/src/util.c       |   30
20 files changed, 7108 insertions, 3785 deletions
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
index dad707b..af48b39 100644
--- a/contrib/jemalloc/src/arena.c
+++ b/contrib/jemalloc/src/arena.c
@@ -5,37 +5,17 @@
/* Data. */
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
+static ssize_t lg_dirty_mult_default;
arena_bin_info_t arena_bin_info[NBINS];
-JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t small_size2bin[] = {
-#define S2B_8(i) i,
-#define S2B_16(i) S2B_8(i) S2B_8(i)
-#define S2B_32(i) S2B_16(i) S2B_16(i)
-#define S2B_64(i) S2B_32(i) S2B_32(i)
-#define S2B_128(i) S2B_64(i) S2B_64(i)
-#define S2B_256(i) S2B_128(i) S2B_128(i)
-#define S2B_512(i) S2B_256(i) S2B_256(i)
-#define S2B_1024(i) S2B_512(i) S2B_512(i)
-#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
-#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
-#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
-#define SIZE_CLASS(bin, delta, size) \
- S2B_##delta(bin)
- SIZE_CLASSES
-#undef S2B_8
-#undef S2B_16
-#undef S2B_32
-#undef S2B_64
-#undef S2B_128
-#undef S2B_256
-#undef S2B_512
-#undef S2B_1024
-#undef S2B_2048
-#undef S2B_4096
-#undef S2B_8192
-#undef SIZE_CLASS
-};
+size_t map_bias;
+size_t map_misc_offset;
+size_t arena_maxrun; /* Max run size for arenas. */
+size_t arena_maxclass; /* Max size class for arenas. */
+static size_t small_maxrun; /* Max run size used for small size classes. */
+static bool *small_run_tab; /* Valid small run page multiples. */
+unsigned nlclasses; /* Number of large size classes. */
+unsigned nhclasses; /* Number of huge size classes. */
/******************************************************************************/
/*
@@ -45,7 +25,7 @@ const uint8_t small_size2bin[] = {
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned);
+ bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
@@ -53,296 +33,327 @@ static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
/******************************************************************************/
-static inline int
-arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+#define CHUNK_MAP_KEY ((uintptr_t)0x1U)
+
+JEMALLOC_INLINE_C arena_chunk_map_misc_t *
+arena_miscelm_key_create(size_t size)
{
- uintptr_t a_mapelm = (uintptr_t)a;
- uintptr_t b_mapelm = (uintptr_t)b;
- assert(a != NULL);
- assert(b != NULL);
+ return ((arena_chunk_map_misc_t *)((size << CHUNK_MAP_SIZE_SHIFT) |
+ CHUNK_MAP_KEY));
+}
+
+JEMALLOC_INLINE_C bool
+arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
+{
- return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
+ return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
- u.rb_link, arena_run_comp)
+#undef CHUNK_MAP_KEY
-static inline int
-arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+JEMALLOC_INLINE_C size_t
+arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
{
- int ret;
- size_t a_size = a->bits & ~PAGE_MASK;
- size_t b_size = b->bits & ~PAGE_MASK;
- ret = (a_size > b_size) - (a_size < b_size);
- if (ret == 0) {
- uintptr_t a_mapelm, b_mapelm;
+ assert(arena_miscelm_is_key(miscelm));
- if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
- a_mapelm = (uintptr_t)a;
- else {
- /*
- * Treat keys as though they are lower than anything
- * else.
- */
- a_mapelm = 0;
- }
- b_mapelm = (uintptr_t)b;
+ return (((uintptr_t)miscelm & CHUNK_MAP_SIZE_MASK) >>
+ CHUNK_MAP_SIZE_SHIFT);
+}
- ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
- }
+JEMALLOC_INLINE_C size_t
+arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
- return (ret);
-}
+ assert(!arena_miscelm_is_key(miscelm));
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
- u.rb_link, arena_avail_comp)
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
+}
-static inline int
-arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
+JEMALLOC_INLINE_C int
+arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
+ uintptr_t a_miscelm = (uintptr_t)a;
+ uintptr_t b_miscelm = (uintptr_t)b;
assert(a != NULL);
assert(b != NULL);
- /*
- * Short-circuit for self comparison. The following comparison code
- * would come to the same result, but at the cost of executing the slow
- * path.
- */
- if (a == b)
- return (0);
+ return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
+}
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
+ rb_link, arena_run_comp)
+
+static size_t
+run_quantize(size_t size)
+{
+ size_t qsize;
+
+ assert(size != 0);
+ assert(size == PAGE_CEILING(size));
+
+ /* Don't change sizes that are valid small run sizes. */
+ if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
+ return (size);
/*
- * Order such that chunks with higher fragmentation are "less than"
- * those with lower fragmentation -- purging order is from "least" to
- * "greatest". Fragmentation is measured as:
- *
- * mean current avail run size
- * --------------------------------
- * mean defragmented avail run size
- *
- * navail
- * -----------
- * nruns_avail nruns_avail-nruns_adjac
- * = ========================= = -----------------------
- * navail nruns_avail
- * -----------------------
- * nruns_avail-nruns_adjac
- *
- * The following code multiplies away the denominator prior to
- * comparison, in order to avoid division.
- *
+ * Round down to the nearest run size that can actually be requested
+ * during normal large allocation. Add large_pad so that cache index
+ * randomization can offset the allocation from the page boundary.
*/
- {
- size_t a_val = (a->nruns_avail - a->nruns_adjac) *
- b->nruns_avail;
- size_t b_val = (b->nruns_avail - b->nruns_adjac) *
- a->nruns_avail;
+ qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
+ if (qsize <= SMALL_MAXCLASS + large_pad)
+ return (run_quantize(size - large_pad));
+ assert(qsize <= size);
+ return (qsize);
+}
+
+static size_t
+run_quantize_next(size_t size)
+{
+ size_t large_run_size_next;
+
+ assert(size != 0);
+ assert(size == PAGE_CEILING(size));
- if (a_val < b_val)
- return (1);
- if (a_val > b_val)
- return (-1);
- }
/*
- * Break ties by chunk address. For fragmented chunks, report lower
- * addresses as "lower", so that fragmentation reduction happens first
- * at lower addresses. However, use the opposite ordering for
- * unfragmented chunks, in order to increase the chances of
- * re-allocating dirty runs.
+ * Return the next quantized size greater than the input size.
+ * Quantized sizes comprise the union of run sizes that back small
+ * region runs, and run sizes that back large regions with no explicit
+ * alignment constraints.
*/
- {
- uintptr_t a_chunk = (uintptr_t)a;
- uintptr_t b_chunk = (uintptr_t)b;
- int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
- if (a->nruns_adjac == 0) {
- assert(b->nruns_adjac == 0);
- ret = -ret;
+
+ if (size > SMALL_MAXCLASS) {
+ large_run_size_next = PAGE_CEILING(index2size(size2index(size -
+ large_pad) + 1) + large_pad);
+ } else
+ large_run_size_next = SIZE_T_MAX;
+ if (size >= small_maxrun)
+ return (large_run_size_next);
+
+ while (true) {
+ size += PAGE;
+ assert(size <= small_maxrun);
+ if (small_run_tab[size >> LG_PAGE]) {
+ if (large_run_size_next < size)
+ return (large_run_size_next);
+ return (size);
}
- return (ret);
}
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
- dirty_link, arena_chunk_dirty_comp)
-
-static inline bool
-arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
+static size_t
+run_quantize_first(size_t size)
{
- bool ret;
+ size_t qsize = run_quantize(size);
- if (pageind-1 < map_bias)
- ret = false;
- else {
- ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk,
- pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
+ if (qsize < size) {
+ /*
+ * Skip a quantization that may have an adequately large run,
+ * because under-sized runs may be mixed in. This only happens
+ * when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ qsize = run_quantize_next(size);
}
- return (ret);
+ return (qsize);
}
-static inline bool
-arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
+JEMALLOC_INLINE_C int
+arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
- bool ret;
+ int ret;
+ uintptr_t a_miscelm = (uintptr_t)a;
+ size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
+ arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
+ size_t b_qsize = run_quantize(arena_miscelm_size_get(b));
- if (pageind+npages == chunk_npages)
- ret = false;
- else {
- assert(pageind+npages < chunk_npages);
- ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
- != arena_mapbits_dirty_get(chunk, pageind+npages));
+ /*
+ * Compare based on quantized size rather than size, in order to sort
+ * equally useful runs only by address.
+ */
+ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
+ if (ret == 0) {
+ if (!arena_miscelm_is_key(a)) {
+ uintptr_t b_miscelm = (uintptr_t)b;
+
+ ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
+ } else {
+ /*
+ * Treat keys as if they are lower than anything else.
+ */
+ ret = -1;
+ }
}
+
return (ret);
}
-static inline bool
-arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
+ arena_chunk_map_misc_t, rb_link, arena_avail_comp)
+
+static void
+arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
{
- return (arena_avail_adjac_pred(chunk, pageind) ||
- arena_avail_adjac_succ(chunk, pageind, npages));
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
+ pageind));
}
static void
-arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
{
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
+ arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
+ pageind));
+}
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be inserted is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
-
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac++;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac++;
- chunk->nruns_avail++;
- assert(chunk->nruns_avail > chunk->nruns_adjac);
+static void
+arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty += npages;
- chunk->ndirty += npages;
- }
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
+ assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
+ CHUNK_MAP_DIRTY);
- arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
+ qr_new(&miscelm->rd, rd_link);
+ qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
+ arena->ndirty += npages;
}
static void
-arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
{
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
+ assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
+ assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
+ CHUNK_MAP_DIRTY);
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be removed is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+ qr_remove(&miscelm->rd, rd_link);
+ assert(arena->ndirty >= npages);
+ arena->ndirty -= npages;
+}
+
+static size_t
+arena_chunk_dirty_npages(const extent_node_t *node)
+{
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac--;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac--;
- chunk->nruns_avail--;
- assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
- == 0 && chunk->nruns_adjac == 0));
+ return (extent_node_size_get(node) >> LG_PAGE);
+}
+
+void
+arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
+{
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty -= npages;
- chunk->ndirty -= npages;
+ if (cache) {
+ extent_node_dirty_linkage_init(node);
+ extent_node_dirty_insert(node, &arena->runs_dirty,
+ &arena->chunks_cache);
+ arena->ndirty += arena_chunk_dirty_npages(node);
}
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+}
- arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
+void
+arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
+{
+
+ if (dirty) {
+ extent_node_dirty_remove(node);
+ assert(arena->ndirty >= arena_chunk_dirty_npages(node));
+ arena->ndirty -= arena_chunk_dirty_npages(node);
+ }
}
-static inline void *
+JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
void *ret;
unsigned regind;
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
assert(run->nfree > 0);
- assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
+ assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
- regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
- ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
+ regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
+ miscelm = arena_run_to_miscelm(run);
+ rpages = arena_miscelm_to_rpages(miscelm);
+ ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
(uintptr_t)(bin_info->reg_interval * regind));
run->nfree--;
- if (regind == run->nextind)
- run->nextind++;
- assert(regind < run->nextind);
return (ret);
}
-static inline void
+JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
- size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+ index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr);
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - ((uintptr_t)run +
+ assert(((uintptr_t)ptr -
+ ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
(uintptr_t)bin_info->reg0_offset)) %
(uintptr_t)bin_info->reg_interval == 0);
- assert((uintptr_t)ptr >= (uintptr_t)run +
+ assert((uintptr_t)ptr >=
+ (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
(uintptr_t)bin_info->reg0_offset);
/* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
+ assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
- bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
+ bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
run->nfree++;
}
-static inline void
+JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (npages << LG_PAGE));
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (npages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
(npages << LG_PAGE));
}
-static inline void
+JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
- VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), PAGE);
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
+ << LG_PAGE)), PAGE);
}
-static inline void
+JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
@@ -358,9 +369,9 @@ arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{
if (config_stats) {
- ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
- add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
- sub_pages) << LG_PAGE);
+ ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
+ - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+ LG_PAGE);
if (cactive_diff != 0)
stats_cactive_add(cactive_diff);
}
@@ -368,10 +379,12 @@ arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
- size_t flag_dirty, size_t need_pages)
+ size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
size_t total_pages, rem_pages;
+ assert(flag_dirty == 0 || flag_decommitted == 0);
+
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE;
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
@@ -379,54 +392,70 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;
- arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
+ arena_avail_remove(arena, chunk, run_ind, total_pages);
+ if (flag_dirty != 0)
+ arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
arena_cactive_update(arena, need_pages, 0);
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
+ size_t flags = flag_dirty | flag_decommitted;
+ size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
+ 0;
+
+ arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
+ (rem_pages << LG_PAGE), flags |
+ (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
+ flag_unzeroed_mask));
+ arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
+ (rem_pages << LG_PAGE), flags |
+ (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
+ flag_unzeroed_mask));
if (flag_dirty != 0) {
- arena_mapbits_unallocated_set(chunk,
- run_ind+need_pages, (rem_pages << LG_PAGE),
- flag_dirty);
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- flag_dirty);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages));
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+total_pages-1));
+ arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
+ rem_pages);
}
- arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
- false, true);
+ arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
}
}
-static void
+static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
bool remove, bool zero)
{
arena_chunk_t *chunk;
- size_t flag_dirty, run_ind, need_pages, i;
+ arena_chunk_map_misc_t *miscelm;
+ size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
+ size_t flag_unzeroed_mask;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
+ if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+ run_ind << LG_PAGE, size, arena->ind))
+ return (true);
+
if (remove) {
arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
- need_pages);
+ flag_decommitted, need_pages);
}
if (zero) {
- if (flag_dirty == 0) {
+ if (flag_decommitted != 0) {
+ /* The run is untouched, and therefore zeroed. */
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+ *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
+ (need_pages << LG_PAGE));
+ } else if (flag_dirty != 0) {
+ /* The run is dirty, so all pages must be zeroed. */
+ arena_run_zero(chunk, run_ind, need_pages);
+ } else {
/*
* The run is clean, so some pages may be zeroed (i.e.
* never before touched).
@@ -443,12 +472,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
run_ind+i);
}
}
- } else {
- /* The run is dirty, so all pages must be zeroed. */
- arena_run_zero(chunk, run_ind, need_pages);
}
} else {
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
@@ -456,68 +482,66 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
* Set the last element first, in case the run only contains one page
* (i.e. both statements set the same element).
*/
- arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
+ flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+ arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ run_ind+need_pages-1)));
+ arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
+ return (false);
}
-static void
+static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{
- arena_run_split_large_helper(arena, run, size, true, zero);
+ return (arena_run_split_large_helper(arena, run, size, true, zero));
}
-static void
+static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{
- arena_run_split_large_helper(arena, run, size, false, zero);
+ return (arena_run_split_large_helper(arena, run, size, false, zero));
}
-static void
+static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
- size_t binind)
+ index_t binind)
{
arena_chunk_t *chunk;
- size_t flag_dirty, run_ind, need_pages, i;
+ arena_chunk_map_misc_t *miscelm;
+ size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
assert(binind != BININD_INVALID);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
- arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);
+ if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+ run_ind << LG_PAGE, size, arena->ind))
+ return (true);
- /*
- * Propagate the dirty and unzeroed flags to the allocated small run,
- * so that arena_dalloc_bin_run() has the ability to conditionally trim
- * clean pages.
- */
- arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
- /*
- * The first page will always be dirtied during small run
- * initialization, so a validation failure here would not actually
- * cause an observable failure.
- */
- if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
- run_ind) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind);
- for (i = 1; i < need_pages - 1; i++) {
- arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
+ flag_decommitted, need_pages);
+
+ for (i = 0; i < need_pages; i++) {
+ size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
+ run_ind+i);
+ arena_mapbits_small_set(chunk, run_ind+i, i, binind,
+ flag_unzeroed);
+ if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
arena_run_page_validate_zeroed(chunk, run_ind+i);
}
- arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
- binind, flag_dirty);
- if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages-1) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
+ return (false);
}
static arena_chunk_t *
@@ -533,76 +557,143 @@ arena_chunk_init_spare(arena_t *arena)
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
+ arena_maxrun);
assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxclass);
+ arena_maxrun);
assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1));
return (chunk);
}
+static bool
+arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
+{
+
+ /*
+ * The extent node notion of "committed" doesn't directly apply to
+ * arena chunks. Arbitrarily mark them as committed. The commit state
+ * of runs is tracked individually, and upon chunk deallocation the
+ * entire chunk is in a consistent commit state.
+ */
+ extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
+ extent_node_achunk_set(&chunk->node, true);
+ return (chunk_register(chunk, &chunk->node));
+}
+
static arena_chunk_t *
-arena_chunk_init_hard(arena_t *arena)
+arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ bool *zero, bool *commit)
{
arena_chunk_t *chunk;
- bool zero;
- size_t unzeroed, i;
- assert(arena->spare == NULL);
-
- zero = false;
malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
- &zero, arena->dss_prec);
+
+ chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
+ chunksize, chunksize, zero, commit);
+ if (chunk != NULL && !*commit) {
+ /* Commit header. */
+ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind)) {
+ chunk_dalloc_wrapper(arena, chunk_hooks,
+ (void *)chunk, chunksize, *commit);
+ chunk = NULL;
+ }
+ }
+ if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
+ if (!*commit) {
+ /* Undo commit of header. */
+ chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind);
+ }
+ chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
+ chunksize, *commit);
+ chunk = NULL;
+ }
+
malloc_mutex_lock(&arena->lock);
- if (chunk == NULL)
- return (NULL);
- if (config_stats)
+ return (chunk);
+}
+
+static arena_chunk_t *
+arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
+{
+ arena_chunk_t *chunk;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+
+ chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
+ chunksize, zero, true);
+ if (chunk != NULL) {
+ if (arena_chunk_register(arena, chunk, *zero)) {
+ chunk_dalloc_cache(arena, &chunk_hooks, chunk,
+ chunksize, true);
+ return (NULL);
+ }
+ *commit = true;
+ }
+ if (chunk == NULL) {
+ chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
+ zero, commit);
+ }
+
+ if (config_stats && chunk != NULL) {
arena->stats.mapped += chunksize;
+ arena->stats.metadata_mapped += (map_bias << LG_PAGE);
+ }
- chunk->arena = arena;
+ return (chunk);
+}
- /*
- * Claim that no pages are in use, since the header is merely overhead.
- */
- chunk->ndirty = 0;
+static arena_chunk_t *
+arena_chunk_init_hard(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+ bool zero, commit;
+ size_t flag_unzeroed, flag_decommitted, i;
- chunk->nruns_avail = 0;
- chunk->nruns_adjac = 0;
+ assert(arena->spare == NULL);
+
+ zero = false;
+ commit = false;
+ chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
+ if (chunk == NULL)
+ return (NULL);
/*
* Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
+ * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
+ * chunk.
*/
- unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
- arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
- unzeroed);
+ flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
+ flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
+ arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
+ flag_unzeroed | flag_decommitted);
/*
* There is no need to initialize the internal page map entries unless
* the chunk is not zeroed.
*/
- if (zero == false) {
- VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
- map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
+ if (!zero) {
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
+ (void *)arena_bitselm_get(chunk, map_bias+1),
+ (size_t)((uintptr_t) arena_bitselm_get(chunk,
+ chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
- arena_mapbits_unzeroed_set(chunk, i, unzeroed);
+ arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else {
- VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
- map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
- map_bias+1)));
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+ *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
+ arena_bitselm_get(chunk, chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
- unzeroed);
+ flag_unzeroed);
}
}
}
- arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
- unzeroed);
+ arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
+ flag_unzeroed);
return (chunk);
}
@@ -621,65 +712,383 @@ arena_chunk_alloc(arena_t *arena)
}
/* Insert the run into the runs_avail tree. */
- arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
+ arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
}
static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
+
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
+ arena_maxrun);
assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxclass);
+ arena_maxrun);
assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1));
+ assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
+ arena_mapbits_decommitted_get(chunk, chunk_npages-1));
/*
* Remove run from the runs_avail tree, so that the arena does not use
* it.
*/
- arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
+ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ bool committed;
arena->spare = chunk;
- malloc_mutex_unlock(&arena->lock);
- chunk_dealloc((void *)spare, chunksize, true);
- malloc_mutex_lock(&arena->lock);
- if (config_stats)
+ if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
+ arena_run_dirty_remove(arena, spare, map_bias,
+ chunk_npages-map_bias);
+ }
+
+ chunk_deregister(spare, &spare->node);
+
+ committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
+ 0);
+ if (!committed) {
+ /*
+ * Decommit the header. Mark the chunk as decommitted
+ * even if header decommit fails, since treating a
+ * partially committed chunk as committed has a high
+ * potential for causing later access of decommitted
+ * memory.
+ */
+ chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind);
+ }
+
+ chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
+ chunksize, committed);
+
+ if (config_stats) {
arena->stats.mapped -= chunksize;
+ arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+ }
} else
arena->spare = chunk;
}
-static arena_run_t *
-arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+static void
+arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
+ index_t index = size2index(usize) - nlclasses - NBINS;
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
+ cassert(config_stats);
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split_large(arena, run, size, zero);
- return (run);
+ arena->stats.nmalloc_huge++;
+ arena->stats.allocated_huge += usize;
+ arena->stats.hstats[index].nmalloc++;
+ arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+ index_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.nmalloc_huge--;
+ arena->stats.allocated_huge -= usize;
+ arena->stats.hstats[index].nmalloc--;
+ arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
+{
+ index_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge++;
+ arena->stats.allocated_huge -= usize;
+ arena->stats.hstats[index].ndalloc++;
+ arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+ index_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge--;
+ arena->stats.allocated_huge += usize;
+ arena->stats.hstats[index].ndalloc--;
+ arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
+{
+
+ arena_huge_dalloc_stats_update(arena, oldsize);
+ arena_huge_malloc_stats_update(arena, usize);
+}
+
+static void
+arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
+ size_t usize)
+{
+
+ arena_huge_dalloc_stats_update_undo(arena, oldsize);
+ arena_huge_malloc_stats_update_undo(arena, usize);
+}
+
+extent_node_t *
+arena_node_alloc(arena_t *arena)
+{
+ extent_node_t *node;
+
+ malloc_mutex_lock(&arena->node_cache_mtx);
+ node = ql_last(&arena->node_cache, ql_link);
+ if (node == NULL) {
+ malloc_mutex_unlock(&arena->node_cache_mtx);
+ return (base_alloc(sizeof(extent_node_t)));
}
+ ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
+ malloc_mutex_unlock(&arena->node_cache_mtx);
+ return (node);
+}
- return (NULL);
+void
+arena_node_dalloc(arena_t *arena, extent_node_t *node)
+{
+
+ malloc_mutex_lock(&arena->node_cache_mtx);
+ ql_elm_new(node, ql_link);
+ ql_tail_insert(&arena->node_cache, node, ql_link);
+ malloc_mutex_unlock(&arena->node_cache_mtx);
+}
+
+static void *
+arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ size_t usize, size_t alignment, bool *zero, size_t csize)
+{
+ void *ret;
+ bool commit = true;
+
+ ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
+ zero, &commit);
+ if (ret == NULL) {
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats) {
+ arena_huge_malloc_stats_update_undo(arena, usize);
+ arena->stats.mapped -= usize;
+ }
+ arena->nactive -= (usize >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
+ }
+
+ return (ret);
+}
+
+void *
+arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+ bool *zero)
+{
+ void *ret;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t csize = CHUNK_CEILING(usize);
+
+ malloc_mutex_lock(&arena->lock);
+
+ /* Optimistically update stats. */
+ if (config_stats) {
+ arena_huge_malloc_stats_update(arena, usize);
+ arena->stats.mapped += usize;
+ }
+ arena->nactive += (usize >> LG_PAGE);
+
+ ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
+ zero, true);
+ malloc_mutex_unlock(&arena->lock);
+ if (ret == NULL) {
+ ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
+ alignment, zero, csize);
+ }
+
+ if (config_stats && ret != NULL)
+ stats_cactive_add(usize);
+ return (ret);
+}
+
+void
+arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
+{
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t csize;
+
+ csize = CHUNK_CEILING(usize);
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats) {
+ arena_huge_dalloc_stats_update(arena, usize);
+ arena->stats.mapped -= usize;
+ stats_cactive_sub(usize);
+ }
+ arena->nactive -= (usize >> LG_PAGE);
+
+ chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
+ malloc_mutex_unlock(&arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize)
+{
+
+ assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
+ assert(oldsize != usize);
+
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats)
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ if (oldsize < usize) {
+ size_t udiff = usize - oldsize;
+ arena->nactive += udiff >> LG_PAGE;
+ if (config_stats)
+ stats_cactive_add(udiff);
+ } else {
+ size_t udiff = oldsize - usize;
+ arena->nactive -= udiff >> LG_PAGE;
+ if (config_stats)
+ stats_cactive_sub(udiff);
+ }
+ malloc_mutex_unlock(&arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize)
+{
+ size_t udiff = oldsize - usize;
+ size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats) {
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ if (cdiff != 0) {
+ arena->stats.mapped -= cdiff;
+ stats_cactive_sub(udiff);
+ }
+ }
+ arena->nactive -= udiff >> LG_PAGE;
+
+ if (cdiff != 0) {
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ void *nchunk = (void *)((uintptr_t)chunk +
+ CHUNK_CEILING(usize));
+
+ chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
+ }
+ malloc_mutex_unlock(&arena->lock);
+}
+
+static bool
+arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
+ size_t udiff, size_t cdiff)
+{
+ bool err;
+ bool commit = true;
+
+ err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
+ zero, &commit) == NULL);
+ if (err) {
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats) {
+ arena_huge_ralloc_stats_update_undo(arena, oldsize,
+ usize);
+ arena->stats.mapped -= cdiff;
+ }
+ arena->nactive -= (udiff >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
+ } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+ cdiff, true, arena->ind)) {
+ chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
+ true);
+ err = true;
+ }
+ return (err);
+}
+
+bool
+arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize, bool *zero)
+{
+ bool err;
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
+ size_t udiff = usize - oldsize;
+ size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+
+ malloc_mutex_lock(&arena->lock);
+
+ /* Optimistically update stats. */
+ if (config_stats) {
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ arena->stats.mapped += cdiff;
+ }
+ arena->nactive += (udiff >> LG_PAGE);
+
+ err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
+ chunksize, zero, true) == NULL);
+ malloc_mutex_unlock(&arena->lock);
+ if (err) {
+ err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
+ chunk, oldsize, usize, zero, nchunk, udiff,
+ cdiff);
+ } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+ cdiff, true, arena->ind)) {
+ chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
+ true);
+ err = true;
+ }
+
+ if (config_stats && !err)
+ stats_cactive_add(udiff);
+ return (err);
+}
+
+/*
+ * Do first-best-fit run selection, i.e. select the lowest run that best fits.
+ * Run sizes are quantized, so not all candidate runs are necessarily exactly
+ * the same size.
+ */
+static arena_run_t *
+arena_run_first_best_fit(arena_t *arena, size_t size)
+{
+ size_t search_size = run_quantize_first(size);
+ arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
+ arena_chunk_map_misc_t *miscelm =
+ arena_avail_tree_nsearch(&arena->runs_avail, key);
+ if (miscelm == NULL)
+ return (NULL);
+ return (&miscelm->run);
+}
+
+static arena_run_t *
+arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+{
+ arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
+ if (run != NULL) {
+ if (arena_run_split_large(arena, run, size, zero))
+ run = NULL;
+ }
+ return (run);
}
static arena_run_t *
@@ -688,8 +1097,8 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
arena_chunk_t *chunk;
arena_run_t *run;
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
+ assert(size <= arena_maxrun);
+ assert(size == PAGE_CEILING(size));
/* Search the arena's chunks for the lowest best fit. */
run = arena_run_alloc_large_helper(arena, size, zero);
@@ -701,8 +1110,9 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split_large(arena, run, size, zero);
+ run = &arena_miscelm_get(chunk, map_bias)->run;
+ if (arena_run_split_large(arena, run, size, zero))
+ run = NULL;
return (run);
}
@@ -715,36 +1125,24 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
}
static arena_run_t *
-arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
+arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
-
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
-
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split_small(arena, run, size, binind);
- return (run);
+ arena_run_t *run = arena_run_first_best_fit(arena, size);
+ if (run != NULL) {
+ if (arena_run_split_small(arena, run, size, binind))
+ run = NULL;
}
-
- return (NULL);
+ return (run);
}
static arena_run_t *
-arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
+arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
+ assert(size <= arena_maxrun);
+ assert(size == PAGE_CEILING(size));
assert(binind != BININD_INVALID);
/* Search the arena's chunks for the lowest best fit. */
@@ -757,8 +1155,9 @@ arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split_small(arena, run, size, binind);
+ run = &arena_miscelm_get(chunk, map_bias)->run;
+ if (arena_run_split_small(arena, run, size, binind))
+ run = NULL;
return (run);
}
@@ -770,313 +1169,373 @@ arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
return (arena_run_alloc_small_helper(arena, size, binind));
}
-static inline void
+static bool
+arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
+{
+
+ return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
+ << 3));
+}
+
+ssize_t
+arena_lg_dirty_mult_get(arena_t *arena)
+{
+ ssize_t lg_dirty_mult;
+
+ malloc_mutex_lock(&arena->lock);
+ lg_dirty_mult = arena->lg_dirty_mult;
+ malloc_mutex_unlock(&arena->lock);
+
+ return (lg_dirty_mult);
+}
+
+bool
+arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
+{
+
+ if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+ return (true);
+
+ malloc_mutex_lock(&arena->lock);
+ arena->lg_dirty_mult = lg_dirty_mult;
+ arena_maybe_purge(arena);
+ malloc_mutex_unlock(&arena->lock);
+
+ return (false);
+}
+
+void
arena_maybe_purge(arena_t *arena)
{
- size_t npurgeable, threshold;
/* Don't purge if the option is disabled. */
- if (opt_lg_dirty_mult < 0)
+ if (arena->lg_dirty_mult < 0)
return;
- /* Don't purge if all dirty pages are already being purged. */
- if (arena->ndirty <= arena->npurgatory)
+ /* Don't recursively purge. */
+ if (arena->purging)
return;
- npurgeable = arena->ndirty - arena->npurgatory;
- threshold = (arena->nactive >> opt_lg_dirty_mult);
/*
- * Don't purge unless the number of purgeable pages exceeds the
- * threshold.
+ * Iterate, since preventing recursive purging could otherwise leave too
+ * many dirty pages.
*/
- if (npurgeable <= threshold)
- return;
-
- arena_purge(arena, false);
+ while (true) {
+ size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+ if (threshold < chunk_npages)
+ threshold = chunk_npages;
+ /*
+ * Don't purge unless the number of purgeable pages exceeds the
+ * threshold.
+ */
+ if (arena->ndirty <= threshold)
+ return;
+ arena_purge(arena, false);
+ }
}
-static arena_chunk_t *
-chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
+static size_t
+arena_dirty_count(arena_t *arena)
{
- size_t *ndirty = (size_t *)arg;
+ size_t ndirty = 0;
+ arena_runs_dirty_link_t *rdelm;
+ extent_node_t *chunkselm;
+
+ for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+ chunkselm = qr_next(&arena->chunks_cache, cc_link);
+ rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
+ size_t npages;
+
+ if (rdelm == &chunkselm->rd) {
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+ chunkselm = qr_next(chunkselm, cc_link);
+ } else {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ assert(arena_mapbits_allocated_get(chunk, pageind) ==
+ 0);
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
+ assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
+ npages = arena_mapbits_unallocated_size_get(chunk,
+ pageind) >> LG_PAGE;
+ }
+ ndirty += npages;
+ }
- assert(chunk->ndirty != 0);
- *ndirty += chunk->ndirty;
- return (NULL);
+ return (ndirty);
}
static size_t
-arena_compute_npurgatory(arena_t *arena, bool all)
+arena_compute_npurge(arena_t *arena, bool all)
{
- size_t npurgatory, npurgeable;
+ size_t npurge;
/*
* Compute the minimum number of pages that this thread should try to
* purge.
*/
- npurgeable = arena->ndirty - arena->npurgatory;
+ if (!all) {
+ size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+ threshold = threshold < chunk_npages ? chunk_npages : threshold;
- if (all == false) {
- size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
-
- npurgatory = npurgeable - threshold;
+ npurge = arena->ndirty - threshold;
} else
- npurgatory = npurgeable;
+ npurge = arena->ndirty;
- return (npurgatory);
+ return (npurge);
}
-static void
-arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
- arena_chunk_mapelms_t *mapelms)
-{
- size_t pageind, npages;
-
- /*
- * Temporarily allocate free dirty runs within chunk. If all is false,
- * only operate on dirty runs that are fragments; otherwise operate on
- * all dirty runs.
- */
- for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
- if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
+static size_t
+arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
+ size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ arena_runs_dirty_link_t *rdelm, *rdelm_next;
+ extent_node_t *chunkselm;
+ size_t nstashed = 0;
+
+ /* Stash at least npurge pages. */
+ for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+ chunkselm = qr_next(&arena->chunks_cache, cc_link);
+ rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
+ size_t npages;
+ rdelm_next = qr_next(rdelm, rd_link);
+
+ if (rdelm == &chunkselm->rd) {
+ extent_node_t *chunkselm_next;
+ bool zero;
+ UNUSED void *chunk;
+
+ chunkselm_next = qr_next(chunkselm, cc_link);
+ /*
+ * Allocate. chunkselm remains valid due to the
+ * dalloc_node=false argument to chunk_alloc_cache().
+ */
+ zero = false;
+ chunk = chunk_alloc_cache(arena, chunk_hooks,
+ extent_node_addr_get(chunkselm),
+ extent_node_size_get(chunkselm), chunksize, &zero,
+ false);
+ assert(chunk == extent_node_addr_get(chunkselm));
+ assert(zero == extent_node_zeroed_get(chunkselm));
+ extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
+ purge_chunks_sentinel);
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+ chunkselm = chunkselm_next;
+ } else {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ arena_run_t *run = &miscelm->run;
size_t run_size =
arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
+
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+npages-1));
- if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
- (all || arena_avail_adjac(chunk, pageind,
- npages))) {
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- arena_run_split_large(arena, run, run_size,
- false);
- /* Append to list for later processing. */
- ql_elm_new(mapelm, u.ql_link);
- ql_tail_insert(mapelms, mapelm, u.ql_link);
- }
- } else {
- /* Skip run. */
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- npages = arena_mapbits_large_size_get(chunk,
- pageind) >> LG_PAGE;
- } else {
- size_t binind;
- arena_bin_info_t *bin_info;
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- assert(arena_mapbits_small_runind_get(chunk,
- pageind) == 0);
- binind = arena_bin_index(arena, run->bin);
- bin_info = &arena_bin_info[binind];
- npages = bin_info->run_size >> LG_PAGE;
+ /*
+ * If purging the spare chunk's run, make it available
+ * prior to allocation.
+ */
+ if (chunk == arena->spare)
+ arena_chunk_alloc(arena);
+
+ /* Temporarily allocate the free dirty run. */
+ arena_run_split_large(arena, run, run_size, false);
+ /* Stash. */
+ if (false)
+ qr_new(rdelm, rd_link); /* Redundant. */
+ else {
+ assert(qr_next(rdelm, rd_link) == rdelm);
+ assert(qr_prev(rdelm, rd_link) == rdelm);
}
+ qr_meld(purge_runs_sentinel, rdelm, rd_link);
}
+
+ nstashed += npages;
+ if (!all && nstashed >= npurge)
+ break;
}
- assert(pageind == chunk_npages);
- assert(chunk->ndirty == 0 || all == false);
- assert(chunk->nruns_adjac == 0);
+
+ return (nstashed);
}
static size_t
-arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
- arena_chunk_mapelms_t *mapelms)
+arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
{
- size_t npurged, pageind, npages, nmadvise;
- arena_chunk_map_t *mapelm;
+ size_t npurged, nmadvise;
+ arena_runs_dirty_link_t *rdelm;
+ extent_node_t *chunkselm;
- malloc_mutex_unlock(&arena->lock);
if (config_stats)
nmadvise = 0;
npurged = 0;
- ql_foreach(mapelm, mapelms, u.ql_link) {
- bool unzeroed;
- size_t flag_unzeroed, i;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- npages = arena_mapbits_large_size_get(chunk, pageind) >>
- LG_PAGE;
- assert(pageind + npages <= chunk_npages);
- unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
- LG_PAGE)), (npages << LG_PAGE));
- flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
- /*
- * Set the unzeroed flag for all pages, now that pages_purge()
- * has returned whether the pages were zeroed as a side effect
- * of purging. This chunk map modification is safe even though
- * the arena mutex isn't currently owned by this thread,
- * because the run is marked as allocated, thus protecting it
- * from being modified by any other thread. As long as these
- * writes don't perturb the first and last elements'
- * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
- */
- for (i = 0; i < npages; i++) {
- arena_mapbits_unzeroed_set(chunk, pageind+i,
- flag_unzeroed);
+
+ malloc_mutex_unlock(&arena->lock);
+ for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+ chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+ rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
+ size_t npages;
+
+ if (rdelm == &chunkselm->rd) {
+ /*
+ * Don't actually purge the chunk here because 1)
+ * chunkselm is embedded in the chunk and must remain
+ * valid, and 2) we deallocate the chunk in
+ * arena_unstash_purged(), where it is destroyed,
+ * decommitted, or purged, depending on chunk
+ * deallocation policy.
+ */
+ size_t size = extent_node_size_get(chunkselm);
+ npages = size >> LG_PAGE;
+ chunkselm = qr_next(chunkselm, cc_link);
+ } else {
+ size_t pageind, run_size, flag_unzeroed, flags, i;
+ bool decommitted;
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ run_size = arena_mapbits_large_size_get(chunk, pageind);
+ npages = run_size >> LG_PAGE;
+
+ assert(pageind + npages <= chunk_npages);
+ assert(!arena_mapbits_decommitted_get(chunk, pageind));
+ assert(!arena_mapbits_decommitted_get(chunk,
+ pageind+npages-1));
+ decommitted = !chunk_hooks->decommit(chunk, chunksize,
+ pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
+ if (decommitted) {
+ flag_unzeroed = 0;
+ flags = CHUNK_MAP_DECOMMITTED;
+ } else {
+ flag_unzeroed = chunk_purge_wrapper(arena,
+ chunk_hooks, chunk, chunksize, pageind <<
+ LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
+ flags = flag_unzeroed;
+ }
+ arena_mapbits_large_set(chunk, pageind+npages-1, 0,
+ flags);
+ arena_mapbits_large_set(chunk, pageind, run_size,
+ flags);
+
+ /*
+ * Set the unzeroed flag for internal pages, now that
+ * chunk_purge_wrapper() has returned whether the pages
+ * were zeroed as a side effect of purging. This chunk
+ * map modification is safe even though the arena mutex
+ * isn't currently owned by this thread, because the run
+ * is marked as allocated, thus protecting it from being
+ * modified by any other thread. As long as these
+ * writes don't perturb the first and last elements'
+ * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+ */
+ for (i = 1; i < npages-1; i++) {
+ arena_mapbits_internal_set(chunk, pageind+i,
+ flag_unzeroed);
+ }
}
+
npurged += npages;
if (config_stats)
nmadvise++;
}
malloc_mutex_lock(&arena->lock);
- if (config_stats)
+
+ if (config_stats) {
arena->stats.nmadvise += nmadvise;
+ arena->stats.purged += npurged;
+ }
return (npurged);
}
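arena_purge_stashed() first offers each stashed run to the arena's decommit hook and only falls back to chunk_purge_wrapper() when the hook declines; in the chunk_hooks_t convention used here, the hook returns false on success. A hedged sketch of what a user-supplied decommit hook with that shape could look like on a POSIX system (illustrative only, not jemalloc's default hook):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Hypothetical chunk decommit hook: return false if the page range
 * [offset, offset+length) within chunk was decommitted, true to decline
 * (in which case the caller purges the pages instead).
 */
static bool
my_chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{
	void *addr = (void *)((uintptr_t)chunk + offset);

	(void)size;
	(void)arena_ind;
	/* Overlay fresh PROT_NONE anonymous pages, dropping the backing. */
	if (mmap(addr, length, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON,
	    -1, 0) == MAP_FAILED)
		return (true);
	return (false);
}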
static void
-arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
- arena_chunk_mapelms_t *mapelms)
-{
- arena_chunk_map_t *mapelm;
- size_t pageind;
-
- /* Deallocate runs. */
- for (mapelm = ql_first(mapelms); mapelm != NULL;
- mapelm = ql_first(mapelms)) {
- arena_run_t *run;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
- LG_PAGE));
- ql_remove(mapelms, mapelm, u.ql_link);
- arena_run_dalloc(arena, run, false, true);
+arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ arena_runs_dirty_link_t *rdelm, *rdelm_next;
+ extent_node_t *chunkselm;
+
+ /* Deallocate chunks/runs. */
+ for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+ chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+ rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
+ rdelm_next = qr_next(rdelm, rd_link);
+ if (rdelm == &chunkselm->rd) {
+ extent_node_t *chunkselm_next = qr_next(chunkselm,
+ cc_link);
+ void *addr = extent_node_addr_get(chunkselm);
+ size_t size = extent_node_size_get(chunkselm);
+ bool zeroed = extent_node_zeroed_get(chunkselm);
+ bool committed = extent_node_committed_get(chunkselm);
+ extent_node_dirty_remove(chunkselm);
+ arena_node_dalloc(arena, chunkselm);
+ chunkselm = chunkselm_next;
+ chunk_dalloc_arena(arena, chunk_hooks, addr, size,
+ zeroed, committed);
+ } else {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ bool decommitted = (arena_mapbits_decommitted_get(chunk,
+ pageind) != 0);
+ arena_run_t *run = &miscelm->run;
+ qr_remove(rdelm, rd_link);
+ arena_run_dalloc(arena, run, false, true, decommitted);
+ }
}
}
-static inline size_t
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
+static void
+arena_purge(arena_t *arena, bool all)
{
- size_t npurged;
- arena_chunk_mapelms_t mapelms;
-
- ql_new(&mapelms);
-
- /*
- * If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail, and 2) so that it cannot be
- * completely discarded by another thread while arena->lock is dropped
- * by this thread. Note that the arena_run_dalloc() call will
- * implicitly deallocate the chunk, so no explicit action is required
- * in this function to deallocate the chunk.
- *
- * Note that once a chunk contains dirty pages, it cannot again contain
- * a single run unless 1) it is a dirty run, or 2) this function purges
- * dirty pages and causes the transition to a single clean run. Thus
- * (chunk == arena->spare) is possible, but it is not possible for
- * this function to be called on the spare unless it contains a dirty
- * run.
- */
- if (chunk == arena->spare) {
- assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
- assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
-
- arena_chunk_alloc(arena);
- }
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ size_t npurge, npurgeable, npurged;
+ arena_runs_dirty_link_t purge_runs_sentinel;
+ extent_node_t purge_chunks_sentinel;
- if (config_stats)
- arena->stats.purged += chunk->ndirty;
+ arena->purging = true;
/*
- * Operate on all dirty runs if there is no clean/dirty run
- * fragmentation.
+ * Calls to arena_dirty_count() are disabled even for debug builds
+ * because overhead grows nonlinearly as memory usage increases.
*/
- if (chunk->nruns_adjac == 0)
- all = true;
-
- arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
- npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
- arena_chunk_unstash_purged(arena, chunk, &mapelms);
-
- return (npurged);
-}
-
-static void
-arena_purge(arena_t *arena, bool all)
-{
- arena_chunk_t *chunk;
- size_t npurgatory;
- if (config_debug) {
- size_t ndirty = 0;
-
- arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
- chunks_dirty_iter_cb, (void *)&ndirty);
+ if (false && config_debug) {
+ size_t ndirty = arena_dirty_count(arena);
assert(ndirty == arena->ndirty);
}
- assert(arena->ndirty > arena->npurgatory || all);
- assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
- arena->npurgatory) || all);
+ assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
if (config_stats)
arena->stats.npurge++;
- /*
- * Add the minimum number of pages this thread should try to purge to
- * arena->npurgatory. This will keep multiple threads from racing to
- * reduce ndirty below the threshold.
- */
- npurgatory = arena_compute_npurgatory(arena, all);
- arena->npurgatory += npurgatory;
-
- while (npurgatory > 0) {
- size_t npurgeable, npurged, nunpurged;
+ npurge = arena_compute_npurge(arena, all);
+ qr_new(&purge_runs_sentinel, rd_link);
+ extent_node_dirty_linkage_init(&purge_chunks_sentinel);
- /* Get next chunk with dirty pages. */
- chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
- if (chunk == NULL) {
- /*
- * This thread was unable to purge as many pages as
- * originally intended, due to races with other threads
- * that either did some of the purging work, or re-used
- * dirty pages.
- */
- arena->npurgatory -= npurgatory;
- return;
- }
- npurgeable = chunk->ndirty;
- assert(npurgeable != 0);
+ npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
+ &purge_runs_sentinel, &purge_chunks_sentinel);
+ assert(npurgeable >= npurge);
+ npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
+ &purge_chunks_sentinel);
+ assert(npurged == npurgeable);
+ arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
+ &purge_chunks_sentinel);
- if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
- /*
- * This thread will purge all the dirty pages in chunk,
- * so set npurgatory to reflect this thread's intent to
- * purge the pages. This tends to reduce the chances
- * of the following scenario:
- *
- * 1) This thread sets arena->npurgatory such that
- * (arena->ndirty - arena->npurgatory) is at the
- * threshold.
- * 2) This thread drops arena->lock.
- * 3) Another thread causes one or more pages to be
- * dirtied, and immediately determines that it must
- * purge dirty pages.
- *
- * If this scenario *does* play out, that's okay,
- * because all of the purging work being done really
- * needs to happen.
- */
- arena->npurgatory += npurgeable - npurgatory;
- npurgatory = npurgeable;
- }
-
- /*
- * Keep track of how many pages are purgeable, versus how many
- * actually get purged, and adjust counters accordingly.
- */
- arena->npurgatory -= npurgeable;
- npurgatory -= npurgeable;
- npurged = arena_chunk_purge(arena, chunk, all);
- nunpurged = npurgeable - npurged;
- arena->npurgatory += nunpurged;
- npurgatory += nunpurged;
- }
+ arena->purging = false;
}
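The rewritten arena_purge() is driven by the lg_dirty_mult ratio asserted above: purging is warranted while ndirty exceeds nactive >> lg_dirty_mult, i.e. while more than one dirty page is retained per 2^lg_dirty_mult active pages. A simplified sketch of that target computation (arena_compute_npurge() is more involved; the names below are stand-ins):

#include <stddef.h>
#include <sys/types.h>

/*
 * Pages above the tolerated dirty threshold; with lg_dirty_mult == 3,
 * up to nactive/8 dirty pages are kept around for reuse.
 */
static size_t
npurge_target(size_t nactive, size_t ndirty, ssize_t lg_dirty_mult)
{
	size_t threshold;

	if (lg_dirty_mult < 0)		/* Ratio-based purging disabled. */
		return (0);
	threshold = nactive >> lg_dirty_mult;
	return (ndirty > threshold ? ndirty - threshold : 0);
}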
void
@@ -1090,7 +1549,8 @@ arena_purge_all(arena_t *arena)
static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
- size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
+ size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
+ size_t flag_decommitted)
{
size_t size = *p_size;
size_t run_ind = *p_run_ind;
@@ -1099,7 +1559,9 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
/* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages &&
arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
- arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
+ arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
+ flag_decommitted) {
size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind+run_pages);
size_t nrun_pages = nrun_size >> LG_PAGE;
@@ -1112,8 +1574,18 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
run_ind+run_pages+nrun_pages-1) == nrun_size);
assert(arena_mapbits_dirty_get(chunk,
run_ind+run_pages+nrun_pages-1) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
- false, true);
+ assert(arena_mapbits_decommitted_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == flag_decommitted);
+ arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
+
+ /*
+ * If the successor is dirty, remove it from the set of dirty
+ * pages.
+ */
+ if (flag_dirty != 0) {
+ arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
+ nrun_pages);
+ }
size += nrun_size;
run_pages += nrun_pages;
@@ -1126,7 +1598,8 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
/* Try to coalesce backward. */
if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
- flag_dirty) {
+ flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
+ flag_decommitted) {
size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind-1);
size_t prun_pages = prun_size >> LG_PAGE;
@@ -1140,8 +1613,18 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
prun_size);
assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
- false);
+ assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+ flag_decommitted);
+ arena_avail_remove(arena, chunk, run_ind, prun_pages);
+
+ /*
+ * If the predecessor is dirty, remove it from the set of dirty
+ * pages.
+ */
+ if (flag_dirty != 0) {
+ arena_run_dirty_remove(arena, chunk, run_ind,
+ prun_pages);
+ }
size += prun_size;
run_pages += prun_pages;
@@ -1156,26 +1639,53 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
*p_run_pages = run_pages;
}
-static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
+static size_t
+arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t run_ind)
{
- arena_chunk_t *chunk;
- size_t size, run_ind, run_pages, flag_dirty;
+ size_t size;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
assert(run_ind >= map_bias);
assert(run_ind < chunk_npages);
+
if (arena_mapbits_large_get(chunk, run_ind) != 0) {
size = arena_mapbits_large_size_get(chunk, run_ind);
- assert(size == PAGE ||
- arena_mapbits_large_size_get(chunk,
+ assert(size == PAGE || arena_mapbits_large_size_get(chunk,
run_ind+(size>>LG_PAGE)-1) == 0);
} else {
- size_t binind = arena_bin_index(arena, run->bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
size = bin_info->run_size;
}
+
+ return (size);
+}
+
+static bool
+arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t run_ind = arena_miscelm_to_pageind(miscelm);
+ size_t offset = run_ind << LG_PAGE;
+ size_t length = arena_run_size_get(arena, chunk, run, run_ind);
+
+ return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
+ arena->ind));
+}
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
+ bool decommitted)
+{
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
+ assert(run_ind >= map_bias);
+ assert(run_ind < chunk_npages);
+ size = arena_run_size_get(arena, chunk, run, run_ind);
run_pages = (size >> LG_PAGE);
arena_cactive_update(arena, 0, run_pages);
arena->nactive -= run_pages;
@@ -1187,16 +1697,18 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
*/
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
+ if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
+ != 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
+ flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
/* Mark pages as unallocated in the chunk map. */
- if (dirty) {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- CHUNK_MAP_DIRTY);
+ if (dirty || decommitted) {
+ size_t flags = flag_dirty | flag_decommitted;
+ arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- CHUNK_MAP_DIRTY);
+ flags);
} else {
arena_mapbits_unallocated_set(chunk, run_ind, size,
arena_mapbits_unzeroed_get(chunk, run_ind));
@@ -1205,20 +1717,25 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
}
arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
- flag_dirty);
+ flag_dirty, flag_decommitted);
/* Insert into runs_avail, now that coalescing is complete. */
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
+ assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+ arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
+ arena_avail_insert(arena, chunk, run_ind, run_pages);
+
+ if (dirty)
+ arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
/* Deallocate chunk if it is now completely unused. */
- if (size == arena_maxclass) {
+ if (size == arena_maxrun) {
assert(run_ind == map_bias);
- assert(run_pages == (arena_maxclass >> LG_PAGE));
- arena_chunk_dealloc(arena, chunk);
+ assert(run_pages == (arena_maxrun >> LG_PAGE));
+ arena_chunk_dalloc(arena, chunk);
}
/*
@@ -1233,12 +1750,25 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
}
static void
+arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run)
+{
+ bool committed = arena_run_decommit(arena, chunk, run);
+
+ arena_run_dalloc(arena, run, committed, false, !committed);
+}
+
+static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize)
{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
size_t head_npages = (oldsize - newsize) >> LG_PAGE;
size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+ size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
assert(oldsize > newsize);
@@ -1248,8 +1778,11 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* run first, in case of single-page runs.
*/
assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages-1)));
+ arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
if (config_debug) {
UNUSED size_t tail_npages = newsize >> LG_PAGE;
@@ -1259,18 +1792,25 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
pageind+head_npages+tail_npages-1) == flag_dirty);
}
arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
- flag_dirty);
+ flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages)));
- arena_run_dalloc(arena, run, false, false);
+ arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
}
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize, bool dirty)
{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
size_t head_npages = newsize >> LG_PAGE;
size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+ size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+ arena_chunk_map_misc_t *tail_miscelm;
+ arena_run_t *tail_run;
assert(oldsize > newsize);
@@ -1280,8 +1820,11 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* run first, in case of single-page runs.
*/
assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages-1)));
+ arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
if (config_debug) {
UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
@@ -1291,29 +1834,21 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
pageind+head_npages+tail_npages-1) == flag_dirty);
}
arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
- flag_dirty);
+ flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages)));
- arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
- dirty, false);
+ tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
+ tail_run = &tail_miscelm->run;
+ arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
+ 0));
}
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
- arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
- if (mapelm != NULL) {
- arena_chunk_t *chunk;
- size_t pageind;
- arena_run_t *run;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
- pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t))) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) <<
- LG_PAGE));
- return (run);
- }
+ arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
+ if (miscelm != NULL)
+ return (&miscelm->run);
return (NULL);
}
@@ -1321,25 +1856,21 @@ arena_bin_runs_first(arena_bin_t *bin)
static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
+ assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
- arena_run_tree_insert(&bin->runs, mapelm);
+ arena_run_tree_insert(&bin->runs, miscelm);
}
static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
+ assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
- arena_run_tree_remove(&bin->runs, mapelm);
+ arena_run_tree_remove(&bin->runs, miscelm);
}
static arena_run_t *
@@ -1358,7 +1889,7 @@ static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
- size_t binind;
+ index_t binind;
arena_bin_info_t *bin_info;
/* Look for a usable run. */
@@ -1376,14 +1907,10 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_small(arena, bin_info->run_size, binind);
if (run != NULL) {
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
/* Initialize run internals. */
- run->bin = bin;
- run->nextind = 0;
+ run->binind = binind;
run->nfree = bin_info->nregs;
- bitmap_init(bitmap, &bin_info->bitmap_info);
+ bitmap_init(run->bitmap, &bin_info->bitmap_info);
}
malloc_mutex_unlock(&arena->lock);
/********************************/
@@ -1413,7 +1940,7 @@ static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
void *ret;
- size_t binind;
+ index_t binind;
arena_bin_info_t *bin_info;
arena_run_t *run;
@@ -1459,7 +1986,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
uint64_t prof_accumbytes)
{
unsigned i, nfill;
@@ -1479,9 +2006,20 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ptr = arena_bin_malloc_hard(arena, bin);
- if (ptr == NULL)
+ if (ptr == NULL) {
+ /*
+ * OOM. tbin->avail isn't yet filled down to its first
+ * element, so the successful allocations (if any) must
+ * be moved to the base of tbin->avail before bailing
+ * out.
+ */
+ if (i > 0) {
+ memmove(tbin->avail, &tbin->avail[nfill - i],
+ i * sizeof(void *));
+ }
break;
- if (config_fill && opt_junk) {
+ }
+ if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ptr, &arena_bin_info[binind],
true);
}
@@ -1489,9 +2027,9 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
tbin->avail[nfill - 1 - i] = ptr;
}
if (config_stats) {
- bin->stats.allocated += i * arena_bin_info[binind].reg_size;
bin->stats.nmalloc += i;
bin->stats.nrequests += tbin->tstats.nrequests;
+ bin->stats.curregs += i;
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
@@ -1543,24 +2081,29 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
size_t i;
bool error = false;
- for (i = 1; i <= redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
- if (*byte != 0xa5) {
- error = true;
- arena_redzone_corruption(ptr, size, false, i, *byte);
- if (reset)
- *byte = 0xa5;
+ if (opt_junk_alloc) {
+ for (i = 1; i <= redzone_size; i++) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
+ if (*byte != 0xa5) {
+ error = true;
+ arena_redzone_corruption(ptr, size, false, i,
+ *byte);
+ if (reset)
+ *byte = 0xa5;
+ }
}
- }
- for (i = 0; i < redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
- if (*byte != 0xa5) {
- error = true;
- arena_redzone_corruption(ptr, size, true, i, *byte);
- if (reset)
- *byte = 0xa5;
+ for (i = 0; i < redzone_size; i++) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
+ if (*byte != 0xa5) {
+ error = true;
+ arena_redzone_corruption(ptr, size, true, i,
+ *byte);
+ if (reset)
+ *byte = 0xa5;
+ }
}
}
+
if (opt_abort && error)
abort();
}
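arena_redzones_validate() above scans the 0xa5 fill on both sides of a small region and reports any clobbered byte. A self-contained sketch of the same check with made-up sizes (REDZONE_SIZE/REGION_SIZE here are illustrative, not jemalloc's bin parameters):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REDZONE_BYTE	0xa5
#define REDZONE_SIZE	16
#define REGION_SIZE	32

/* Region laid out as [redzone][payload][redzone], as for small bins. */
static uint8_t buf[REDZONE_SIZE + REGION_SIZE + REDZONE_SIZE];

static int
redzones_ok(const uint8_t *ptr, size_t size, size_t redzone_size)
{
	size_t i;

	for (i = 1; i <= redzone_size; i++) {
		if (ptr[-(ptrdiff_t)i] != REDZONE_BYTE)
			return (0);
	}
	for (i = 0; i < redzone_size; i++) {
		if (ptr[size + i] != REDZONE_BYTE)
			return (0);
	}
	return (1);
}

int
main(void)
{
	uint8_t *ptr = buf + REDZONE_SIZE;

	memset(buf, REDZONE_BYTE, sizeof(buf));
	memset(ptr, 0, REGION_SIZE);	/* Application payload. */
	printf("redzones %s\n", redzones_ok(ptr, REGION_SIZE, REDZONE_SIZE) ?
	    "intact" : "corrupted");
	ptr[REGION_SIZE] = 0;		/* Simulated one-byte overflow. */
	printf("redzones %s\n", redzones_ok(ptr, REGION_SIZE, REDZONE_SIZE) ?
	    "intact" : "corrupted");
	return (0);
}

Clobbering even one byte past the region flips the second check, which is the condition arena_redzone_corruption() reports above.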
@@ -1588,14 +2131,14 @@ arena_dalloc_junk_small_t *arena_dalloc_junk_small =
void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
- size_t binind;
+ index_t binind;
arena_bin_info_t *bin_info;
cassert(config_fill);
- assert(opt_junk);
+ assert(opt_junk_free);
assert(opt_quarantine);
assert(usize <= SMALL_MAXCLASS);
- binind = SMALL_SIZE2BIN(usize);
+ binind = size2index(usize);
bin_info = &arena_bin_info[binind];
arena_redzones_validate(ptr, bin_info, true);
}
@@ -1606,12 +2149,12 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
void *ret;
arena_bin_t *bin;
arena_run_t *run;
- size_t binind;
+ index_t binind;
- binind = SMALL_SIZE2BIN(size);
+ binind = size2index(size);
assert(binind < NBINS);
bin = &arena->bins[binind];
- size = arena_bin_info[binind].reg_size;
+ size = index2size(binind);
malloc_mutex_lock(&bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
@@ -1625,29 +2168,29 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
}
if (config_stats) {
- bin->stats.allocated += size;
bin->stats.nmalloc++;
bin->stats.nrequests++;
+ bin->stats.curregs++;
}
malloc_mutex_unlock(&bin->lock);
- if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+ if (config_prof && !isthreaded && arena_prof_accum(arena, size))
prof_idump();
- if (zero == false) {
+ if (!zero) {
if (config_fill) {
- if (opt_junk) {
+ if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
- } else if (opt_zero)
+ } else if (unlikely(opt_zero))
memset(ret, 0, size);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
- if (config_fill && opt_junk) {
+ if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
@@ -1658,36 +2201,59 @@ void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
+ size_t usize;
+ uintptr_t random_offset;
+ arena_run_t *run;
+ arena_chunk_map_misc_t *miscelm;
UNUSED bool idump;
/* Large allocation. */
- size = PAGE_CEILING(size);
+ usize = s2u(size);
malloc_mutex_lock(&arena->lock);
- ret = (void *)arena_run_alloc_large(arena, size, zero);
- if (ret == NULL) {
+ if (config_cache_oblivious) {
+ uint64_t r;
+
+ /*
+ * Compute a uniformly distributed offset within the first page
+ * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
+ * for 4 KiB pages and 64-byte cachelines.
+ */
+ prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
+ UINT64_C(6364136223846793005),
+ UINT64_C(1442695040888963409));
+ random_offset = ((uintptr_t)r) << LG_CACHELINE;
+ } else
+ random_offset = 0;
+ run = arena_run_alloc_large(arena, usize + large_pad, zero);
+ if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
+ miscelm = arena_run_to_miscelm(run);
+ ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
+ random_offset);
if (config_stats) {
+ index_t index = size2index(usize) - NBINS;
+
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ arena->stats.allocated_large += usize;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
}
if (config_prof)
- idump = arena_prof_accum_locked(arena, size);
+ idump = arena_prof_accum_locked(arena, usize);
malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
prof_idump();
- if (zero == false) {
+ if (!zero) {
if (config_fill) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
+ if (unlikely(opt_junk_alloc))
+ memset(ret, 0xa5, usize);
+ else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
}
}
@@ -1695,18 +2261,25 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
}
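The cache-oblivious path in arena_malloc_large() above offsets each large allocation within its first page by a random multiple of the cacheline size, drawn from a 64-bit LCG seeded per arena; arena_palloc() below recovers the run base by masking with ~PAGE_MASK. A standalone sketch of the offset computation, assuming 4 KiB pages and 64-byte cachelines:

#include <stdint.h>
#include <stdio.h>

#define LG_PAGE		12	/* 4 KiB pages (assumed). */
#define LG_CACHELINE	6	/* 64-byte cachelines (assumed). */

/*
 * One step of Knuth's MMIX LCG; the high bits are the most random, so
 * keep the top nbits bits, giving a value in [0 .. 63] here.
 */
static uint64_t
lcg_next(uint64_t *state, unsigned nbits)
{
	*state = *state * UINT64_C(6364136223846793005) +
	    UINT64_C(1442695040888963407);
	return (*state >> (64 - nbits));
}

int
main(void)
{
	uint64_t state = 0x12345678;	/* Arbitrary per-arena seed. */
	int i;

	for (i = 0; i < 4; i++) {
		uintptr_t offset = (uintptr_t)lcg_next(&state,
		    LG_PAGE - LG_CACHELINE) << LG_CACHELINE;
		printf("random intra-page offset: %#lx\n",
		    (unsigned long)offset);
	}
	return (0);
}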
/* Only handles large allocations that require more than page alignment. */
-void *
-arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
+static void *
+arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+ bool zero)
{
void *ret;
size_t alloc_size, leadsize, trailsize;
arena_run_t *run;
arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
+
+ assert(usize == PAGE_CEILING(usize));
- assert((size & PAGE_MASK) == 0);
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
alignment = PAGE_CEILING(alignment);
- alloc_size = size + alignment - PAGE;
+ alloc_size = usize + large_pad + alignment - PAGE;
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_large(arena, alloc_size, false);
@@ -1715,37 +2288,94 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ rpages = arena_miscelm_to_rpages(miscelm);
- leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
- (uintptr_t)run;
- assert(alloc_size >= leadsize + size);
- trailsize = alloc_size - leadsize - size;
- ret = (void *)((uintptr_t)run + leadsize);
+ leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
+ (uintptr_t)rpages;
+ assert(alloc_size >= leadsize + usize);
+ trailsize = alloc_size - leadsize - usize - large_pad;
if (leadsize != 0) {
- arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
- leadsize);
+ arena_chunk_map_misc_t *head_miscelm = miscelm;
+ arena_run_t *head_run = run;
+
+ miscelm = arena_miscelm_get(chunk,
+ arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
+ LG_PAGE));
+ run = &miscelm->run;
+
+ arena_run_trim_head(arena, chunk, head_run, alloc_size,
+ alloc_size - leadsize);
}
if (trailsize != 0) {
- arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
- false);
+ arena_run_trim_tail(arena, chunk, run, usize + large_pad +
+ trailsize, usize + large_pad, false);
+ }
+ if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
+ size_t run_ind =
+ arena_miscelm_to_pageind(arena_run_to_miscelm(run));
+ bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
+ bool decommitted = (arena_mapbits_decommitted_get(chunk,
+ run_ind) != 0);
+
+ assert(decommitted); /* Cause of OOM. */
+ arena_run_dalloc(arena, run, dirty, false, decommitted);
+ malloc_mutex_unlock(&arena->lock);
+ return (NULL);
}
- arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
+ ret = arena_miscelm_to_rpages(miscelm);
if (config_stats) {
+ index_t index = size2index(usize) - NBINS;
+
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ arena->stats.allocated_large += usize;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
- if (config_fill && zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
+ if (config_fill && !zero) {
+ if (unlikely(opt_junk_alloc))
+ memset(ret, 0xa5, usize);
+ else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ return (ret);
+}
+
+void *
+arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+ bool zero, tcache_t *tcache)
+{
+ void *ret;
+
+ if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
+ && (usize & PAGE_MASK) == 0))) {
+ /* Small; alignment doesn't require special run placement. */
+ ret = arena_malloc(tsd, arena, usize, zero, tcache);
+ } else if (usize <= arena_maxclass && alignment <= PAGE) {
+ /*
+ * Large; alignment doesn't require special run placement.
+ * However, the cached pointer may be at a random offset from
+ * the base of the run, so do some bit manipulation to retrieve
+ * the base.
+ */
+ ret = arena_malloc(tsd, arena, usize, zero, tcache);
+ if (config_cache_oblivious)
+ ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
+ } else {
+ if (likely(usize <= arena_maxclass)) {
+ ret = arena_palloc_large(tsd, arena, usize, alignment,
+ zero);
+ } else if (likely(alignment <= chunksize))
+ ret = huge_malloc(tsd, arena, usize, zero, tcache);
+ else {
+ ret = huge_palloc(tsd, arena, usize, alignment, zero,
+ tcache);
+ }
}
return (ret);
}
@@ -1754,22 +2384,23 @@ void
arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
- size_t pageind, binind;
+ size_t pageind;
+ index_t binind;
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(ptr, false) == PAGE);
- assert(isalloc(ptr, true) == PAGE);
+ assert(isalloc(ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- binind = SMALL_SIZE2BIN(size);
+ binind = size2index(size);
assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind);
- assert(isalloc(ptr, false) == PAGE);
+ assert(isalloc(ptr, false) == LARGE_MINCLASS);
assert(isalloc(ptr, true) == size);
}
@@ -1782,7 +2413,8 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
if (run == bin->runcur)
bin->runcur = NULL;
else {
- size_t binind = arena_bin_index(chunk->arena, bin);
+ index_t binind = arena_bin_index(extent_node_arena_get(
+ &chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
if (bin_info->nregs != 1) {
@@ -1800,46 +2432,15 @@ static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
- size_t binind;
- arena_bin_info_t *bin_info;
- size_t npages, run_ind, past;
assert(run != bin->runcur);
- assert(arena_run_tree_search(&bin->runs,
- arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
- == NULL);
-
- binind = arena_bin_index(chunk->arena, run->bin);
- bin_info = &arena_bin_info[binind];
+ assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
+ NULL);
malloc_mutex_unlock(&bin->lock);
/******************************/
- npages = bin_info->run_size >> LG_PAGE;
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- past = (size_t)(PAGE_CEILING((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
- bin_info->reg_interval - bin_info->redzone_size) -
- (uintptr_t)chunk) >> LG_PAGE);
malloc_mutex_lock(&arena->lock);
-
- /*
- * If the run was originally clean, and some pages were never touched,
- * trim the clean pages before deallocating the dirty portion of the
- * run.
- */
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+npages-1));
- if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
- npages) {
- /* Trim clean pages. Convert to large run beforehand. */
- assert(npages > 0);
- arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
- arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
- arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
- ((past - run_ind) << LG_PAGE), false);
- /* npages = past - run_ind; */
- }
- arena_run_dalloc(arena, run, true, false);
+ arena_run_dalloc_decommit(arena, chunk, run);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
@@ -1868,26 +2469,24 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_runs_insert(bin, run);
}
-void
-arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_t *mapelm)
+static void
+arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_bits_t *bitselm, bool junked)
{
- size_t pageind;
+ size_t pageind, rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
arena_bin_info_t *bin_info;
- size_t size, binind;
+ index_t binind;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
- binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ run = &arena_miscelm_get(chunk, rpages_ind)->run;
+ binind = run->binind;
+ bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind];
- if (config_fill || config_stats)
- size = bin_info->reg_size;
- if (config_fill && opt_junk)
+ if (!junked && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, bin_info);
arena_run_reg_dalloc(run, ptr);
@@ -1898,23 +2497,32 @@ arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_bin_lower_run(arena, chunk, run, bin);
if (config_stats) {
- bin->stats.allocated -= size;
bin->stats.ndalloc++;
+ bin->stats.curregs--;
}
}
void
+arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_bits_t *bitselm)
+{
+
+ arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
+}
+
+void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind, arena_chunk_map_t *mapelm)
+ size_t pageind, arena_chunk_map_bits_t *bitselm)
{
arena_run_t *run;
arena_bin_t *bin;
+ size_t rpages_ind;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ run = &arena_miscelm_get(chunk, rpages_ind)->run;
+ bin = &arena->bins[run->binind];
malloc_mutex_lock(&bin->lock);
- arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
+ arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
malloc_mutex_unlock(&bin->lock);
}
@@ -1922,26 +2530,26 @@ void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind)
{
- arena_chunk_map_t *mapelm;
+ arena_chunk_map_bits_t *bitselm;
if (config_debug) {
/* arena_ptr_small_binind_get() does extra sanity checking. */
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
- mapelm = arena_mapp_get(chunk, pageind);
- arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
+ bitselm = arena_bitselm_get(chunk, pageind);
+ arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
-static void
+void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
- if (config_fill && opt_junk)
+ if (config_fill && unlikely(opt_junk_free))
memset(ptr, 0x5a, usize);
}
#ifdef JEMALLOC_JET
@@ -1952,23 +2560,38 @@ arena_dalloc_junk_large_t *arena_dalloc_junk_large =
#endif
void
-arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, bool junked)
{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t usize = arena_mapbits_large_size_get(chunk, pageind);
+ size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad;
- arena_dalloc_junk_large(ptr, usize);
+ if (!junked)
+ arena_dalloc_junk_large(ptr, usize);
if (config_stats) {
+ index_t index = size2index(usize) - NBINS;
+
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= usize;
- arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
+ arena->stats.lstats[index].ndalloc++;
+ arena->stats.lstats[index].curruns--;
}
}
- arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
+ arena_run_dalloc_decommit(arena, chunk, run);
+}
+
+void
+arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr)
+{
+
+ arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
}
void
@@ -1976,7 +2599,7 @@ arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
malloc_mutex_lock(&arena->lock);
- arena_dalloc_large_locked(arena, chunk, ptr);
+ arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
malloc_mutex_unlock(&arena->lock);
}
@@ -1984,6 +2607,9 @@ static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size)
{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_run_t *run = &miscelm->run;
assert(size < oldsize);
@@ -1992,20 +2618,23 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* allocations.
*/
malloc_mutex_lock(&arena->lock);
- arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
- true);
+ arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
+ large_pad, true);
if (config_stats) {
+ index_t oldindex = size2index(oldsize) - NBINS;
+ index_t index = size2index(size) - NBINS;
+
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+ arena->stats.lstats[oldindex].ndalloc++;
+ arena->stats.lstats[oldindex].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
}
@@ -2015,31 +2644,42 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size, size_t extra, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t npages = oldsize >> LG_PAGE;
+ size_t npages = (oldsize + large_pad) >> LG_PAGE;
size_t followsize;
+ size_t usize_min = s2u(size);
- assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
+ assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad);
/* Try to extend the run. */
- assert(size + extra > oldsize);
+ assert(usize_min > oldsize);
malloc_mutex_lock(&arena->lock);
- if (pageind + npages < chunk_npages &&
+ if (pageind+npages < chunk_npages &&
arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
(followsize = arena_mapbits_unallocated_size_get(chunk,
- pageind+npages)) >= size - oldsize) {
+ pageind+npages)) >= usize_min - oldsize) {
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
* allocation.
*/
- size_t flag_dirty;
- size_t splitsize = (oldsize + followsize <= size + extra)
- ? followsize : size + extra - oldsize;
- arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
- ((pageind+npages) << LG_PAGE)), splitsize, zero);
+ arena_run_t *run;
+ size_t flag_dirty, flag_unzeroed_mask, splitsize, usize;
+
+ usize = s2u(size + extra);
+ while (oldsize + followsize < usize)
+ usize = index2size(size2index(usize)-1);
+ assert(usize >= usize_min);
+ splitsize = usize - oldsize;
+
+ run = &arena_miscelm_get(chunk, pageind+npages)->run;
+ if (arena_run_split_large(arena, run, splitsize, zero)) {
+ malloc_mutex_unlock(&arena->lock);
+ return (true);
+ }
size = oldsize + splitsize;
- npages = size >> LG_PAGE;
+ npages = (size + large_pad) >> LG_PAGE;
/*
* Mark the extended run as dirty if either portion of the run
@@ -2051,21 +2691,29 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
*/
flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
arena_mapbits_dirty_get(chunk, pageind+npages-1);
- arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
- arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
+ flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
+ arena_mapbits_large_set(chunk, pageind, size + large_pad,
+ flag_dirty | (flag_unzeroed_mask &
+ arena_mapbits_unzeroed_get(chunk, pageind)));
+ arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+npages-1)));
if (config_stats) {
+ index_t oldindex = size2index(oldsize) - NBINS;
+ index_t index = size2index(size) - NBINS;
+
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+ arena->stats.lstats[oldindex].ndalloc++;
+ arena->stats.lstats[oldindex].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
return (false);
@@ -2083,7 +2731,7 @@ static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{
- if (config_fill && opt_junk) {
+ if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), 0x5a,
old_usize - usize);
}
@@ -2103,10 +2751,14 @@ static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
- size_t psize;
+ size_t usize;
- psize = PAGE_CEILING(size + extra);
- if (psize == oldsize) {
+ /* Make sure extra can't cause size_t overflow. */
+ if (unlikely(extra >= arena_maxclass))
+ return (true);
+
+ usize = s2u(size + extra);
+ if (usize == oldsize) {
/* Same size class. */
return (false);
} else {
@@ -2114,24 +2766,23 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
arena_t *arena;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = chunk->arena;
+ arena = extent_node_arena_get(&chunk->node);
- if (psize < oldsize) {
+ if (usize < oldsize) {
/* Fill before shrinking in order to avoid a race. */
- arena_ralloc_junk_large(ptr, oldsize, psize);
+ arena_ralloc_junk_large(ptr, oldsize, usize);
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
- psize);
+ usize);
return (false);
} else {
bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
- oldsize, PAGE_CEILING(size),
- psize - PAGE_CEILING(size), zero);
- if (config_fill && ret == false && zero == false) {
- if (opt_junk) {
+ oldsize, size, extra, zero);
+ if (config_fill && !ret && !zero) {
+ if (unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr +
oldsize), 0xa5, isalloc(ptr,
config_prof) - oldsize);
- } else if (opt_zero) {
+ } else if (unlikely(opt_zero)) {
memset((void *)((uintptr_t)ptr +
oldsize), 0, isalloc(ptr,
config_prof) - oldsize);
@@ -2147,84 +2798,103 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
- /*
- * Avoid moving the allocation if the size class can be left the same.
- */
- if (oldsize <= arena_maxclass) {
- if (oldsize <= SMALL_MAXCLASS) {
- assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
- == oldsize);
- if ((size + extra <= SMALL_MAXCLASS &&
- SMALL_SIZE2BIN(size + extra) ==
- SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
- size + extra >= oldsize))
- return (false);
- } else {
- assert(size <= arena_maxclass);
- if (size + extra > SMALL_MAXCLASS) {
- if (arena_ralloc_large(ptr, oldsize, size,
- extra, zero) == false)
+ if (likely(size <= arena_maxclass)) {
+ /*
+ * Avoid moving the allocation if the size class can be left the
+ * same.
+ */
+ if (likely(oldsize <= arena_maxclass)) {
+ if (oldsize <= SMALL_MAXCLASS) {
+ assert(
+ arena_bin_info[size2index(oldsize)].reg_size
+ == oldsize);
+ if ((size + extra <= SMALL_MAXCLASS &&
+ size2index(size + extra) ==
+ size2index(oldsize)) || (size <= oldsize &&
+ size + extra >= oldsize))
return (false);
+ } else {
+ assert(size <= arena_maxclass);
+ if (size + extra > SMALL_MAXCLASS) {
+ if (!arena_ralloc_large(ptr, oldsize,
+ size, extra, zero))
+ return (false);
+ }
}
}
- }
- /* Reallocation would require a move. */
- return (true);
+ /* Reallocation would require a move. */
+ return (true);
+ } else
+ return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
void *
-arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
- bool try_tcache_dalloc)
+arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
- size_t copysize;
- /* Try to avoid moving the allocation. */
- if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
- return (ptr);
+ if (likely(size <= arena_maxclass)) {
+ size_t copysize;
- /*
- * size and oldsize are different enough that we need to move the
- * object. In that case, fall back to allocating new space and
- * copying.
- */
- if (alignment != 0) {
- size_t usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
- } else
- ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
+ /* Try to avoid moving the allocation. */
+ if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
+ return (ptr);
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
+ /*
+ * size and oldsize are different enough that we need to move
+ * the object. In that case, fall back to allocating new space
+ * and copying.
+ */
if (alignment != 0) {
- size_t usize = sa2u(size, alignment);
+ size_t usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
+ ret = ipalloct(tsd, usize, alignment, zero, tcache,
arena);
- } else
- ret = arena_malloc(arena, size, zero, try_tcache_alloc);
+ } else {
+ ret = arena_malloc(tsd, arena, size + extra, zero,
+ tcache);
+ }
- if (ret == NULL)
- return (NULL);
- }
+ if (ret == NULL) {
+ if (extra == 0)
+ return (NULL);
+ /* Try again, this time without extra. */
+ if (alignment != 0) {
+ size_t usize = sa2u(size, alignment);
+ if (usize == 0)
+ return (NULL);
+ ret = ipalloct(tsd, usize, alignment, zero,
+ tcache, arena);
+ } else {
+ ret = arena_malloc(tsd, arena, size, zero,
+ tcache);
+ }
- /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
+ if (ret == NULL)
+ return (NULL);
+ }
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
- memcpy(ret, ptr, copysize);
- iqalloct(ptr, try_tcache_dalloc);
+ /*
+ * Junk/zero-filling were already done by
+ * ipalloc()/arena_malloc().
+ */
+
+ /*
+ * Copy at most size bytes (not size+extra), since the caller
+ * has no expectation that the extra bytes will be reliably
+ * preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
+ memcpy(ret, ptr, copysize);
+ isqalloc(tsd, ptr, oldsize, tcache);
+ } else {
+ ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
+ alignment, zero, tcache);
+ }
return (ret);
}
@@ -2239,24 +2909,46 @@ arena_dss_prec_get(arena_t *arena)
return (ret);
}
-void
+bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{
+ if (!have_dss)
+ return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&arena->lock);
arena->dss_prec = dss_prec;
malloc_mutex_unlock(&arena->lock);
+ return (false);
+}
+
+ssize_t
+arena_lg_dirty_mult_default_get(void)
+{
+
+ return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
+}
+
+bool
+arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
+{
+
+ if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+ return (true);
+ atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
+ return (false);
}
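arena_lg_dirty_mult_default_get()/_set() back the runtime-tunable dirty ratio. Assuming the mallctl names introduced alongside this code ("arenas.lg_dirty_mult" for the default, per-arena "arena.<i>.lg_dirty_mult"), a consumer could adjust it roughly like this sketch (the header name is FreeBSD's; verify against the installed jemalloc):

#include <stdio.h>
#include <sys/types.h>
#include <malloc_np.h>	/* FreeBSD; <jemalloc/jemalloc.h> elsewhere. */

int
main(void)
{
	ssize_t lg_dirty_mult = 5;	/* Tolerate 1 dirty per 32 active. */
	ssize_t old;
	size_t sz = sizeof(old);

	/* Read the current default, then tighten it. */
	if (mallctl("arenas.lg_dirty_mult", &old, &sz, NULL, 0) == 0)
		printf("old arenas.lg_dirty_mult: %zd\n", old);
	if (mallctl("arenas.lg_dirty_mult", NULL, NULL, &lg_dirty_mult,
	    sizeof(lg_dirty_mult)) != 0)
		fprintf(stderr, "mallctl write failed\n");
	return (0);
}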
void
-arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats)
+arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
+ size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+ malloc_huge_stats_t *hstats)
{
unsigned i;
malloc_mutex_lock(&arena->lock);
*dss = dss_prec_names[arena->dss_prec];
+ *lg_dirty_mult = arena->lg_dirty_mult;
*nactive += arena->nactive;
*ndirty += arena->ndirty;
@@ -2264,10 +2956,15 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
+ astats->metadata_mapped += arena->stats.metadata_mapped;
+ astats->metadata_allocated += arena_metadata_allocated_get(arena);
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
astats->nrequests_large += arena->stats.nrequests_large;
+ astats->allocated_huge += arena->stats.allocated_huge;
+ astats->nmalloc_huge += arena->stats.nmalloc_huge;
+ astats->ndalloc_huge += arena->stats.ndalloc_huge;
for (i = 0; i < nlclasses; i++) {
lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
@@ -2275,16 +2972,22 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
lstats[i].nrequests += arena->stats.lstats[i].nrequests;
lstats[i].curruns += arena->stats.lstats[i].curruns;
}
+
+ for (i = 0; i < nhclasses; i++) {
+ hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
+ hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
+ hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
+ }
malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
- bstats[i].allocated += bin->stats.allocated;
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
+ bstats[i].curregs += bin->stats.curregs;
if (config_tcache) {
bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes;
@@ -2296,27 +2999,42 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
}
}
-bool
-arena_new(arena_t *arena, unsigned ind)
+arena_t *
+arena_new(unsigned ind)
{
+ arena_t *arena;
unsigned i;
arena_bin_t *bin;
+ /*
+ * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
+ * because there is no way to clean up if base_alloc() OOMs.
+ */
+ if (config_stats) {
+ arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
+ + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)) +
+ nhclasses * sizeof(malloc_huge_stats_t));
+ } else
+ arena = (arena_t *)base_alloc(sizeof(arena_t));
+ if (arena == NULL)
+ return (NULL);
+
arena->ind = ind;
arena->nthreads = 0;
-
if (malloc_mutex_init(&arena->lock))
- return (true);
+ return (NULL);
if (config_stats) {
memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats =
- (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (arena->stats.lstats == NULL)
- return (true);
+ arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
+ + CACHELINE_CEILING(sizeof(arena_t)));
memset(arena->stats.lstats, 0, nlclasses *
sizeof(malloc_large_stats_t));
+ arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
+ + CACHELINE_CEILING(sizeof(arena_t)) +
+ QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
+ memset(arena->stats.hstats, 0, nhclasses *
+ sizeof(malloc_huge_stats_t));
if (config_tcache)
ql_new(&arena->tcache_ql);
}
@@ -2324,56 +3042,76 @@ arena_new(arena_t *arena, unsigned ind)
if (config_prof)
arena->prof_accumbytes = 0;
+ if (config_cache_oblivious) {
+ /*
+ * A nondeterministic seed based on the address of arena reduces
+ * the likelihood of lockstep non-uniform cache index
+ * utilization among identical concurrent processes, but at the
+ * cost of test repeatability. For debug builds, instead use a
+ * deterministic seed.
+ */
+ arena->offset_state = config_debug ? ind :
+ (uint64_t)(uintptr_t)arena;
+ }
+
arena->dss_prec = chunk_dss_prec_get();
- /* Initialize chunks. */
- arena_chunk_dirty_new(&arena->chunks_dirty);
arena->spare = NULL;
+ arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
+ arena->purging = false;
arena->nactive = 0;
arena->ndirty = 0;
- arena->npurgatory = 0;
arena_avail_tree_new(&arena->runs_avail);
+ qr_new(&arena->runs_dirty, rd_link);
+ qr_new(&arena->chunks_cache, cc_link);
+
+ ql_new(&arena->huge);
+ if (malloc_mutex_init(&arena->huge_mtx))
+ return (NULL);
+
+ extent_tree_szad_new(&arena->chunks_szad_cached);
+ extent_tree_ad_new(&arena->chunks_ad_cached);
+ extent_tree_szad_new(&arena->chunks_szad_retained);
+ extent_tree_ad_new(&arena->chunks_ad_retained);
+ if (malloc_mutex_init(&arena->chunks_mtx))
+ return (NULL);
+ ql_new(&arena->node_cache);
+ if (malloc_mutex_init(&arena->node_cache_mtx))
+ return (NULL);
+
+ arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
- return (true);
+ return (NULL);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
- return (false);
+ return (arena);
}
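
arena_new() now lays arena->stats.lstats and arena->stats.hstats out in the tail of a single base_alloc() request, at cacheline- and quantum-aligned offsets from the arena itself. The standalone sketch below mimics that layout arithmetic with stand-in names (align_up, fake_lstats_t, fake_hstats_t and the sizes are mine, not jemalloc's); it derives the total from the same offsets used to place lstats/hstats above, which is a slightly different grouping of the QUANTUM_CEILING term than the size expression passed to base_alloc() in the hunk.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for jemalloc's CACHELINE_CEILING()/QUANTUM_CEILING(). */
    static size_t
    align_up(size_t n, size_t align)
    {
        return ((n + align - 1) & ~(align - 1));
    }

    typedef struct { uint64_t nmalloc, ndalloc; } fake_lstats_t;
    typedef struct { uint64_t nmalloc, ndalloc; } fake_hstats_t;

    int
    main(void)
    {
        size_t nlclasses = 28, nhclasses = 8;   /* Hypothetical class counts. */
        size_t arena_sz = 512;                  /* Stand-in for sizeof(arena_t). */
        size_t lstats_off = align_up(arena_sz, 64);               /* cacheline */
        size_t hstats_off = lstats_off +
            align_up(nlclasses * sizeof(fake_lstats_t), 16);      /* quantum */
        size_t total = hstats_off + nhclasses * sizeof(fake_hstats_t);
        char *base = calloc(1, total);          /* base_alloc() in the real code. */

        if (base == NULL)
            return (1);
        fake_lstats_t *lstats = (fake_lstats_t *)(base + lstats_off);
        fake_hstats_t *hstats = (fake_hstats_t *)(base + hstats_off);
        printf("arena @%p lstats @%p hstats @%p (%zu bytes total)\n",
            (void *)base, (void *)lstats, (void *)hstats, total);
        free(base);
        return (0);
    }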
/*
* Calculate bin_info->run_size such that it meets the following constraints:
*
- * *) bin_info->run_size >= min_run_size
- * *) bin_info->run_size <= arena_maxclass
- * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
+ * *) bin_info->run_size <= arena_maxrun
* *) bin_info->nregs <= RUN_MAXREGS
*
- * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
- * calculated here, since these settings are all interdependent.
+ * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
+ * these settings are all interdependent.
*/
-static size_t
-bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
+static void
+bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
size_t pad_size;
- size_t try_run_size, good_run_size;
- uint32_t try_nregs, good_nregs;
- uint32_t try_hdr_size, good_hdr_size;
- uint32_t try_bitmap_offset, good_bitmap_offset;
- uint32_t try_ctx0_offset, good_ctx0_offset;
- uint32_t try_redzone0_offset, good_redzone0_offset;
-
- assert(min_run_size >= PAGE);
- assert(min_run_size <= arena_maxclass);
+ size_t try_run_size, perfect_run_size, actual_run_size;
+ uint32_t try_nregs, perfect_nregs, actual_nregs;
/*
* Determine redzone size based on minimum alignment and minimum
@@ -2382,8 +3120,9 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
* minimum alignment; without the padding, each redzone would have to
* be twice as large in order to maintain alignment.
*/
- if (config_fill && opt_redzone) {
- size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
+ if (config_fill && unlikely(opt_redzone)) {
+ size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
+ 1);
if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0;
@@ -2399,127 +3138,114 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
(bin_info->redzone_size << 1);
/*
- * Calculate known-valid settings before entering the run_size
- * expansion loop, so that the first part of the loop always copies
- * valid settings.
- *
- * The do..while loop iteratively reduces the number of regions until
- * the run header and the regions no longer overlap. A closed formula
- * would be quite messy, since there is an interdependency between the
- * header's mask length and the number of regions.
+ * Compute run size under ideal conditions (no redzones, no limit on run
+ * size).
*/
- try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /* Add space for one (prof_ctx_t *) per region. */
- try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
- } else
- try_ctx0_offset = 0;
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
-
- /* run_size expansion loop. */
+ try_run_size = PAGE;
+ try_nregs = try_run_size / bin_info->reg_size;
do {
- /*
- * Copy valid settings before trying more aggressive settings.
- */
- good_run_size = try_run_size;
- good_nregs = try_nregs;
- good_hdr_size = try_hdr_size;
- good_bitmap_offset = try_bitmap_offset;
- good_ctx0_offset = try_ctx0_offset;
- good_redzone0_offset = try_redzone0_offset;
-
- /* Try more aggressive settings. */
+ perfect_run_size = try_run_size;
+ perfect_nregs = try_nregs;
+
try_run_size += PAGE;
- try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /*
- * Add space for one (prof_ctx_t *) per region.
- */
- try_hdr_size += try_nregs *
- sizeof(prof_ctx_t *);
- }
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
- } while (try_run_size <= arena_maxclass
- && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
- RUN_MAX_OVRHD_RELAX
- && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
- && try_nregs < RUN_MAXREGS);
+ try_nregs = try_run_size / bin_info->reg_size;
+ } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
+ assert(perfect_nregs <= RUN_MAXREGS);
+
+ actual_run_size = perfect_run_size;
+ actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
+
+ /*
+ * Redzones can require enough padding that not even a single region can
+ * fit within the number of pages that would normally be dedicated to a
+ * run for this size class. Increase the run size until at least one
+ * region fits.
+ */
+ while (actual_nregs == 0) {
+ assert(config_fill && unlikely(opt_redzone));
- assert(good_hdr_size <= good_redzone0_offset);
+ actual_run_size += PAGE;
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
+ }
+
+ /*
+ * Make sure that the run will fit within an arena chunk.
+ */
+ while (actual_run_size > arena_maxrun) {
+ actual_run_size -= PAGE;
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
+ }
+ assert(actual_nregs > 0);
+ assert(actual_run_size == s2u(actual_run_size));
/* Copy final settings. */
- bin_info->run_size = good_run_size;
- bin_info->nregs = good_nregs;
- bin_info->bitmap_offset = good_bitmap_offset;
- bin_info->ctx0_offset = good_ctx0_offset;
- bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
+ bin_info->run_size = actual_run_size;
+ bin_info->nregs = actual_nregs;
+ bin_info->reg0_offset = actual_run_size - (actual_nregs *
+ bin_info->reg_interval) - pad_size + bin_info->redzone_size;
+
+ if (actual_run_size > small_maxrun)
+ small_maxrun = actual_run_size;
assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
* bin_info->reg_interval) + pad_size == bin_info->run_size);
-
- return (good_run_size);
}
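
The rewritten sizing logic first computes a "perfect" run size: the loop above walks page-sized candidates until one holds a whole number of regions, i.e. the smallest multiple of the page size that reg_size divides evenly. A standalone sketch of just that loop, with the page size fixed at 4 KiB for illustration:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE    4096    /* Assumed page size for the sketch. */

    /* Smallest run size that is both a page multiple and a reg_size multiple. */
    static size_t
    perfect_run_size(size_t reg_size)
    {
        size_t try_run_size = PAGE;
        size_t try_nregs = try_run_size / reg_size;
        size_t run_size, nregs;

        do {
            run_size = try_run_size;
            nregs = try_nregs;
            try_run_size += PAGE;
            try_nregs = try_run_size / reg_size;
        } while (run_size != nregs * reg_size);
        return (run_size);
    }

    int
    main(void)
    {
        /* 96-byte regions need a 3-page run before they tile exactly. */
        assert(perfect_run_size(96) == 3 * PAGE);
        assert(perfect_run_size(64) == PAGE);
        printf("ok\n");
        return (0);
    }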
static void
bin_info_init(void)
{
arena_bin_info_t *bin_info;
- size_t prev_run_size = PAGE;
-#define SIZE_CLASS(bin, delta, size) \
- bin_info = &arena_bin_info[bin]; \
+#define BIN_INFO_INIT_bin_yes(index, size) \
+ bin_info = &arena_bin_info[index]; \
bin_info->reg_size = size; \
- prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
+ bin_info_run_size_calc(bin_info); \
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+#define BIN_INFO_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
-#undef SIZE_CLASS
+#undef BIN_INFO_INIT_bin_yes
+#undef BIN_INFO_INIT_bin_no
+#undef SC
}
-void
+static bool
+small_run_size_init(void)
+{
+
+ assert(small_maxrun != 0);
+
+ small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
+ LG_PAGE));
+ if (small_run_tab == NULL)
+ return (true);
+
+#define TAB_INIT_bin_yes(index, size) { \
+ arena_bin_info_t *bin_info = &arena_bin_info[index]; \
+ small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
+ }
+#define TAB_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
+ SIZE_CLASSES
+#undef TAB_INIT_bin_yes
+#undef TAB_INIT_bin_no
+#undef SC
+
+ return (false);
+}
+
+bool
arena_boot(void)
{
size_t header_size;
unsigned i;
+ arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
+
/*
* Compute the header size such that it is large enough to contain the
* page map. The page map is biased to omit entries for the header
@@ -2534,16 +3260,33 @@ arena_boot(void)
*/
map_bias = 0;
for (i = 0; i < 3; i++) {
- header_size = offsetof(arena_chunk_t, map) +
- (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
- map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
- != 0);
+ header_size = offsetof(arena_chunk_t, map_bits) +
+ ((sizeof(arena_chunk_map_bits_t) +
+ sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
+ map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
}
assert(map_bias > 0);
- arena_maxclass = chunksize - (map_bias << LG_PAGE);
+ map_misc_offset = offsetof(arena_chunk_t, map_bits) +
+ sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
+
+ arena_maxrun = chunksize - (map_bias << LG_PAGE);
+ assert(arena_maxrun > 0);
+ arena_maxclass = index2size(size2index(chunksize)-1);
+ if (arena_maxclass > arena_maxrun) {
+ /*
+ * For small chunk sizes it's possible for there to be fewer
+ * non-header pages available than are necessary to serve the
+ * size classes just below chunksize.
+ */
+ arena_maxclass = arena_maxrun;
+ }
+ assert(arena_maxclass > 0);
+ nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
+ nhclasses = NSIZES - nlclasses - NBINS;
bin_info_init();
+ return (small_run_size_init());
}
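
map_bias and the chunk header size are mutually dependent (the header only carries map entries for the non-header pages), so arena_boot() iterates the computation; three passes reach a fixed point. A reduced sketch of the same loop, with made-up element sizes standing in for the arena_chunk_t layout:

    #include <stdio.h>

    #define LG_PAGE     12
    #define PAGE_MASK   ((1U << LG_PAGE) - 1)

    int
    main(void)
    {
        /* Hypothetical sizes standing in for the real chunk header layout. */
        size_t chunk_npages = 512;      /* 2 MiB chunk / 4 KiB pages. */
        size_t hdr_fixed = 128;         /* offsetof(arena_chunk_t, map_bits) */
        size_t map_elm = 8 + 48;        /* map_bits + map_misc per page */
        size_t map_bias = 0, header_size;
        int i;

        for (i = 0; i < 3; i++) {
            header_size = hdr_fixed + map_elm * (chunk_npages - map_bias);
            /* Round the header up to whole pages. */
            map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
            printf("pass %d: header=%zu bytes, map_bias=%zu pages\n",
                i, header_size, map_bias);
        }
        return (0);
    }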
void
@@ -2552,6 +3295,9 @@ arena_prefork(arena_t *arena)
unsigned i;
malloc_mutex_prefork(&arena->lock);
+ malloc_mutex_prefork(&arena->huge_mtx);
+ malloc_mutex_prefork(&arena->chunks_mtx);
+ malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(&arena->bins[i].lock);
}
@@ -2563,6 +3309,9 @@ arena_postfork_parent(arena_t *arena)
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(&arena->bins[i].lock);
+ malloc_mutex_postfork_parent(&arena->node_cache_mtx);
+ malloc_mutex_postfork_parent(&arena->chunks_mtx);
+ malloc_mutex_postfork_parent(&arena->huge_mtx);
malloc_mutex_postfork_parent(&arena->lock);
}
@@ -2573,5 +3322,8 @@ arena_postfork_child(arena_t *arena)
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(&arena->bins[i].lock);
+ malloc_mutex_postfork_child(&arena->node_cache_mtx);
+ malloc_mutex_postfork_child(&arena->chunks_mtx);
+ malloc_mutex_postfork_child(&arena->huge_mtx);
malloc_mutex_postfork_child(&arena->lock);
}
diff --git a/contrib/jemalloc/src/base.c b/contrib/jemalloc/src/base.c
index 4e62e8f..7cdcfed 100644
--- a/contrib/jemalloc/src/base.c
+++ b/contrib/jemalloc/src/base.c
@@ -5,107 +5,138 @@
/* Data. */
static malloc_mutex_t base_mtx;
-
-/*
- * Current pages that are being used for internal memory allocations. These
- * pages are carved up in cacheline-size quanta, so that there is no chance of
- * false cache line sharing.
- */
-static void *base_pages;
-static void *base_next_addr;
-static void *base_past_addr; /* Addr immediately past base_pages. */
+static extent_tree_t base_avail_szad;
static extent_node_t *base_nodes;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static bool base_pages_alloc(size_t minsize);
+static size_t base_allocated;
+static size_t base_resident;
+static size_t base_mapped;
/******************************************************************************/
-static bool
-base_pages_alloc(size_t minsize)
+/* base_mtx must be held. */
+static extent_node_t *
+base_node_try_alloc(void)
{
- size_t csize;
- bool zero;
+ extent_node_t *node;
+
+ if (base_nodes == NULL)
+ return (NULL);
+ node = base_nodes;
+ base_nodes = *(extent_node_t **)node;
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ return (node);
+}
- assert(minsize != 0);
- csize = CHUNK_CEILING(minsize);
- zero = false;
- base_pages = chunk_alloc(csize, chunksize, true, &zero,
- chunk_dss_prec_get());
- if (base_pages == NULL)
- return (true);
- base_next_addr = base_pages;
- base_past_addr = (void *)((uintptr_t)base_pages + csize);
+/* base_mtx must be held. */
+static void
+base_node_dalloc(extent_node_t *node)
+{
- return (false);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ *(extent_node_t **)node = base_nodes;
+ base_nodes = node;
}
-void *
-base_alloc(size_t size)
+/* base_mtx must be held. */
+static extent_node_t *
+base_chunk_alloc(size_t minsize)
{
- void *ret;
- size_t csize;
+ extent_node_t *node;
+ size_t csize, nsize;
+ void *addr;
- /* Round size up to nearest multiple of the cacheline size. */
- csize = CACHELINE_CEILING(size);
-
- malloc_mutex_lock(&base_mtx);
- /* Make sure there's enough space for the allocation. */
- if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
- if (base_pages_alloc(csize)) {
- malloc_mutex_unlock(&base_mtx);
- return (NULL);
+ assert(minsize != 0);
+ node = base_node_try_alloc();
+ /* Allocate enough space to also carve a node out if necessary. */
+ nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
+ csize = CHUNK_CEILING(minsize + nsize);
+ addr = chunk_alloc_base(csize);
+ if (addr == NULL) {
+ if (node != NULL)
+ base_node_dalloc(node);
+ return (NULL);
+ }
+ base_mapped += csize;
+ if (node == NULL) {
+ node = (extent_node_t *)addr;
+ addr = (void *)((uintptr_t)addr + nsize);
+ csize -= nsize;
+ if (config_stats) {
+ base_allocated += nsize;
+ base_resident += PAGE_CEILING(nsize);
}
}
- /* Allocate. */
- ret = base_next_addr;
- base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
- malloc_mutex_unlock(&base_mtx);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
-
- return (ret);
+ extent_node_init(node, NULL, addr, csize, true, true);
+ return (node);
}
+/*
+ * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
+ * sparse data structures such as radix tree nodes efficient with respect to
+ * physical memory usage.
+ */
void *
-base_calloc(size_t number, size_t size)
-{
- void *ret = base_alloc(number * size);
-
- if (ret != NULL)
- memset(ret, 0, number * size);
-
- return (ret);
-}
-
-extent_node_t *
-base_node_alloc(void)
+base_alloc(size_t size)
{
- extent_node_t *ret;
+ void *ret;
+ size_t csize, usize;
+ extent_node_t *node;
+ extent_node_t key;
+
+ /*
+ * Round size up to nearest multiple of the cacheline size, so that
+ * there is no chance of false cache line sharing.
+ */
+ csize = CACHELINE_CEILING(size);
+ usize = s2u(csize);
+ extent_node_init(&key, NULL, NULL, usize, false, false);
malloc_mutex_lock(&base_mtx);
- if (base_nodes != NULL) {
- ret = base_nodes;
- base_nodes = *(extent_node_t **)ret;
- malloc_mutex_unlock(&base_mtx);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
+ node = extent_tree_szad_nsearch(&base_avail_szad, &key);
+ if (node != NULL) {
+ /* Use existing space. */
+ extent_tree_szad_remove(&base_avail_szad, node);
} else {
- malloc_mutex_unlock(&base_mtx);
- ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
+ /* Try to allocate more space. */
+ node = base_chunk_alloc(csize);
+ }
+ if (node == NULL) {
+ ret = NULL;
+ goto label_return;
}
+ ret = extent_node_addr_get(node);
+ if (extent_node_size_get(node) > csize) {
+ extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
+ extent_node_size_set(node, extent_node_size_get(node) - csize);
+ extent_tree_szad_insert(&base_avail_szad, node);
+ } else
+ base_node_dalloc(node);
+ if (config_stats) {
+ base_allocated += csize;
+ /*
+ * Add one PAGE to base_resident for every page boundary that is
+ * crossed by the new allocation.
+ */
+ base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
+ PAGE_CEILING((uintptr_t)ret);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
+label_return:
+ malloc_mutex_unlock(&base_mtx);
return (ret);
}
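
base_alloc() now serves requests from a size/address-ordered tree of free extents and reinserts whatever is left of the chosen extent. A simplified sketch of that carve-and-shrink step, with a plain struct standing in for extent_node_t and no tree (assumed simplifications):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        void    *addr;
        size_t  size;
    } fake_extent_t;

    /*
     * Carve csize bytes off the front of a free extent; shrink the extent in
     * place so it can be reinserted into the free tree (or retired if empty).
     */
    static void *
    carve(fake_extent_t *node, size_t csize)
    {
        void *ret;

        if (node->size < csize)
            return (NULL);
        ret = node->addr;
        node->addr = (void *)((uintptr_t)node->addr + csize);
        node->size -= csize;
        return (ret);
    }

    int
    main(void)
    {
        char backing[4096];
        fake_extent_t node = { backing, sizeof(backing) };
        void *a = carve(&node, 256);
        void *b = carve(&node, 512);

        printf("a=%p b=%p remaining=%zu\n", a, b, node.size);
        return (0);
    }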
void
-base_node_dealloc(extent_node_t *node)
+base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{
- VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
- *(extent_node_t **)node = base_nodes;
- base_nodes = node;
+ assert(base_allocated <= base_resident);
+ assert(base_resident <= base_mapped);
+ *allocated = base_allocated;
+ *resident = base_resident;
+ *mapped = base_mapped;
malloc_mutex_unlock(&base_mtx);
}
@@ -113,9 +144,10 @@ bool
base_boot(void)
{
- base_nodes = NULL;
if (malloc_mutex_init(&base_mtx))
return (true);
+ extent_tree_szad_new(&base_avail_szad);
+ base_nodes = NULL;
return (false);
}
diff --git a/contrib/jemalloc/src/bitmap.c b/contrib/jemalloc/src/bitmap.c
index e2bd907..c733372 100644
--- a/contrib/jemalloc/src/bitmap.c
+++ b/contrib/jemalloc/src/bitmap.c
@@ -2,19 +2,6 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static size_t bits2groups(size_t nbits);
-
-/******************************************************************************/
-
-static size_t
-bits2groups(size_t nbits)
-{
-
- return ((nbits >> LG_BITMAP_GROUP_NBITS) +
- !!(nbits & BITMAP_GROUP_NBITS_MASK));
-}
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
@@ -31,15 +18,16 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
- group_count = bits2groups(nbits);
+ group_count = BITMAP_BITS2GROUPS(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
- group_count = bits2groups(group_count);
+ group_count = BITMAP_BITS2GROUPS(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
+ assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
binfo->nlevels = i;
binfo->nbits = nbits;
}
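
The removed bits2groups() helper and its BITMAP_BITS2GROUPS() replacement are both a ceiling division of nbits by the group width. A one-file sketch of the identity, assuming 64-bit groups:

    #include <assert.h>

    #define LG_GROUP_NBITS  6                   /* 64-bit groups. */
    #define GROUP_NBITS     (1U << LG_GROUP_NBITS)
    #define GROUP_MASK      (GROUP_NBITS - 1)

    /* Equivalent to (nbits + GROUP_NBITS - 1) / GROUP_NBITS. */
    #define BITS2GROUPS(nbits)                          \
        (((nbits) >> LG_GROUP_NBITS) + !!((nbits) & GROUP_MASK))

    int
    main(void)
    {
        assert(BITS2GROUPS(1) == 1);
        assert(BITS2GROUPS(64) == 1);
        assert(BITS2GROUPS(65) == 2);
        assert(BITS2GROUPS(512) == 8);
        return (0);
    }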
diff --git a/contrib/jemalloc/src/chunk.c b/contrib/jemalloc/src/chunk.c
index 90ab116..6ba1ca7 100644
--- a/contrib/jemalloc/src/chunk.c
+++ b/contrib/jemalloc/src/chunk.c
@@ -5,129 +5,315 @@
/* Data. */
const char *opt_dss = DSS_DEFAULT;
-size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
+size_t opt_lg_chunk = 0;
-malloc_mutex_t chunks_mtx;
-chunk_stats_t stats_chunks;
+/* Used exclusively for gdump triggering. */
+static size_t curchunks;
+static size_t highchunks;
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering). These are used when allocating chunks, in an attempt to re-use
- * address space. Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t chunks_szad_mmap;
-static extent_tree_t chunks_ad_mmap;
-static extent_tree_t chunks_szad_dss;
-static extent_tree_t chunks_ad_dss;
-
-rtree_t *chunks_rtree;
+rtree_t chunks_rtree;
/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
-size_t map_bias;
-size_t arena_maxclass; /* Max size class for arenas. */
+
+static void *chunk_alloc_default(void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind);
+static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
+ size_t size_b, bool committed, unsigned arena_ind);
+static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
+ size_t size_b, bool committed, unsigned arena_ind);
+
+const chunk_hooks_t chunk_hooks_default = {
+ chunk_alloc_default,
+ chunk_dalloc_default,
+ chunk_commit_default,
+ chunk_decommit_default,
+ chunk_purge_default,
+ chunk_split_default,
+ chunk_merge_default
+};
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
-static void *chunk_recycle(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
- bool *zero);
-static void chunk_record(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, void *chunk, size_t size);
+static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool zeroed, bool committed);
/******************************************************************************/
+static chunk_hooks_t
+chunk_hooks_get_locked(arena_t *arena)
+{
+
+ return (arena->chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_get(arena_t *arena)
+{
+ chunk_hooks_t chunk_hooks;
+
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks = chunk_hooks_get_locked(arena);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+
+ return (chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+{
+ chunk_hooks_t old_chunk_hooks;
+
+ malloc_mutex_lock(&arena->chunks_mtx);
+ old_chunk_hooks = arena->chunk_hooks;
+ /*
+ * Copy each field atomically so that it is impossible for readers to
+ * see partially updated pointers. There are places where readers only
+ * need one hook function pointer (therefore no need to copy the
+ * entirety of arena->chunk_hooks), and stale reads do not affect
+ * correctness, so they perform unlocked reads.
+ */
+#define ATOMIC_COPY_HOOK(n) do { \
+ union { \
+ chunk_##n##_t **n; \
+ void **v; \
+ } u; \
+ u.n = &arena->chunk_hooks.n; \
+ atomic_write_p(u.v, chunk_hooks->n); \
+} while (0)
+ ATOMIC_COPY_HOOK(alloc);
+ ATOMIC_COPY_HOOK(dalloc);
+ ATOMIC_COPY_HOOK(commit);
+ ATOMIC_COPY_HOOK(decommit);
+ ATOMIC_COPY_HOOK(purge);
+ ATOMIC_COPY_HOOK(split);
+ ATOMIC_COPY_HOOK(merge);
+#undef ATOMIC_COPY_HOOK
+ malloc_mutex_unlock(&arena->chunks_mtx);
+
+ return (old_chunk_hooks);
+}
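
chunk_hooks_set() backs the new pluggable chunk-hook interface. The sketch below shows what installing a logging wrapper might look like from application code; the hook signature follows chunk_alloc_default() above, while the public chunk_hooks_t type and the "arena.0.chunk_hooks" mallctl name are taken from the jemalloc 4.0.0 manual and should be treated as assumptions here.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>  /* Assumed install location of the header. */

    static chunk_hooks_t old_hooks; /* Previous hooks, captured at install time. */

    static void *
    log_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
        bool *commit, unsigned arena_ind)
    {
        /* Logging from a hook is illustrative only; a real hook should avoid
         * calls that may themselves allocate. */
        fprintf(stderr, "chunk alloc: %zu bytes (arena %u)\n", size, arena_ind);
        return (old_hooks.alloc(new_addr, size, alignment, zero, commit,
            arena_ind));
    }

    int
    main(void)
    {
        chunk_hooks_t new_hooks;
        size_t sz = sizeof(old_hooks);

        /* Read the current hooks so the wrapper can delegate to them. */
        if (mallctl("arena.0.chunk_hooks", &old_hooks, &sz, NULL, 0) != 0)
            return (1);
        new_hooks = old_hooks;
        new_hooks.alloc = log_chunk_alloc;
        if (mallctl("arena.0.chunk_hooks", NULL, NULL, &new_hooks,
            sizeof(new_hooks)) != 0)
            return (1);
        free(malloc(1 << 22));  /* Large enough to reach the chunk layer. */
        return (0);
    }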
+
+static void
+chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ bool locked)
+{
+ static const chunk_hooks_t uninitialized_hooks =
+ CHUNK_HOOKS_INITIALIZER;
+
+ if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
+ 0) {
+ *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
+ chunk_hooks_get(arena);
+ }
+}
+
+static void
+chunk_hooks_assure_initialized_locked(arena_t *arena,
+ chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+}
+
+static void
+chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+}
+
+bool
+chunk_register(const void *chunk, const extent_node_t *node)
+{
+
+ assert(extent_node_addr_get(node) == chunk);
+
+ if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
+ return (true);
+ if (config_prof && opt_prof) {
+ size_t size = extent_node_size_get(node);
+ size_t nadd = (size == 0) ? 1 : size / chunksize;
+ size_t cur = atomic_add_z(&curchunks, nadd);
+ size_t high = atomic_read_z(&highchunks);
+ while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
+ /*
+ * Don't refresh cur, because it may have decreased
+ * since this thread lost the highchunks update race.
+ */
+ high = atomic_read_z(&highchunks);
+ }
+ if (cur > high && prof_gdump_get_unlocked())
+ prof_gdump();
+ }
+
+ return (false);
+}
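
chunk_register() maintains a lock-free high-water mark for gdump: bump curchunks, then CAS highchunks upward until the update sticks or another thread has already raised it past cur (jemalloc's atomic_cas_z reports failure by returning true, which is why the loop above keeps iterating while the CAS fails). The same pattern in portable C11 atomics, as a standalone sketch:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic size_t curchunks;
    static _Atomic size_t highchunks;

    /* Returns true if this call raised the high-water mark. */
    static bool
    chunks_add(size_t nadd)
    {
        size_t cur = atomic_fetch_add(&curchunks, nadd) + nadd;
        size_t high = atomic_load(&highchunks);
        bool raised = false;

        while (cur > high) {
            if (atomic_compare_exchange_weak(&highchunks, &high, cur)) {
                raised = true;
                break;
            }
            /* Lost the race; 'high' now holds the other thread's value. */
        }
        return (raised);
    }

    int
    main(void)
    {
        printf("raised=%d\n", chunks_add(4));   /* 4 > 0, so this raises it. */
        printf("raised=%d\n", chunks_add(0));   /* No change. */
        return (0);
    }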
+
+void
+chunk_deregister(const void *chunk, const extent_node_t *node)
+{
+ bool err;
+
+ err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
+ assert(!err);
+ if (config_prof && opt_prof) {
+ size_t size = extent_node_size_get(node);
+ size_t nsub = (size == 0) ? 1 : size / chunksize;
+ assert(atomic_read_z(&curchunks) >= nsub);
+ atomic_sub_z(&curchunks, nsub);
+ }
+}
+
+/*
+ * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
+ * fits.
+ */
+static extent_node_t *
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, size_t size)
+{
+ extent_node_t key;
+
+ assert(size == CHUNK_CEILING(size));
+
+ extent_node_init(&key, arena, NULL, size, false, false);
+ return (extent_tree_szad_nsearch(chunks_szad, &key));
+}
+
static void *
-chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
- size_t alignment, bool base, bool *zero)
+chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+ bool dalloc_node)
{
void *ret;
extent_node_t *node;
- extent_node_t key;
size_t alloc_size, leadsize, trailsize;
- bool zeroed;
+ bool zeroed, committed;
- if (base) {
- /*
- * This function may need to call base_node_{,de}alloc(), but
- * the current chunk allocation request is on behalf of the
- * base allocator. Avoid deadlock (and if that weren't an
- * issue, potential for infinite recursion) by returning NULL.
- */
- return (NULL);
- }
+ assert(new_addr == NULL || alignment == chunksize);
+ /*
+ * Cached chunks use the node linkage embedded in their headers, in
+ * which case dalloc_node is true, and new_addr is non-NULL because
+ * we're operating on a specific chunk.
+ */
+ assert(dalloc_node || new_addr != NULL);
- alloc_size = size + alignment - chunksize;
+ alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
- key.addr = NULL;
- key.size = alloc_size;
- malloc_mutex_lock(&chunks_mtx);
- node = extent_tree_szad_nsearch(chunks_szad, &key);
- if (node == NULL) {
- malloc_mutex_unlock(&chunks_mtx);
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ if (new_addr != NULL) {
+ extent_node_t key;
+ extent_node_init(&key, arena, new_addr, alloc_size, false,
+ false);
+ node = extent_tree_ad_search(chunks_ad, &key);
+ } else {
+ node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
+ alloc_size);
+ }
+ if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
+ size)) {
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL);
}
- leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
- (uintptr_t)node->addr;
- assert(node->size >= leadsize + size);
- trailsize = node->size - leadsize - size;
- ret = (void *)((uintptr_t)node->addr + leadsize);
- zeroed = node->zeroed;
+ leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
+ alignment) - (uintptr_t)extent_node_addr_get(node);
+ assert(new_addr == NULL || leadsize == 0);
+ assert(extent_node_size_get(node) >= leadsize + size);
+ trailsize = extent_node_size_get(node) - leadsize - size;
+ ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+ zeroed = extent_node_zeroed_get(node);
if (zeroed)
- *zero = true;
+ *zero = true;
+ committed = extent_node_committed_get(node);
+ if (committed)
+ *commit = true;
+ /* Split the lead. */
+ if (leadsize != 0 &&
+ chunk_hooks->split(extent_node_addr_get(node),
+ extent_node_size_get(node), leadsize, size, false, arena->ind)) {
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ return (NULL);
+ }
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
- node->size = leadsize;
+ extent_node_size_set(node, leadsize);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
if (trailsize != 0) {
+ /* Split the trail. */
+ if (chunk_hooks->split(ret, size + trailsize, size,
+ trailsize, false, arena->ind)) {
+ if (dalloc_node && node != NULL)
+ arena_node_dalloc(arena, node);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
+ cache, ret, size + trailsize, zeroed, committed);
+ return (NULL);
+ }
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
- /*
- * An additional node is required, but
- * base_node_alloc() can cause a new base chunk to be
- * allocated. Drop chunks_mtx in order to avoid
- * deadlock, and if node allocation fails, deallocate
- * the result before returning an error.
- */
- malloc_mutex_unlock(&chunks_mtx);
- node = base_node_alloc();
+ node = arena_node_alloc(arena);
if (node == NULL) {
- chunk_dealloc(ret, size, true);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad,
+ chunks_ad, cache, ret, size + trailsize,
+ zeroed, committed);
return (NULL);
}
- malloc_mutex_lock(&chunks_mtx);
}
- node->addr = (void *)((uintptr_t)(ret) + size);
- node->size = trailsize;
- node->zeroed = zeroed;
+ extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
+ trailsize, zeroed, committed);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
- malloc_mutex_unlock(&chunks_mtx);
+ if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
+ ret, size, zeroed, committed);
+ return (NULL);
+ }
+ malloc_mutex_unlock(&arena->chunks_mtx);
- if (node != NULL)
- base_node_dealloc(node);
+ assert(dalloc_node || node != NULL);
+ if (dalloc_node && node != NULL)
+ arena_node_dalloc(arena, node);
if (*zero) {
- if (zeroed == false)
+ if (!zeroed)
memset(ret, 0, size);
else if (config_debug) {
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
- VALGRIND_MAKE_MEM_DEFINED(ret, size);
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
@@ -136,138 +322,214 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
}
/*
- * If the caller specifies (*zero == false), it is still possible to receive
- * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
- * takes advantage of this to avoid demanding zeroed chunks, but taking
- * advantage of them if they are returned.
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
+ * advantage of this to avoid demanding zeroed chunks, but taking advantage of
+ * them if they are returned.
*/
-void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
- dss_prec_t dss_prec)
+static void *
+chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
+ /* Retained. */
+ if ((ret = chunk_recycle(arena, &chunk_hooks,
+ &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, zero, commit, true)) != NULL)
+ return (ret);
+
/* "primary" dss. */
- if (config_dss && dss_prec == dss_prec_primary) {
- if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
- alignment, base, zero)) != NULL)
- goto label_return;
- if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
- }
- /* mmap. */
- if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
- alignment, base, zero)) != NULL)
- goto label_return;
- if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
- goto label_return;
+ if (have_dss && dss_prec == dss_prec_primary && (ret =
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+ NULL)
+ return (ret);
+ /*
+ * mmap. Requesting an address is not implemented for
+ * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
+ */
+ if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
+ commit)) != NULL)
+ return (ret);
/* "secondary" dss. */
- if (config_dss && dss_prec == dss_prec_secondary) {
- if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
- alignment, base, zero)) != NULL)
- goto label_return;
- if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
- }
+ if (have_dss && dss_prec == dss_prec_secondary && (ret =
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+ NULL)
+ return (ret);
/* All strategies for allocation failed. */
- ret = NULL;
-label_return:
- if (ret != NULL) {
- if (config_ivsalloc && base == false) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
- chunk_dealloc(ret, size, true);
- return (NULL);
- }
- }
- if (config_stats || config_prof) {
- bool gdump;
- malloc_mutex_lock(&chunks_mtx);
- if (config_stats)
- stats_chunks.nchunks += (size / chunksize);
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks) {
- stats_chunks.highchunks =
- stats_chunks.curchunks;
- if (config_prof)
- gdump = true;
- } else if (config_prof)
- gdump = false;
- malloc_mutex_unlock(&chunks_mtx);
- if (config_prof && opt_prof && opt_prof_gdump && gdump)
- prof_gdump();
- }
- if (config_valgrind)
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- }
- assert(CHUNK_ADDR2BASE(ret) == ret);
+ return (NULL);
+}
+
+void *
+chunk_alloc_base(size_t size)
+{
+ void *ret;
+ bool zero, commit;
+
+ /*
+ * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
+ * because it's critical that chunk_alloc_base() return untouched
+ * demand-zeroed virtual memory.
+ */
+ zero = true;
+ commit = true;
+ ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
return (ret);
}
-static void
-chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
- size_t size)
+void *
+chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
- bool unzeroed;
- extent_node_t *xnode, *node, *prev, *xprev, key;
+ void *ret;
+ bool commit;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
- unzeroed = pages_purge(chunk, size);
- VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+ commit = true;
+ ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
+ &commit, dalloc_node);
+ if (ret == NULL)
+ return (NULL);
+ assert(commit);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ return (ret);
+}
+
+static arena_t *
+chunk_arena_get(unsigned arena_ind)
+{
+ arena_t *arena;
+ /* Dodge tsd for a0 in order to avoid bootstrapping issues. */
+ arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
+ false, true);
/*
- * Allocate a node before acquiring chunks_mtx even though it might not
- * be needed, because base_node_alloc() may cause a new base chunk to
- * be allocated, which could cause deadlock if chunks_mtx were already
- * held.
+ * The arena we're allocating on behalf of must have been initialized
+ * already.
*/
- xnode = base_node_alloc();
- /* Use xprev to implement conditional deferred deallocation of prev. */
- xprev = NULL;
+ assert(arena != NULL);
+ return (arena);
+}
+
+static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit, unsigned arena_ind)
+{
+ void *ret;
+ arena_t *arena;
+
+ arena = chunk_arena_get(arena_ind);
+ ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
+ commit, arena->dss_prec);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- malloc_mutex_lock(&chunks_mtx);
- key.addr = (void *)((uintptr_t)chunk + size);
+ return (ret);
+}
+
+void *
+chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit)
+{
+ void *ret;
+
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
+ arena->ind);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
+ return (ret);
+}
+
+static void
+chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool zeroed, bool committed)
+{
+ bool unzeroed;
+ extent_node_t *node, *prev;
+ extent_node_t key;
+
+ assert(!cache || !zeroed);
+ unzeroed = cache || !zeroed;
+ JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
+ false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
- if (node != NULL && node->addr == key.addr) {
+ if (node != NULL && extent_node_addr_get(node) ==
+ extent_node_addr_get(&key) && extent_node_committed_get(node) ==
+ committed && !chunk_hooks->merge(chunk, size,
+ extent_node_addr_get(node), extent_node_size_get(node), false,
+ arena->ind)) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
- node->addr = chunk;
- node->size += size;
- node->zeroed = (node->zeroed && (unzeroed == false));
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ extent_node_addr_set(node, chunk);
+ extent_node_size_set(node, size + extent_node_size_get(node));
+ extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+ !unzeroed);
extent_tree_szad_insert(chunks_szad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
- if (xnode == NULL) {
+ node = arena_node_alloc(arena);
+ if (node == NULL) {
/*
- * base_node_alloc() failed, which is an exceedingly
- * unlikely failure. Leak chunk; its pages have
- * already been purged, so this is only a virtual
- * memory leak.
+ * Node allocation failed, which is an exceedingly
+ * unlikely failure. Leak chunk after making sure its
+ * pages have already been purged, so that this is only
+ * a virtual memory leak.
*/
+ if (cache) {
+ chunk_purge_wrapper(arena, chunk_hooks, chunk,
+ size, 0, size);
+ }
goto label_return;
}
- node = xnode;
- xnode = NULL; /* Prevent deallocation below. */
- node->addr = chunk;
- node->size = size;
- node->zeroed = (unzeroed == false);
+ extent_node_init(node, arena, chunk, size, !unzeroed,
+ committed);
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
- if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
- chunk) {
+ if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
+ extent_node_size_get(prev)) == chunk &&
+ extent_node_committed_get(prev) == committed &&
+ !chunk_hooks->merge(extent_node_addr_get(prev),
+ extent_node_size_get(prev), chunk, size, false, arena->ind)) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
@@ -275,44 +537,42 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
-
+ arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szad_remove(chunks_szad, node);
- node->addr = prev->addr;
- node->size += prev->size;
- node->zeroed = (node->zeroed && prev->zeroed);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ extent_node_addr_set(node, extent_node_addr_get(prev));
+ extent_node_size_set(node, extent_node_size_get(prev) +
+ extent_node_size_get(node));
+ extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
+ extent_node_zeroed_get(node));
extent_tree_szad_insert(chunks_szad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
- xprev = prev;
+ arena_node_dalloc(arena, prev);
}
label_return:
- malloc_mutex_unlock(&chunks_mtx);
- /*
- * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
- * avoid potential deadlock.
- */
- if (xnode != NULL)
- base_node_dealloc(xnode);
- if (xprev != NULL)
- base_node_dealloc(xprev);
+ malloc_mutex_unlock(&arena->chunks_mtx);
}
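
chunk_record() coalesces a freed chunk with its address-ordered neighbors whenever the hooks allow the merge; the adjacency tests are plain pointer arithmetic. A reduced sketch with a minimal span struct (no trees, no hooks):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        void    *addr;
        size_t  size;
    } span_t;

    /* Does 'next' start exactly where 'chunk' ends? */
    static bool
    adjacent_forward(void *chunk, size_t size, const span_t *next)
    {
        return ((void *)((uintptr_t)chunk + size) == next->addr);
    }

    /* Does 'chunk' start exactly where 'prev' ends? */
    static bool
    adjacent_backward(const span_t *prev, void *chunk)
    {
        return ((void *)((uintptr_t)prev->addr + prev->size) == chunk);
    }

    int
    main(void)
    {
        char vm[3 * 4096];
        span_t prev = { vm, 4096 };
        span_t next = { vm + 2 * 4096, 4096 };
        void *chunk = vm + 4096;

        printf("forward=%d backward=%d\n",
            adjacent_forward(chunk, 4096, &next),
            adjacent_backward(&prev, chunk));
        return (0);
    }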
void
-chunk_unmap(void *chunk, size_t size)
+chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool committed)
{
+
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
- if (config_dss && chunk_in_dss(chunk))
- chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
- else if (chunk_dealloc_mmap(chunk, size))
- chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, chunk, size, false, committed);
+ arena_maybe_purge(arena);
}
void
-chunk_dealloc(void *chunk, size_t size, bool unmap)
+chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool zeroed, bool committed)
{
assert(chunk != NULL);
@@ -320,22 +580,149 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
assert(size != 0);
assert((size & chunksize_mask) == 0);
- if (config_ivsalloc)
- rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
- if (config_stats || config_prof) {
- malloc_mutex_lock(&chunks_mtx);
- assert(stats_chunks.curchunks >= (size / chunksize));
- stats_chunks.curchunks -= (size / chunksize);
- malloc_mutex_unlock(&chunks_mtx);
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ /* Try to deallocate. */
+ if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
+ return;
+ /* Try to decommit; purge if that fails. */
+ if (committed) {
+ committed = chunk_hooks->decommit(chunk, size, 0, size,
+ arena->ind);
}
+ zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
+ arena->ind);
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+ &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+}
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind)
+{
+
+ if (!have_dss || !chunk_in_dss(chunk))
+ return (chunk_dalloc_mmap(chunk, size));
+ return (true);
+}
+
+void
+chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool committed)
+{
+
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+ if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+}
+
+static bool
+chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+static bool
+chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
- if (unmap)
- chunk_unmap(chunk, size);
+ return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+bool
+chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
+ length));
+}
+
+bool
+chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, size_t offset, size_t length)
+{
+
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
+}
+
+static bool
+chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ if (!maps_coalesce)
+ return (true);
+ return (false);
+}
+
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ if (!maps_coalesce)
+ return (true);
+ if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
+ return (true);
+
+ return (false);
+}
+
+static rtree_node_elm_t *
+chunks_rtree_node_alloc(size_t nelms)
+{
+
+ return ((rtree_node_elm_t *)base_alloc(nelms *
+ sizeof(rtree_node_elm_t)));
}
bool
chunk_boot(void)
{
+#ifdef _WIN32
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+
+ /*
+ * Verify actual page size is equal to or an integral multiple of
+ * configured page size.
+ */
+ if (info.dwPageSize & ((1U << LG_PAGE) - 1))
+ return (true);
+
+ /*
+ * Configure chunksize (if not set) to match granularity (usually 64K),
+ * so pages_map will always take fast path.
+ */
+ if (!opt_lg_chunk) {
+ opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
+ - 1;
+ }
+#else
+ if (!opt_lg_chunk)
+ opt_lg_chunk = LG_CHUNK_DEFAULT;
+#endif
/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);
@@ -343,23 +730,11 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
- if (config_stats || config_prof) {
- if (malloc_mutex_init(&chunks_mtx))
- return (true);
- memset(&stats_chunks, 0, sizeof(chunk_stats_t));
- }
- if (config_dss && chunk_dss_boot())
+ if (have_dss && chunk_dss_boot())
+ return (true);
+ if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
+ opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true);
- extent_tree_szad_new(&chunks_szad_mmap);
- extent_tree_ad_new(&chunks_ad_mmap);
- extent_tree_szad_new(&chunks_szad_dss);
- extent_tree_ad_new(&chunks_ad_dss);
- if (config_ivsalloc) {
- chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
- opt_lg_chunk, base_alloc, NULL);
- if (chunks_rtree == NULL)
- return (true);
- }
return (false);
}
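
On Windows, chunk_boot() now defaults the chunk size to the allocation granularity (typically 64 KiB), taking its lg via find-first-set since the granularity is a power of two. A small sketch of that computation using POSIX ffs() in place of jemalloc_ffs():

    #include <assert.h>
    #include <strings.h>    /* ffs(); the diff uses jemalloc_ffs() portably. */

    /* lg(x) for a power-of-two x, via find-first-set. */
    static int
    lg_pow2(unsigned x)
    {
        assert(x != 0 && (x & (x - 1)) == 0);
        return (ffs((int)x) - 1);
    }

    int
    main(void)
    {
        assert(lg_pow2(64 * 1024) == 16);   /* 64 KiB granularity. */
        assert(lg_pow2(4096) == 12);
        return (0);
    }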
@@ -368,9 +743,6 @@ void
chunk_prefork(void)
{
- malloc_mutex_prefork(&chunks_mtx);
- if (config_ivsalloc)
- rtree_prefork(chunks_rtree);
chunk_dss_prefork();
}
@@ -379,9 +751,6 @@ chunk_postfork_parent(void)
{
chunk_dss_postfork_parent();
- if (config_ivsalloc)
- rtree_postfork_parent(chunks_rtree);
- malloc_mutex_postfork_parent(&chunks_mtx);
}
void
@@ -389,7 +758,4 @@ chunk_postfork_child(void)
{
chunk_dss_postfork_child();
- if (config_ivsalloc)
- rtree_postfork_child(chunks_rtree);
- malloc_mutex_postfork_child(&chunks_mtx);
}
diff --git a/contrib/jemalloc/src/chunk_dss.c b/contrib/jemalloc/src/chunk_dss.c
index 510bb8b..de0546d 100644
--- a/contrib/jemalloc/src/chunk_dss.c
+++ b/contrib/jemalloc/src/chunk_dss.c
@@ -32,7 +32,7 @@ static void *
chunk_dss_sbrk(intptr_t increment)
{
-#ifdef JEMALLOC_HAVE_SBRK
+#ifdef JEMALLOC_DSS
return (sbrk(increment));
#else
not_implemented();
@@ -45,7 +45,7 @@ chunk_dss_prec_get(void)
{
dss_prec_t ret;
- if (config_dss == false)
+ if (!have_dss)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
@@ -57,8 +57,8 @@ bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
- if (config_dss == false)
- return (true);
+ if (!have_dss)
+ return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
@@ -66,11 +66,12 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
}
void *
-chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
+chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit)
{
void *ret;
- cassert(config_dss);
+ cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
assert(alignment > 0 && (alignment & chunksize_mask) == 0);
@@ -93,8 +94,17 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
* malloc.
*/
do {
+ /* Avoid an unnecessary system call. */
+ if (new_addr != NULL && dss_max != new_addr)
+ break;
+
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
+
+ /* Make sure the earlier condition still holds. */
+ if (new_addr != NULL && dss_max != new_addr)
+ break;
+
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
@@ -123,12 +133,20 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
- if (cpad_size != 0)
- chunk_unmap(cpad, cpad_size);
+ if (cpad_size != 0) {
+ chunk_hooks_t chunk_hooks =
+ CHUNK_HOOKS_INITIALIZER;
+ chunk_dalloc_wrapper(arena,
+ &chunk_hooks, cpad, cpad_size,
+ true);
+ }
if (*zero) {
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
+ ret, size);
memset(ret, 0, size);
}
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
} while (dss_prev != (void *)-1);
@@ -143,7 +161,7 @@ chunk_in_dss(void *chunk)
{
bool ret;
- cassert(config_dss);
+ cassert(have_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
@@ -160,7 +178,7 @@ bool
chunk_dss_boot(void)
{
- cassert(config_dss);
+ cassert(have_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
@@ -175,7 +193,7 @@ void
chunk_dss_prefork(void)
{
- if (config_dss)
+ if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}
@@ -183,7 +201,7 @@ void
chunk_dss_postfork_parent(void)
{
- if (config_dss)
+ if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}
@@ -191,7 +209,7 @@ void
chunk_dss_postfork_child(void)
{
- if (config_dss)
+ if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
}
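
chunk_alloc_dss() must chunk-align whatever the current break happens to be, and the pad in front of the aligned address (cpad in context not shown in this hunk) is handed back through chunk_dalloc_wrapper(). A sketch of just the increment arithmetic, with names of my own choosing:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Given the current break (dss_max), compute how far sbrk() must advance
     * so that a size-byte, alignment-aligned region can be returned; *ret
     * receives the aligned address, and the leading (aligned - dss_max) bytes
     * are the pad that gets recycled.
     */
    static size_t
    dss_increment(uintptr_t dss_max, size_t size, size_t alignment,
        uintptr_t *ret)
    {
        uintptr_t aligned = (dss_max + alignment - 1) &
            ~((uintptr_t)alignment - 1);

        *ret = aligned;
        return ((size_t)(aligned - dss_max) + size);
    }

    int
    main(void)
    {
        uintptr_t ret;
        size_t incr = dss_increment(0x80123456, 1 << 21, 1 << 21, &ret);

        assert((ret & ((1 << 21) - 1)) == 0);
        printf("pad=%zu total=%zu\n", (size_t)(ret - 0x80123456), incr);
        return (0);
    }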
diff --git a/contrib/jemalloc/src/chunk_mmap.c b/contrib/jemalloc/src/chunk_mmap.c
index 2056d79..36eb075 100644
--- a/contrib/jemalloc/src/chunk_mmap.c
+++ b/contrib/jemalloc/src/chunk_mmap.c
@@ -2,145 +2,9 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void *pages_map(void *addr, size_t size);
-static void pages_unmap(void *addr, size_t size);
-static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
- bool *zero);
-
-/******************************************************************************/
-
-static void *
-pages_map(void *addr, size_t size)
-{
- void *ret;
-
- assert(size != 0);
-
-#ifdef _WIN32
- /*
- * If VirtualAlloc can't allocate at the given address when one is
- * given, it fails and returns NULL.
- */
- ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
- PAGE_READWRITE);
-#else
- /*
- * We don't use MAP_FIXED here, because it can cause the *replacement*
- * of existing mappings, and we only want to create new mappings.
- */
- ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
- -1, 0);
- assert(ret != NULL);
-
- if (ret == MAP_FAILED)
- ret = NULL;
- else if (addr != NULL && ret != addr) {
- /*
- * We succeeded in mapping memory, but not in the right place.
- */
- if (munmap(ret, size) == -1) {
- char buf[BUFERROR_BUF];
-
- buferror(get_errno(), buf, sizeof(buf));
- malloc_printf("<jemalloc: Error in munmap(): %s\n",
- buf);
- if (opt_abort)
- abort();
- }
- ret = NULL;
- }
-#endif
- assert(ret == NULL || (addr == NULL && ret != addr)
- || (addr != NULL && ret == addr));
- return (ret);
-}
-
-static void
-pages_unmap(void *addr, size_t size)
-{
-
-#ifdef _WIN32
- if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
-#else
- if (munmap(addr, size) == -1)
-#endif
- {
- char buf[BUFERROR_BUF];
-
- buferror(get_errno(), buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in "
-#ifdef _WIN32
- "VirtualFree"
-#else
- "munmap"
-#endif
- "(): %s\n", buf);
- if (opt_abort)
- abort();
- }
-}
-
-static void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
-{
- void *ret = (void *)((uintptr_t)addr + leadsize);
-
- assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
- {
- void *new_addr;
-
- pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size);
- if (new_addr == ret)
- return (ret);
- if (new_addr)
- pages_unmap(new_addr, size);
- return (NULL);
- }
-#else
- {
- size_t trailsize = alloc_size - leadsize - size;
-
- if (leadsize != 0)
- pages_unmap(addr, leadsize);
- if (trailsize != 0)
- pages_unmap((void *)((uintptr_t)ret + size), trailsize);
- return (ret);
- }
-#endif
-}
-
-bool
-pages_purge(void *addr, size_t length)
-{
- bool unzeroed;
-
-#ifdef _WIN32
- VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
- unzeroed = true;
-#else
-# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-# define JEMALLOC_MADV_PURGE MADV_DONTNEED
-# define JEMALLOC_MADV_ZEROS true
-# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
-# define JEMALLOC_MADV_PURGE MADV_FREE
-# define JEMALLOC_MADV_ZEROS false
-# else
-# error "No method defined for purging unused dirty pages."
-# endif
- int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
- unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
-# undef JEMALLOC_MADV_PURGE
-# undef JEMALLOC_MADV_ZEROS
-#endif
- return (unzeroed);
-}
static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret, *pages;
size_t alloc_size, leadsize;
@@ -160,11 +24,13 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
assert(ret != NULL);
*zero = true;
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
void *
-chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
size_t offset;
@@ -191,20 +57,22 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment, zero));
+ return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
}
assert(ret != NULL);
*zero = true;
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
bool
-chunk_dealloc_mmap(void *chunk, size_t size)
+chunk_dalloc_mmap(void *chunk, size_t size)
{
if (config_munmap)
pages_unmap(chunk, size);
- return (config_munmap == false);
+ return (!config_munmap);
}
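
chunk_alloc_mmap() keeps the fast path of mapping exactly size bytes and falls back to the slow path only when the kernel's placement is not chunk-aligned; the slow path (together with the pages_trim() removed here and moved to pages.c) over-allocates by alignment minus a page and unmaps the lead and trail. A standalone POSIX sketch of that over-allocate-and-trim approach, assuming 4 KiB pages (not jemalloc's pages.c):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>

    /* Map 'size' bytes aligned to 'alignment' (both multiples of 4 KiB pages). */
    static void *
    map_aligned(size_t size, size_t alignment)
    {
        size_t alloc_size = size + alignment - 4096;
        char *addr, *ret;
        size_t lead, trail;

        addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (addr == MAP_FAILED)
            return (NULL);
        /* Round up to the alignment, then unmap the unused lead and trail. */
        ret = (char *)(((uintptr_t)addr + alignment - 1) & ~(alignment - 1));
        lead = (size_t)(ret - addr);
        trail = alloc_size - lead - size;
        if (lead != 0)
            munmap(addr, lead);
        if (trail != 0)
            munmap(ret + size, trail);
        return (ret);
    }

    int
    main(void)
    {
        size_t align = 1UL << 21;   /* 2 MiB, a typical chunk size. */
        void *p = map_aligned(1UL << 21, align);

        assert(p != NULL && ((uintptr_t)p & (align - 1)) == 0);
        printf("%p\n", p);
        return (0);
    }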
diff --git a/contrib/jemalloc/src/ckh.c b/contrib/jemalloc/src/ckh.c
index 04c5296..53a1c1e 100644
--- a/contrib/jemalloc/src/ckh.c
+++ b/contrib/jemalloc/src/ckh.c
@@ -40,8 +40,8 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static bool ckh_grow(ckh_t *ckh);
-static void ckh_shrink(ckh_t *ckh);
+static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
+static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
/******************************************************************************/
@@ -185,7 +185,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
}
bucket = tbucket;
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
return (false);
}
}
@@ -201,12 +201,12 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
return (false);
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
return (false);
/*
@@ -243,7 +243,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
}
static bool
-ckh_grow(ckh_t *ckh)
+ckh_grow(tsd_t *tsd, ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
@@ -270,7 +270,8 @@ ckh_grow(ckh_t *ckh)
ret = true;
goto label_return;
}
- tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
+ true, NULL);
if (tab == NULL) {
ret = true;
goto label_return;
@@ -281,13 +282,13 @@ ckh_grow(ckh_t *ckh)
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
- if (ckh_rebuild(ckh, tab) == false) {
- idalloc(tab);
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd, tab, tcache_get(tsd, false), true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloc(ckh->tab);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
@@ -298,7 +299,7 @@ label_return:
}
static void
-ckh_shrink(ckh_t *ckh)
+ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t lg_curcells, usize;
@@ -313,7 +314,8 @@ ckh_shrink(ckh_t *ckh)
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0)
return;
- tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+ NULL);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -327,8 +329,8 @@ ckh_shrink(ckh_t *ckh)
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
- if (ckh_rebuild(ckh, tab) == false) {
- idalloc(tab);
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd, tab, tcache_get(tsd, false), true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -336,7 +338,7 @@ ckh_shrink(ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloc(ckh->tab);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -345,7 +347,8 @@ ckh_shrink(ckh_t *ckh)
}
bool
-ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ ckh_keycomp_t *keycomp)
{
bool ret;
size_t mincells, usize;
@@ -366,10 +369,10 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh->count = 0;
/*
- * Find the minimum power of 2 that is large enough to fit aBaseCount
+ * Find the minimum power of 2 that is large enough to fit minitems
* entries. We are using (2+,2) cuckoo hashing, which has an expected
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
- * factor that will typically allow 2^aLgMinItems to fit without ever
+ * factor that will typically allow mincells items to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
@@ -388,7 +391,8 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ret = true;
goto label_return;
}
- ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+ NULL);
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -400,16 +404,16 @@ label_return:
}
void
-ckh_delete(ckh_t *ckh)
+ckh_delete(tsd_t *tsd, ckh_t *ckh)
{
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
- "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
- " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
- " nrelocs: %"PRIu64"\n", __func__, ckh,
+ "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
+ " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
+ " nrelocs: %"FMTu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
@@ -417,7 +421,7 @@ ckh_delete(ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
- idalloc(ckh->tab);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
}
@@ -452,7 +456,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
}
bool
-ckh_insert(ckh_t *ckh, const void *key, const void *data)
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
{
bool ret;
@@ -464,7 +468,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
#endif
while (ckh_try_insert(ckh, &key, &data)) {
- if (ckh_grow(ckh)) {
+ if (ckh_grow(tsd, ckh)) {
ret = true;
goto label_return;
}
@@ -476,7 +480,8 @@ label_return:
}
bool
-ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ void **data)
{
size_t cell;
@@ -497,7 +502,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
- ckh_shrink(ckh);
+ ckh_shrink(tsd, ckh);
}
return (false);
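
Note: the ckh.c hunks thread a tsd_t through the cuckoo-hash API so that table storage is obtained with ipallocztm() and released with idalloctm(). A short sketch of the updated calling convention follows; example_hash and example_keycomp are placeholder callbacks of type ckh_hash_t/ckh_keycomp_t, not functions from the diff.

/* Sketch only; callbacks and sizing are illustrative assumptions. */
static bool
example_ckh_roundtrip(const void *key, const void *data)
{
	tsd_t *tsd = tsd_fetch();
	ckh_t ckh;
	void *rkey, *rdata;

	if (ckh_new(tsd, &ckh, 16, example_hash, example_keycomp))
		return (true);	/* OOM. */
	if (ckh_insert(tsd, &ckh, key, data)) {
		ckh_delete(tsd, &ckh);
		return (true);
	}
	/* ckh_remove() may shrink the table, which now also needs tsd. */
	ckh_remove(tsd, &ckh, key, &rkey, &rdata);
	ckh_delete(tsd, &ckh);
	return (false);
}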
diff --git a/contrib/jemalloc/src/ctl.c b/contrib/jemalloc/src/ctl.c
index cc2c5ae..3de8e60 100644
--- a/contrib/jemalloc/src/ctl.c
+++ b/contrib/jemalloc/src/ctl.c
@@ -7,7 +7,6 @@
/*
* ctl_mtx protects the following:
* - ctl_stats.*
- * - opt_prof_active
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
@@ -17,14 +16,14 @@ static ctl_stats_t ctl_stats;
/******************************************************************************/
/* Helpers for named and indexed nodes. */
-static inline const ctl_named_node_t *
+JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node)
{
return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}
-static inline const ctl_named_node_t *
+JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index)
{
const ctl_named_node_t *children = ctl_named_node(node->children);
@@ -32,12 +31,11 @@ ctl_named_children(const ctl_named_node_t *node, int index)
return (children ? &children[index] : NULL);
}
-static inline const ctl_indexed_node_t *
+JEMALLOC_INLINE_C const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node)
{
- return ((node->named == false) ? (const ctl_indexed_node_t *)node :
- NULL);
+ return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}
/******************************************************************************/
@@ -68,16 +66,17 @@ CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_prof_name)
+CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
-CTL_PROTO(config_dss)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_mremap)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
@@ -99,22 +98,27 @@ CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
-CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
+CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
+CTL_PROTO(tcache_create)
+CTL_PROTO(tcache_flush)
+CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_purge)
static void arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_lg_dirty_mult)
+CTL_PROTO(arena_i_chunk_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
@@ -122,25 +126,26 @@ CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
+CTL_PROTO(arenas_hchunk_i_size)
+INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
+CTL_PROTO(arenas_lg_dirty_mult)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
-CTL_PROTO(arenas_purge)
+CTL_PROTO(arenas_nhchunks)
CTL_PROTO(arenas_extend)
+CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
+CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
-CTL_PROTO(stats_chunks_current)
-CTL_PROTO(stats_chunks_total)
-CTL_PROTO(stats_chunks_high)
-CTL_PROTO(stats_huge_allocated)
-CTL_PROTO(stats_huge_nmalloc)
-CTL_PROTO(stats_huge_ndalloc)
+CTL_PROTO(lg_prof_sample)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -149,10 +154,14 @@ CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_allocated)
+CTL_PROTO(stats_arenas_i_huge_allocated)
+CTL_PROTO(stats_arenas_i_huge_nmalloc)
+CTL_PROTO(stats_arenas_i_huge_ndalloc)
+CTL_PROTO(stats_arenas_i_huge_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns)
@@ -164,18 +173,28 @@ CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
+CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
+CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
+INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
+CTL_PROTO(stats_arenas_i_lg_dirty_mult)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
+CTL_PROTO(stats_arenas_i_metadata_mapped)
+CTL_PROTO(stats_arenas_i_metadata_allocated)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
+CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
/******************************************************************************/
@@ -197,71 +216,84 @@ CTL_PROTO(stats_mapped)
*/
#define INDEX(i) {false}, i##_index
-static const ctl_named_node_t tcache_node[] = {
+static const ctl_named_node_t thread_tcache_node[] = {
{NAME("enabled"), CTL(thread_tcache_enabled)},
{NAME("flush"), CTL(thread_tcache_flush)}
};
+static const ctl_named_node_t thread_prof_node[] = {
+ {NAME("name"), CTL(thread_prof_name)},
+ {NAME("active"), CTL(thread_prof_active)}
+};
+
static const ctl_named_node_t thread_node[] = {
{NAME("arena"), CTL(thread_arena)},
{NAME("allocated"), CTL(thread_allocated)},
{NAME("allocatedp"), CTL(thread_allocatedp)},
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
- {NAME("tcache"), CHILD(named, tcache)}
+ {NAME("tcache"), CHILD(named, thread_tcache)},
+ {NAME("prof"), CHILD(named, thread_prof)}
};
static const ctl_named_node_t config_node[] = {
- {NAME("debug"), CTL(config_debug)},
- {NAME("dss"), CTL(config_dss)},
- {NAME("fill"), CTL(config_fill)},
- {NAME("lazy_lock"), CTL(config_lazy_lock)},
- {NAME("mremap"), CTL(config_mremap)},
- {NAME("munmap"), CTL(config_munmap)},
- {NAME("prof"), CTL(config_prof)},
- {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
- {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
- {NAME("stats"), CTL(config_stats)},
- {NAME("tcache"), CTL(config_tcache)},
- {NAME("tls"), CTL(config_tls)},
- {NAME("utrace"), CTL(config_utrace)},
- {NAME("valgrind"), CTL(config_valgrind)},
- {NAME("xmalloc"), CTL(config_xmalloc)}
+ {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
+ {NAME("debug"), CTL(config_debug)},
+ {NAME("fill"), CTL(config_fill)},
+ {NAME("lazy_lock"), CTL(config_lazy_lock)},
+ {NAME("munmap"), CTL(config_munmap)},
+ {NAME("prof"), CTL(config_prof)},
+ {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
+ {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
+ {NAME("stats"), CTL(config_stats)},
+ {NAME("tcache"), CTL(config_tcache)},
+ {NAME("tls"), CTL(config_tls)},
+ {NAME("utrace"), CTL(config_utrace)},
+ {NAME("valgrind"), CTL(config_valgrind)},
+ {NAME("xmalloc"), CTL(config_xmalloc)}
};
static const ctl_named_node_t opt_node[] = {
- {NAME("abort"), CTL(opt_abort)},
- {NAME("dss"), CTL(opt_dss)},
- {NAME("lg_chunk"), CTL(opt_lg_chunk)},
- {NAME("narenas"), CTL(opt_narenas)},
- {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("stats_print"), CTL(opt_stats_print)},
- {NAME("junk"), CTL(opt_junk)},
- {NAME("zero"), CTL(opt_zero)},
- {NAME("quarantine"), CTL(opt_quarantine)},
- {NAME("redzone"), CTL(opt_redzone)},
- {NAME("utrace"), CTL(opt_utrace)},
- {NAME("valgrind"), CTL(opt_valgrind)},
- {NAME("xmalloc"), CTL(opt_xmalloc)},
- {NAME("tcache"), CTL(opt_tcache)},
- {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
- {NAME("prof"), CTL(opt_prof)},
- {NAME("prof_prefix"), CTL(opt_prof_prefix)},
- {NAME("prof_active"), CTL(opt_prof_active)},
- {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
- {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
- {NAME("prof_gdump"), CTL(opt_prof_gdump)},
- {NAME("prof_final"), CTL(opt_prof_final)},
- {NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)}
+ {NAME("abort"), CTL(opt_abort)},
+ {NAME("dss"), CTL(opt_dss)},
+ {NAME("lg_chunk"), CTL(opt_lg_chunk)},
+ {NAME("narenas"), CTL(opt_narenas)},
+ {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
+ {NAME("stats_print"), CTL(opt_stats_print)},
+ {NAME("junk"), CTL(opt_junk)},
+ {NAME("zero"), CTL(opt_zero)},
+ {NAME("quarantine"), CTL(opt_quarantine)},
+ {NAME("redzone"), CTL(opt_redzone)},
+ {NAME("utrace"), CTL(opt_utrace)},
+ {NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("tcache"), CTL(opt_tcache)},
+ {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
+ {NAME("prof"), CTL(opt_prof)},
+ {NAME("prof_prefix"), CTL(opt_prof_prefix)},
+ {NAME("prof_active"), CTL(opt_prof_active)},
+ {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
+ {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
+ {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
+ {NAME("prof_gdump"), CTL(opt_prof_gdump)},
+ {NAME("prof_final"), CTL(opt_prof_final)},
+ {NAME("prof_leak"), CTL(opt_prof_leak)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)}
+};
+
+static const ctl_named_node_t tcache_node[] = {
+ {NAME("create"), CTL(tcache_create)},
+ {NAME("flush"), CTL(tcache_flush)},
+ {NAME("destroy"), CTL(tcache_destroy)}
};
static const ctl_named_node_t arena_i_node[] = {
- {NAME("purge"), CTL(arena_i_purge)},
- {NAME("dss"), CTL(arena_i_dss)}
+ {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("dss"), CTL(arena_i_dss)},
+ {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
+ {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
- {NAME(""), CHILD(named, arena_i)}
+ {NAME(""), CHILD(named, arena_i)}
};
static const ctl_indexed_node_t arena_node[] = {
@@ -269,12 +301,12 @@ static const ctl_indexed_node_t arena_node[] = {
};
static const ctl_named_node_t arenas_bin_i_node[] = {
- {NAME("size"), CTL(arenas_bin_i_size)},
- {NAME("nregs"), CTL(arenas_bin_i_nregs)},
- {NAME("run_size"), CTL(arenas_bin_i_run_size)}
+ {NAME("size"), CTL(arenas_bin_i_size)},
+ {NAME("nregs"), CTL(arenas_bin_i_nregs)},
+ {NAME("run_size"), CTL(arenas_bin_i_run_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
- {NAME(""), CHILD(named, arenas_bin_i)}
+ {NAME(""), CHILD(named, arenas_bin_i)}
};
static const ctl_indexed_node_t arenas_bin_node[] = {
@@ -282,76 +314,93 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
};
static const ctl_named_node_t arenas_lrun_i_node[] = {
- {NAME("size"), CTL(arenas_lrun_i_size)}
+ {NAME("size"), CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t super_arenas_lrun_i_node[] = {
- {NAME(""), CHILD(named, arenas_lrun_i)}
+ {NAME(""), CHILD(named, arenas_lrun_i)}
};
static const ctl_indexed_node_t arenas_lrun_node[] = {
{INDEX(arenas_lrun_i)}
};
+static const ctl_named_node_t arenas_hchunk_i_node[] = {
+ {NAME("size"), CTL(arenas_hchunk_i_size)}
+};
+static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
+ {NAME(""), CHILD(named, arenas_hchunk_i)}
+};
+
+static const ctl_indexed_node_t arenas_hchunk_node[] = {
+ {INDEX(arenas_hchunk_i)}
+};
+
static const ctl_named_node_t arenas_node[] = {
- {NAME("narenas"), CTL(arenas_narenas)},
- {NAME("initialized"), CTL(arenas_initialized)},
- {NAME("quantum"), CTL(arenas_quantum)},
- {NAME("page"), CTL(arenas_page)},
- {NAME("tcache_max"), CTL(arenas_tcache_max)},
- {NAME("nbins"), CTL(arenas_nbins)},
- {NAME("nhbins"), CTL(arenas_nhbins)},
- {NAME("bin"), CHILD(indexed, arenas_bin)},
- {NAME("nlruns"), CTL(arenas_nlruns)},
- {NAME("lrun"), CHILD(indexed, arenas_lrun)},
- {NAME("purge"), CTL(arenas_purge)},
- {NAME("extend"), CTL(arenas_extend)}
+ {NAME("narenas"), CTL(arenas_narenas)},
+ {NAME("initialized"), CTL(arenas_initialized)},
+ {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
+ {NAME("quantum"), CTL(arenas_quantum)},
+ {NAME("page"), CTL(arenas_page)},
+ {NAME("tcache_max"), CTL(arenas_tcache_max)},
+ {NAME("nbins"), CTL(arenas_nbins)},
+ {NAME("nhbins"), CTL(arenas_nhbins)},
+ {NAME("bin"), CHILD(indexed, arenas_bin)},
+ {NAME("nlruns"), CTL(arenas_nlruns)},
+ {NAME("lrun"), CHILD(indexed, arenas_lrun)},
+ {NAME("nhchunks"), CTL(arenas_nhchunks)},
+ {NAME("hchunk"), CHILD(indexed, arenas_hchunk)},
+ {NAME("extend"), CTL(arenas_extend)}
};
static const ctl_named_node_t prof_node[] = {
+ {NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
- {NAME("interval"), CTL(prof_interval)}
+ {NAME("gdump"), CTL(prof_gdump)},
+ {NAME("reset"), CTL(prof_reset)},
+ {NAME("interval"), CTL(prof_interval)},
+ {NAME("lg_sample"), CTL(lg_prof_sample)}
};
-static const ctl_named_node_t stats_chunks_node[] = {
- {NAME("current"), CTL(stats_chunks_current)},
- {NAME("total"), CTL(stats_chunks_total)},
- {NAME("high"), CTL(stats_chunks_high)}
-};
-
-static const ctl_named_node_t stats_huge_node[] = {
- {NAME("allocated"), CTL(stats_huge_allocated)},
- {NAME("nmalloc"), CTL(stats_huge_nmalloc)},
- {NAME("ndalloc"), CTL(stats_huge_ndalloc)}
+static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
+ {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)},
+ {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)}
};
static const ctl_named_node_t stats_arenas_i_small_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
+ {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};
static const ctl_named_node_t stats_arenas_i_large_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
+ {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
+};
+
+static const ctl_named_node_t stats_arenas_i_huge_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)}
};
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
- {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
- {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
- {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
- {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
- {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
+ {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
+ {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
+ {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
+ {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
+ {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
+ {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
+ {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
+ {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};
static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
@@ -359,35 +408,53 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
};
static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
- {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
- {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
+ {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
+ {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
+ {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
};
static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
{INDEX(stats_arenas_i_lruns_j)}
};
+static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)},
+ {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)}
+};
+static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
+ {INDEX(stats_arenas_i_hchunks_j)}
+};
+
static const ctl_named_node_t stats_arenas_i_node[] = {
- {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
- {NAME("dss"), CTL(stats_arenas_i_dss)},
- {NAME("pactive"), CTL(stats_arenas_i_pactive)},
- {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
- {NAME("mapped"), CTL(stats_arenas_i_mapped)},
- {NAME("npurge"), CTL(stats_arenas_i_npurge)},
- {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
- {NAME("purged"), CTL(stats_arenas_i_purged)},
- {NAME("small"), CHILD(named, stats_arenas_i_small)},
- {NAME("large"), CHILD(named, stats_arenas_i_large)},
- {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
- {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
+ {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("dss"), CTL(stats_arenas_i_dss)},
+ {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
+ {NAME("pactive"), CTL(stats_arenas_i_pactive)},
+ {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
+ {NAME("mapped"), CTL(stats_arenas_i_mapped)},
+ {NAME("npurge"), CTL(stats_arenas_i_npurge)},
+ {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
+ {NAME("purged"), CTL(stats_arenas_i_purged)},
+ {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
+ {NAME("small"), CHILD(named, stats_arenas_i_small)},
+ {NAME("large"), CHILD(named, stats_arenas_i_large)},
+ {NAME("huge"), CHILD(named, stats_arenas_i_huge)},
+ {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
+ {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)},
+ {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i)}
+ {NAME(""), CHILD(named, stats_arenas_i)}
};
static const ctl_indexed_node_t stats_arenas_node[] = {
@@ -395,13 +462,13 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
};
static const ctl_named_node_t stats_node[] = {
- {NAME("cactive"), CTL(stats_cactive)},
- {NAME("allocated"), CTL(stats_allocated)},
- {NAME("active"), CTL(stats_active)},
- {NAME("mapped"), CTL(stats_mapped)},
- {NAME("chunks"), CHILD(named, stats_chunks)},
- {NAME("huge"), CHILD(named, stats_huge)},
- {NAME("arenas"), CHILD(indexed, stats_arenas)}
+ {NAME("cactive"), CTL(stats_cactive)},
+ {NAME("allocated"), CTL(stats_allocated)},
+ {NAME("active"), CTL(stats_active)},
+ {NAME("metadata"), CTL(stats_metadata)},
+ {NAME("resident"), CTL(stats_resident)},
+ {NAME("mapped"), CTL(stats_mapped)},
+ {NAME("arenas"), CHILD(indexed, stats_arenas)}
};
static const ctl_named_node_t root_node[] = {
@@ -410,6 +477,7 @@ static const ctl_named_node_t root_node[] = {
{NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)},
+ {NAME("tcache"), CHILD(named, tcache)},
{NAME("arena"), CHILD(indexed, arena)},
{NAME("arenas"), CHILD(named, arenas)},
{NAME("prof"), CHILD(named, prof)},
@@ -431,12 +499,19 @@ ctl_arena_init(ctl_arena_stats_t *astats)
{
if (astats->lstats == NULL) {
- astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
+ astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
sizeof(malloc_large_stats_t));
if (astats->lstats == NULL)
return (true);
}
+ if (astats->hstats == NULL) {
+ astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
+ sizeof(malloc_huge_stats_t));
+ if (astats->hstats == NULL)
+ return (true);
+ }
+
return (false);
}
@@ -445,6 +520,7 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
{
astats->dss = dss_prec_names[dss_prec_limit];
+ astats->lg_dirty_mult = -1;
astats->pactive = 0;
astats->pdirty = 0;
if (config_stats) {
@@ -456,6 +532,8 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
memset(astats->lstats, 0, nlclasses *
sizeof(malloc_large_stats_t));
+ memset(astats->hstats, 0, nhclasses *
+ sizeof(malloc_huge_stats_t));
}
}
@@ -464,11 +542,13 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
- arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
- &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
+ arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
+ &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
+ cstats->lstats, cstats->hstats);
for (i = 0; i < NBINS; i++) {
- cstats->allocated_small += cstats->bstats[i].allocated;
+ cstats->allocated_small += cstats->bstats[i].curregs *
+ index2size(i);
cstats->nmalloc_small += cstats->bstats[i].nmalloc;
cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nrequests_small += cstats->bstats[i].nrequests;
@@ -488,6 +568,9 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged;
+ sstats->astats.metadata_mapped += astats->astats.metadata_mapped;
+ sstats->astats.metadata_allocated += astats->astats.metadata_allocated;
+
sstats->allocated_small += astats->allocated_small;
sstats->nmalloc_small += astats->nmalloc_small;
sstats->ndalloc_small += astats->ndalloc_small;
@@ -498,18 +581,15 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large += astats->astats.nrequests_large;
- for (i = 0; i < nlclasses; i++) {
- sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
- sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
- sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
- sstats->lstats[i].curruns += astats->lstats[i].curruns;
- }
+ sstats->astats.allocated_huge += astats->astats.allocated_huge;
+ sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+ sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
for (i = 0; i < NBINS; i++) {
- sstats->bstats[i].allocated += astats->bstats[i].allocated;
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
+ sstats->bstats[i].curregs += astats->bstats[i].curregs;
if (config_tcache) {
sstats->bstats[i].nfills += astats->bstats[i].nfills;
sstats->bstats[i].nflushes +=
@@ -519,6 +599,19 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
+
+ for (i = 0; i < nlclasses; i++) {
+ sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+ sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+ sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
+ sstats->lstats[i].curruns += astats->lstats[i].curruns;
+ }
+
+ for (i = 0; i < nhclasses; i++) {
+ sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
+ sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+ sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks;
+ }
}
static void
@@ -547,27 +640,23 @@ static bool
ctl_grow(void)
{
ctl_arena_stats_t *astats;
- arena_t **tarenas;
- /* Allocate extended arena stats and arenas arrays. */
- astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
+ /* Initialize new arena. */
+ if (arena_init(ctl_stats.narenas) == NULL)
+ return (true);
+
+ /* Allocate extended arena stats. */
+ astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
sizeof(ctl_arena_stats_t));
if (astats == NULL)
return (true);
- tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
- sizeof(arena_t *));
- if (tarenas == NULL) {
- idalloc(astats);
- return (true);
- }
/* Initialize the new astats element. */
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
- idalloc(tarenas);
- idalloc(astats);
+ a0dalloc(astats);
return (true);
}
/* Swap merged stats to their new location. */
@@ -580,32 +669,7 @@ ctl_grow(void)
memcpy(&astats[ctl_stats.narenas + 1], &tstats,
sizeof(ctl_arena_stats_t));
}
- /* Initialize the new arenas element. */
- tarenas[ctl_stats.narenas] = NULL;
- {
- arena_t **arenas_old = arenas;
- /*
- * Swap extended arenas array into place. Although ctl_mtx
- * protects this function from other threads extending the
- * array, it does not protect from other threads mutating it
- * (i.e. initializing arenas and setting array elements to
- * point to them). Therefore, array copying must happen under
- * the protection of arenas_lock.
- */
- malloc_mutex_lock(&arenas_lock);
- arenas = tarenas;
- memcpy(arenas, arenas_old, ctl_stats.narenas *
- sizeof(arena_t *));
- narenas_total++;
- arenas_extend(narenas_total - 1);
- malloc_mutex_unlock(&arenas_lock);
- /*
- * Deallocate arenas_old only if it came from imalloc() (not
- * base_alloc()).
- */
- if (ctl_stats.narenas != narenas_auto)
- idalloc(arenas_old);
- }
+ a0dalloc(ctl_stats.arenas);
ctl_stats.arenas = astats;
ctl_stats.narenas++;
@@ -615,23 +679,11 @@ ctl_grow(void)
static void
ctl_refresh(void)
{
+ tsd_t *tsd;
unsigned i;
+ bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
- if (config_stats) {
- malloc_mutex_lock(&chunks_mtx);
- ctl_stats.chunks.current = stats_chunks.curchunks;
- ctl_stats.chunks.total = stats_chunks.nchunks;
- ctl_stats.chunks.high = stats_chunks.highchunks;
- malloc_mutex_unlock(&chunks_mtx);
-
- malloc_mutex_lock(&huge_mtx);
- ctl_stats.huge.allocated = huge_allocated;
- ctl_stats.huge.nmalloc = huge_nmalloc;
- ctl_stats.huge.ndalloc = huge_ndalloc;
- malloc_mutex_unlock(&huge_mtx);
- }
-
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
@@ -639,15 +691,22 @@ ctl_refresh(void)
ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+ tsd = tsd_fetch();
+ for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
+ tarenas[i] = arena_get(tsd, i, false, false);
+ if (tarenas[i] == NULL && !refreshed) {
+ tarenas[i] = arena_get(tsd, i, false, true);
+ refreshed = true;
+ }
+ }
+
for (i = 0; i < ctl_stats.narenas; i++) {
- if (arenas[i] != NULL)
- ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
+ if (tarenas[i] != NULL)
+ ctl_stats.arenas[i].nthreads = arena_nbound(i);
else
ctl_stats.arenas[i].nthreads = 0;
}
- malloc_mutex_unlock(&arenas_lock);
+
for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
@@ -657,14 +716,24 @@ ctl_refresh(void)
}
if (config_stats) {
+ size_t base_allocated, base_resident, base_mapped;
+ base_stats_get(&base_allocated, &base_resident, &base_mapped);
ctl_stats.allocated =
- ctl_stats.arenas[ctl_stats.narenas].allocated_small
- + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
- + ctl_stats.huge.allocated;
+ ctl_stats.arenas[ctl_stats.narenas].allocated_small +
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
ctl_stats.active =
- (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
- + ctl_stats.huge.allocated;
- ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+ (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
+ ctl_stats.metadata = base_allocated +
+ ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+ ctl_stats.arenas[ctl_stats.narenas].astats
+ .metadata_allocated;
+ ctl_stats.resident = base_resident +
+ ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+ ((ctl_stats.arenas[ctl_stats.narenas].pactive +
+ ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
+ ctl_stats.mapped = base_mapped +
+ ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
}
ctl_epoch++;
@@ -676,14 +745,13 @@ ctl_init(void)
bool ret;
malloc_mutex_lock(&ctl_mtx);
- if (ctl_initialized == false) {
+ if (!ctl_initialized) {
/*
* Allocate space for one extra arena stats element, which
* contains summed stats across all arenas.
*/
- assert(narenas_auto == narenas_total_get());
- ctl_stats.narenas = narenas_auto;
- ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
+ ctl_stats.narenas = narenas_total_get();
+ ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
(ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
if (ctl_stats.arenas == NULL) {
ret = true;
@@ -701,6 +769,15 @@ ctl_init(void)
unsigned i;
for (i = 0; i <= ctl_stats.narenas; i++) {
if (ctl_arena_init(&ctl_stats.arenas[i])) {
+ unsigned j;
+ for (j = 0; j < i; j++) {
+ a0dalloc(
+ ctl_stats.arenas[j].lstats);
+ a0dalloc(
+ ctl_stats.arenas[j].hstats);
+ }
+ a0dalloc(ctl_stats.arenas);
+ ctl_stats.arenas = NULL;
ret = true;
goto label_return;
}
@@ -826,7 +903,7 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
- if (ctl_initialized == false && ctl_init()) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
@@ -853,7 +930,7 @@ ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
- if (ctl_initialized == false && ctl_init()) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
@@ -871,7 +948,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
const ctl_named_node_t *node;
size_t i;
- if (ctl_initialized == false && ctl_init()) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
@@ -963,6 +1040,14 @@ ctl_postfork_child(void)
} \
} while (0)
+#define READ_XOR_WRITE() do { \
+ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
+ newlen != 0)) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
@@ -971,8 +1056,8 @@ ctl_postfork_child(void)
memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
goto label_return; \
- } else \
- *(t *)oldp = (v); \
+ } \
+ *(t *)oldp = (v); \
} \
} while (0)
@@ -998,7 +1083,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
int ret; \
t oldval; \
\
- if ((c) == false) \
+ if (!(c)) \
return (ENOENT); \
if (l) \
malloc_mutex_lock(&ctl_mtx); \
@@ -1021,7 +1106,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
int ret; \
t oldval; \
\
- if ((c) == false) \
+ if (!(c)) \
return (ENOENT); \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
@@ -1065,7 +1150,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
int ret; \
t oldval; \
\
- if ((c) == false) \
+ if (!(c)) \
return (ENOENT); \
READONLY(); \
oldval = (v); \
@@ -1093,6 +1178,27 @@ label_return: \
return (ret); \
}
+#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ tsd_t *tsd; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ READONLY(); \
+ tsd = tsd_fetch(); \
+ oldval = (m(tsd)); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
#define CTL_RO_BOOL_CONFIG_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
@@ -1135,11 +1241,10 @@ label_return:
/******************************************************************************/
+CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious)
CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_dss)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_mremap)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
@@ -1159,18 +1264,19 @@ CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
-CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
+ opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
@@ -1185,14 +1291,21 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
+ arena_t *oldarena;
unsigned newind, oldind;
+ tsd = tsd_fetch();
+ oldarena = arena_choose(tsd, NULL);
+ if (oldarena == NULL)
+ return (EAGAIN);
+
malloc_mutex_lock(&ctl_mtx);
- newind = oldind = choose_arena(NULL)->ind;
+ newind = oldind = oldarena->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
if (newind != oldind) {
- arena_t *arena;
+ arena_t *newarena;
if (newind >= ctl_stats.narenas) {
/* New arena index is out of range. */
@@ -1201,28 +1314,20 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
/* Initialize arena if necessary. */
- malloc_mutex_lock(&arenas_lock);
- if ((arena = arenas[newind]) == NULL && (arena =
- arenas_extend(newind)) == NULL) {
- malloc_mutex_unlock(&arenas_lock);
+ newarena = arena_get(tsd, newind, true, true);
+ if (newarena == NULL) {
ret = EAGAIN;
goto label_return;
}
- assert(arena == arenas[newind]);
- arenas[oldind]->nthreads--;
- arenas[newind]->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
-
- /* Set new arena association. */
+ /* Set new arena/tcache associations. */
+ arena_migrate(tsd, oldind, newind);
if (config_tcache) {
- tcache_t *tcache;
- if ((uintptr_t)(tcache = *tcache_tsd_get()) >
- (uintptr_t)TCACHE_STATE_MAX) {
- tcache_arena_dissociate(tcache);
- tcache_arena_associate(tcache, arena);
+ tcache_t *tcache = tsd_tcache_get(tsd);
+ if (tcache != NULL) {
+ tcache_arena_reassociate(tcache, oldarena,
+ newarena);
}
}
- arenas_tsd_set(&arena);
}
ret = 0;
@@ -1231,14 +1336,14 @@ label_return:
return (ret);
}
-CTL_RO_NL_CGEN(config_stats, thread_allocated,
- thread_allocated_tsd_get()->allocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
- &thread_allocated_tsd_get()->allocated, uint64_t *)
-CTL_RO_NL_CGEN(config_stats, thread_deallocated,
- thread_allocated_tsd_get()->deallocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
- &thread_allocated_tsd_get()->deallocated, uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
+ uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
+ tsd_thread_deallocatedp_get, uint64_t *)
static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1247,7 +1352,7 @@ thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
int ret;
bool oldval;
- if (config_tcache == false)
+ if (!config_tcache)
return (ENOENT);
oldval = tcache_enabled_get();
@@ -1271,7 +1376,7 @@ thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
{
int ret;
- if (config_tcache == false)
+ if (!config_tcache)
return (ENOENT);
READONLY();
@@ -1284,17 +1389,170 @@ label_return:
return (ret);
}
+static int
+thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ READ_XOR_WRITE();
+
+ if (newp != NULL) {
+ tsd_t *tsd;
+
+ if (newlen != sizeof(const char *)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ tsd = tsd_fetch();
+
+ if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
+ 0)
+ goto label_return;
+ } else {
+ const char *oldname = prof_thread_name_get();
+ READ(oldname, const char *);
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ oldval = prof_thread_active_get();
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (prof_thread_active_set(*(bool *)newp)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+/******************************************************************************/
+
+static int
+tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ tsd = tsd_fetch();
+
+ malloc_mutex_lock(&ctl_mtx);
+ READONLY();
+ if (tcaches_create(tsd, &tcache_ind)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ READ(tcache_ind, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static int
+tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ tsd = tsd_fetch();
+
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_flush(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ tsd = tsd_fetch();
+
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_destroy(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
/******************************************************************************/
/* ctl_mutex must be held during execution of this function. */
static void
arena_purge(unsigned arena_ind)
{
+ tsd_t *tsd;
+ unsigned i;
+ bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
- malloc_mutex_unlock(&arenas_lock);
+ tsd = tsd_fetch();
+ for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
+ tarenas[i] = arena_get(tsd, i, false, false);
+ if (tarenas[i] == NULL && !refreshed) {
+ tarenas[i] = arena_get(tsd, i, false, true);
+ refreshed = true;
+ }
+ }
if (arena_ind == ctl_stats.narenas) {
unsigned i;
@@ -1330,47 +1588,117 @@ static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
- int ret, i;
- bool match, err;
- const char *dss;
+ int ret;
+ const char *dss = NULL;
unsigned arena_ind = mib[1];
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
malloc_mutex_lock(&ctl_mtx);
WRITE(dss, const char *);
- match = false;
- for (i = 0; i < dss_prec_limit; i++) {
- if (strcmp(dss_prec_names[i], dss) == 0) {
- dss_prec = i;
- match = true;
- break;
+ if (dss != NULL) {
+ int i;
+ bool match = false;
+
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strcmp(dss_prec_names[i], dss) == 0) {
+ dss_prec = i;
+ match = true;
+ break;
+ }
+ }
+
+ if (!match) {
+ ret = EINVAL;
+ goto label_return;
}
- }
- if (match == false) {
- ret = EINVAL;
- goto label_return;
}
if (arena_ind < ctl_stats.narenas) {
- arena_t *arena = arenas[arena_ind];
- if (arena != NULL) {
- dss_prec_old = arena_dss_prec_get(arena);
- arena_dss_prec_set(arena, dss_prec);
- err = false;
- } else
- err = true;
+ arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true);
+ if (arena == NULL || (dss_prec != dss_prec_limit &&
+ arena_dss_prec_set(arena, dss_prec))) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = arena_dss_prec_get(arena);
} else {
+ if (dss_prec != dss_prec_limit &&
+ chunk_dss_prec_set(dss_prec)) {
+ ret = EFAULT;
+ goto label_return;
+ }
dss_prec_old = chunk_dss_prec_get();
- err = chunk_dss_prec_set(dss_prec);
}
+
dss = dss_prec_names[dss_prec_old];
READ(dss, const char *);
- if (err) {
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static int
+arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = mib[1];
+ arena_t *arena;
+
+ arena = arena_get(tsd_fetch(), arena_ind, false, true);
+ if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = arena_lg_dirty_mult_get(arena);
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = mib[1];
+ arena_t *arena;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (arena_ind < narenas_total_get() && (arena =
+ arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
+ if (newp != NULL) {
+ chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
+ WRITE(new_chunk_hooks, chunk_hooks_t);
+ old_chunk_hooks = chunk_hooks_set(arena,
+ &new_chunk_hooks);
+ READ(old_chunk_hooks, chunk_hooks_t);
+ } else {
+ chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
+ READ(old_chunk_hooks, chunk_hooks_t);
+ }
+ } else {
+ ret = EFAULT;
+ goto label_return;
+ }
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
@@ -1444,6 +1772,32 @@ label_return:
return (ret);
}
+static int
+arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = arena_lg_dirty_mult_default_get();
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
@@ -1461,8 +1815,8 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_bin_i_node);
}
-CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
-CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
+CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
+CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1472,29 +1826,15 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_lrun_i_node);
}
-static int
-arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
+static const ctl_named_node_t *
+arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
{
- int ret;
- unsigned arena_ind;
-
- malloc_mutex_lock(&ctl_mtx);
- WRITEONLY();
- arena_ind = UINT_MAX;
- WRITE(arena_ind, unsigned);
- if (newp != NULL && arena_ind >= ctl_stats.narenas)
- ret = EFAULT;
- else {
- if (arena_ind == UINT_MAX)
- arena_ind = ctl_stats.narenas;
- arena_purge(arena_ind);
- ret = 0;
- }
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ if (i > nhclasses)
+ return (NULL);
+ return (super_arenas_hchunk_i_node);
}
static int
@@ -1522,31 +1862,52 @@ label_return:
/******************************************************************************/
static int
+prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_thread_active_init_set(*(bool *)newp);
+ } else
+ oldval = prof_thread_active_init_get();
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
bool oldval;
- if (config_prof == false)
+ if (!config_prof)
return (ENOENT);
- malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
- oldval = opt_prof_active;
if (newp != NULL) {
- /*
- * The memory barriers will tend to make opt_prof_active
- * propagate faster on systems with weak memory ordering.
- */
- mb_write();
- WRITE(opt_prof_active, bool);
- mb_write();
- }
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_active_set(*(bool *)newp);
+ } else
+ oldval = prof_active_get();
READ(oldval, bool);
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -1557,7 +1918,7 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
const char *filename = NULL;
- if (config_prof == false)
+ if (!config_prof)
return (ENOENT);
WRITEONLY();
@@ -1573,24 +1934,71 @@ label_return:
return (ret);
}
+static int
+prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_gdump_set(*(bool *)newp);
+ } else
+ oldval = prof_gdump_get();
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ size_t lg_sample = lg_prof_sample;
+ tsd_t *tsd;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ WRITEONLY();
+ WRITE(lg_sample, size_t);
+ if (lg_sample >= (sizeof(uint64_t) << 3))
+ lg_sample = (sizeof(uint64_t) << 3) - 1;
+
+ tsd = tsd_fetch();
+
+ prof_reset(tsd, lg_sample);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
+CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
/******************************************************************************/
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
- size_t)
-CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
-CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
-
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
+ ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
@@ -1602,6 +2010,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
+ ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
+ ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t)
@@ -1619,15 +2031,23 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
+ ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
+ ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
@@ -1666,13 +2086,32 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
return (super_stats_arenas_i_lruns_j_node);
}
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
+{
+
+ if (j > nhclasses)
+ return (NULL);
+ return (super_stats_arenas_i_hchunks_j_node);
+}
+
static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx);
- if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
+ if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
ret = NULL;
goto label_return;
}
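
The ctl.c changes above add a number of new mallctl names: stats.metadata, stats.resident, prof.gdump, prof.reset, prof.lg_sample, and the per-arena metadata and hchunks statistics. As an illustrative sketch only (not part of the commit), the following shows how an application might query two of them through the public mallctl() interface declared in malloc_np.h on FreeBSD; a real consumer would normally write the "epoch" mallctl first so that stats.* values are refreshed.

/* Editorial sketch, not part of the diff. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <malloc_np.h>

int
main(void)
{
	size_t metadata, sz = sizeof(size_t);
	bool gdump = true, old_gdump;
	size_t old_sz = sizeof(bool);

	/* Read the total metadata byte count exposed by stats_metadata above. */
	if (mallctl("stats.metadata", &metadata, &sz, NULL, 0) == 0)
		printf("metadata: %zu bytes\n", metadata);

	/*
	 * Set prof.gdump and receive the previous value in one call, mirroring
	 * prof_gdump_ctl() above.  Fails with ENOENT if profiling is compiled out.
	 */
	if (mallctl("prof.gdump", &old_gdump, &old_sz, &gdump, sizeof(bool)) == 0)
		printf("prof.gdump was %s\n", old_gdump ? "true" : "false");

	return (0);
}
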
diff --git a/contrib/jemalloc/src/extent.c b/contrib/jemalloc/src/extent.c
index 8c09b48..13f9441 100644
--- a/contrib/jemalloc/src/extent.c
+++ b/contrib/jemalloc/src/extent.c
@@ -3,17 +3,32 @@
/******************************************************************************/
-static inline int
+JEMALLOC_INLINE_C size_t
+extent_quantize(size_t size)
+{
+
+ /*
+ * Round down to the nearest chunk size that can actually be requested
+ * during normal huge allocation.
+ */
+ return (index2size(size2index(size + 1) - 1));
+}
+
+JEMALLOC_INLINE_C int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
int ret;
- size_t a_size = a->size;
- size_t b_size = b->size;
-
- ret = (a_size > b_size) - (a_size < b_size);
+ size_t a_qsize = extent_quantize(extent_node_size_get(a));
+ size_t b_qsize = extent_quantize(extent_node_size_get(b));
+
+ /*
+ * Compare based on quantized size rather than size, in order to sort
+ * equally useful extents only by address.
+ */
+ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
+ uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
@@ -22,18 +37,17 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
}
/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
+rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
extent_szad_comp)
-static inline int
+JEMALLOC_INLINE_C int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
+ uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
return ((a_addr > b_addr) - (a_addr < b_addr));
}
/* Generate red-black tree functions. */
-rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
- extent_ad_comp)
+rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
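
The extent.c hunk above makes the size/address tree order extents by quantized size, so that extents which are equally useful for a given request are ordered purely by address. A minimal standalone sketch of the same comparator shape (assumptions: a toy power-of-two quantization stands in for the real size2index()/index2size() round-down, and toy_extent_t is illustrative):

/* Editorial sketch, not part of the diff. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	void	*addr;
	size_t	size;
} toy_extent_t;

static size_t
toy_quantize(size_t size)
{
	size_t q = 1;

	/* Largest power of two <= size; size > 0 is assumed. */
	while ((q << 1) != 0 && (q << 1) <= size)
		q <<= 1;
	return (q);
}

static int
toy_szad_comp(const toy_extent_t *a, const toy_extent_t *b)
{
	size_t a_q = toy_quantize(a->size);
	size_t b_q = toy_quantize(b->size);
	int ret = (a_q > b_q) - (a_q < b_q);

	if (ret == 0) {
		/* Equally useful extents are ordered by address only. */
		uintptr_t a_addr = (uintptr_t)a->addr;
		uintptr_t b_addr = (uintptr_t)b->addr;

		ret = (a_addr > b_addr) - (a_addr < b_addr);
	}
	return (ret);
}

int
main(void)
{
	toy_extent_t a = { (void *)0x1000, 5000 };
	toy_extent_t b = { (void *)0x9000, 6000 };

	/* Both sizes quantize to 4096, so ordering falls through to addresses. */
	printf("%d\n", toy_szad_comp(&a, &b));
	return (0);
}
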
diff --git a/contrib/jemalloc/src/huge.c b/contrib/jemalloc/src/huge.c
index d72f213..54c2114 100644
--- a/contrib/jemalloc/src/huge.c
+++ b/contrib/jemalloc/src/huge.c
@@ -2,44 +2,68 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
-/* Data. */
-uint64_t huge_nmalloc;
-uint64_t huge_ndalloc;
-size_t huge_allocated;
+static extent_node_t *
+huge_node_get(const void *ptr)
+{
+ extent_node_t *node;
-malloc_mutex_t huge_mtx;
+ node = chunk_lookup(ptr, true);
+ assert(!extent_node_achunk_get(node));
-/******************************************************************************/
+ return (node);
+}
-/* Tree of chunks that are stand-alone huge allocations. */
-static extent_tree_t huge;
+static bool
+huge_node_set(const void *ptr, extent_node_t *node)
+{
+
+ assert(extent_node_addr_get(node) == ptr);
+ assert(!extent_node_achunk_get(node));
+ return (chunk_register(ptr, node));
+}
+
+static void
+huge_node_unset(const void *ptr, const extent_node_t *node)
+{
+
+ chunk_deregister(ptr, node);
+}
void *
-huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache)
{
+ size_t usize;
- return (huge_palloc(size, chunksize, zero, dss_prec));
+ usize = s2u(size);
+ if (usize == 0) {
+ /* size_t overflow. */
+ return (NULL);
+ }
+
+ return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}
void *
-huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache)
{
void *ret;
- size_t csize;
+ size_t usize;
extent_node_t *node;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
- csize = CHUNK_CEILING(size);
- if (csize == 0) {
- /* size is large enough to cause size_t wrap-around. */
+ usize = sa2u(size, alignment);
+ if (unlikely(usize == 0))
return (NULL);
- }
+ assert(usize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
- node = base_node_alloc();
+ node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
+ CACHELINE, false, tcache, true, arena);
if (node == NULL)
return (NULL);
@@ -48,145 +72,33 @@ huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
- if (ret == NULL) {
- base_node_dealloc(node);
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
+ size, alignment, &is_zeroed)) == NULL) {
+ idalloctm(tsd, node, tcache, true);
return (NULL);
}
- /* Insert node into huge. */
- node->addr = ret;
- node->size = csize;
-
- malloc_mutex_lock(&huge_mtx);
- extent_tree_ad_insert(&huge, node);
- if (config_stats) {
- stats_cactive_add(csize);
- huge_nmalloc++;
- huge_allocated += csize;
- }
- malloc_mutex_unlock(&huge_mtx);
-
- if (config_fill && zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, csize);
- else if (opt_zero && is_zeroed == false)
- memset(ret, 0, csize);
- }
+ extent_node_init(node, arena, ret, size, is_zeroed, true);
- return (ret);
-}
-
-bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
-{
-
- /*
- * Avoid moving the allocation if the size class can be left the same.
- */
- if (oldsize > arena_maxclass
- && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
- && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
- assert(CHUNK_CEILING(oldsize) == oldsize);
- return (false);
- }
-
- /* Reallocation would require a move. */
- return (true);
-}
-
-void *
-huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
-{
- void *ret;
- size_t copysize;
-
- /* Try to avoid moving the allocation. */
- if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
- return (ptr);
-
- /*
- * size and oldsize are different enough that we need to use a
- * different size class. In that case, fall back to allocating new
- * space and copying.
- */
- if (alignment > chunksize)
- ret = huge_palloc(size + extra, alignment, zero, dss_prec);
- else
- ret = huge_malloc(size + extra, zero, dss_prec);
-
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
- if (alignment > chunksize)
- ret = huge_palloc(size, alignment, zero, dss_prec);
- else
- ret = huge_malloc(size, zero, dss_prec);
-
- if (ret == NULL)
- return (NULL);
+ if (huge_node_set(ret, node)) {
+ arena_chunk_dalloc_huge(arena, ret, size);
+ idalloctm(tsd, node, tcache, true);
+ return (NULL);
}
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
+ /* Insert node into huge. */
+ malloc_mutex_lock(&arena->huge_mtx);
+ ql_elm_new(node, ql_link);
+ ql_tail_insert(&arena->huge, node, ql_link);
+ malloc_mutex_unlock(&arena->huge_mtx);
-#ifdef JEMALLOC_MREMAP
- /*
- * Use mremap(2) if this is a huge-->huge reallocation, and neither the
- * source nor the destination are in dss.
- */
- if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
- == false && chunk_in_dss(ret) == false))) {
- size_t newsize = huge_salloc(ret);
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!is_zeroed)
+ memset(ret, 0, size);
+ } else if (config_fill && unlikely(opt_junk_alloc))
+ memset(ret, 0xa5, size);
- /*
- * Remove ptr from the tree of huge allocations before
- * performing the remap operation, in order to avoid the
- * possibility of another thread acquiring that mapping before
- * this one removes it from the tree.
- */
- huge_dalloc(ptr, false);
- if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
- ret) == MAP_FAILED) {
- /*
- * Assuming no chunk management bugs in the allocator,
- * the only documented way an error can occur here is
- * if the application changed the map type for a
- * portion of the old allocation. This is firmly in
- * undefined behavior territory, so write a diagnostic
- * message, and optionally abort.
- */
- char buf[BUFERROR_BUF];
-
- buferror(get_errno(), buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in mremap(): %s\n",
- buf);
- if (opt_abort)
- abort();
- memcpy(ret, ptr, copysize);
- chunk_dealloc_mmap(ptr, oldsize);
- } else if (config_fill && zero == false && opt_junk && oldsize
- < newsize) {
- /*
- * mremap(2) clobbers the original mapping, so
- * junk/zero filling is not preserved. There is no
- * need to zero fill here, since any trailing
- * uninitialized memory is demand-zeroed by the
- * kernel, but junk filling must be redone.
- */
- memset(ret + oldsize, 0xa5, newsize - oldsize);
- }
- } else
-#endif
- {
- memcpy(ret, ptr, copysize);
- iqalloct(ptr, try_tcache_dalloc);
- }
return (ret);
}
@@ -198,12 +110,12 @@ static void
huge_dalloc_junk(void *ptr, size_t usize)
{
- if (config_fill && config_dss && opt_junk) {
+ if (config_fill && have_dss && unlikely(opt_junk_free)) {
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
- if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
+ if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
}
}
@@ -213,135 +125,317 @@ huge_dalloc_junk(void *ptr, size_t usize)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
-void
-huge_dalloc(void *ptr, bool unmap)
+static void
+huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
+ size_t size, size_t extra, bool zero)
{
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = ptr;
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->addr == ptr);
- extent_tree_ad_remove(&huge, node);
-
- if (config_stats) {
- stats_cactive_sub(node->size);
- huge_ndalloc++;
- huge_allocated -= node->size;
+ size_t usize_next;
+ extent_node_t *node;
+ arena_t *arena;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ bool zeroed;
+
+ /* Increase usize to incorporate extra. */
+ while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
+ usize = usize_next;
+
+ if (oldsize == usize)
+ return;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+
+ /* Fill if necessary (shrinking). */
+ if (oldsize > usize) {
+ size_t sdiff = oldsize - usize;
+ zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
+ CHUNK_CEILING(usize), usize, sdiff);
+ if (config_fill && unlikely(opt_junk_free)) {
+ memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
+ zeroed = false;
+ }
+ } else
+ zeroed = true;
+
+ malloc_mutex_lock(&arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ assert(extent_node_size_get(node) != usize);
+ extent_node_size_set(node, usize);
+ /* Clear node's zeroed field if zeroing failed above. */
+ extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
+ malloc_mutex_unlock(&arena->huge_mtx);
+
+ arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
+
+ /* Fill if necessary (growing). */
+ if (oldsize < usize) {
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!zeroed) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ usize - oldsize);
+ }
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+ oldsize);
+ }
}
+}
- malloc_mutex_unlock(&huge_mtx);
+static bool
+huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
+{
+ extent_node_t *node;
+ arena_t *arena;
+ chunk_hooks_t chunk_hooks;
+ size_t cdiff;
+ bool zeroed;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ chunk_hooks = chunk_hooks_get(arena);
+
+ /* Split excess chunks. */
+ cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+ if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
+ CHUNK_CEILING(usize), cdiff, true, arena->ind))
+ return (true);
+
+ if (oldsize > usize) {
+ size_t sdiff = oldsize - usize;
+ zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+ CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
+ CHUNK_CEILING(usize), CHUNK_ADDR2OFFSET((uintptr_t)ptr +
+ usize), sdiff);
+ if (config_fill && unlikely(opt_junk_free)) {
+ huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
+ sdiff);
+ zeroed = false;
+ }
+ } else
+ zeroed = true;
- if (unmap)
- huge_dalloc_junk(node->addr, node->size);
+ malloc_mutex_lock(&arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ extent_node_size_set(node, usize);
+ /* Clear node's zeroed field if zeroing failed above. */
+ extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
+ malloc_mutex_unlock(&arena->huge_mtx);
- chunk_dealloc(node->addr, node->size, unmap);
+ /* Zap the excess chunks. */
+ arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
- base_node_dealloc(node);
+ return (false);
}
-size_t
-huge_salloc(const void *ptr)
-{
- size_t ret;
- extent_node_t *node, key;
+static bool
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
+ size_t usize;
+ extent_node_t *node;
+ arena_t *arena;
+ bool is_zeroed_subchunk, is_zeroed_chunk;
- malloc_mutex_lock(&huge_mtx);
+ usize = s2u(size);
+ if (usize == 0) {
+ /* size_t overflow. */
+ return (true);
+ }
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ is_zeroed_subchunk = extent_node_zeroed_get(node);
+ malloc_mutex_unlock(&arena->huge_mtx);
- ret = node->size;
+ /*
+ * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
+ * that it is possible to make correct junk/zero fill decisions below.
+ */
+ is_zeroed_chunk = zero;
- malloc_mutex_unlock(&huge_mtx);
+ if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
+ &is_zeroed_chunk))
+ return (true);
- return (ret);
-}
+ malloc_mutex_lock(&arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ extent_node_size_set(node, usize);
+ malloc_mutex_unlock(&arena->huge_mtx);
-dss_prec_t
-huge_dss_prec_get(arena_t *arena)
-{
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!is_zeroed_subchunk) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ CHUNK_CEILING(oldsize) - oldsize);
+ }
+ if (!is_zeroed_chunk) {
+ memset((void *)((uintptr_t)ptr +
+ CHUNK_CEILING(oldsize)), 0, usize -
+ CHUNK_CEILING(oldsize));
+ }
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+ oldsize);
+ }
- return (arena_dss_prec_get(choose_arena(arena)));
+ return (false);
}
-prof_ctx_t *
-huge_prof_ctx_get(const void *ptr)
+bool
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
+ bool zero)
{
- prof_ctx_t *ret;
- extent_node_t *node, key;
+ size_t usize;
+
+ /* Both allocations must be huge to avoid a move. */
+ if (oldsize < chunksize)
+ return (true);
- malloc_mutex_lock(&huge_mtx);
+ assert(s2u(oldsize) == oldsize);
+ usize = s2u(size);
+ if (usize == 0) {
+ /* size_t overflow. */
+ return (true);
+ }
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
+ /*
+ * Avoid moving the allocation if the existing chunk size accommodates
+ * the new size.
+ */
+ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
+ && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(s2u(size+extra))) {
+ huge_ralloc_no_move_similar(ptr, oldsize, usize, size, extra,
+ zero);
+ return (false);
+ }
- ret = node->prof_ctx;
+ /* Attempt to shrink the allocation in-place. */
+ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize))
+ return (huge_ralloc_no_move_shrink(ptr, oldsize, usize));
- malloc_mutex_unlock(&huge_mtx);
+ /* Attempt to expand the allocation in-place. */
+ if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
+ if (extra == 0)
+ return (true);
- return (ret);
+ /* Try again, this time without extra. */
+ return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
+ }
+ return (false);
}
-void
-huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+void *
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, tcache_t *tcache)
{
- extent_node_t *node, key;
+ void *ret;
+ size_t copysize;
- malloc_mutex_lock(&huge_mtx);
+ /* Try to avoid moving the allocation. */
+ if (!huge_ralloc_no_move(ptr, oldsize, size, extra, zero))
+ return (ptr);
+
+ /*
+ * size and oldsize are different enough that we need to use a
+ * different size class. In that case, fall back to allocating new
+ * space and copying.
+ */
+ if (alignment > chunksize) {
+ ret = huge_palloc(tsd, arena, size + extra, alignment, zero,
+ tcache);
+ } else
+ ret = huge_malloc(tsd, arena, size + extra, zero, tcache);
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
+ if (ret == NULL) {
+ if (extra == 0)
+ return (NULL);
+ /* Try again, this time without extra. */
+ if (alignment > chunksize) {
+ ret = huge_palloc(tsd, arena, size, alignment, zero,
+ tcache);
+ } else
+ ret = huge_malloc(tsd, arena, size, zero, tcache);
- node->prof_ctx = ctx;
+ if (ret == NULL)
+ return (NULL);
+ }
- malloc_mutex_unlock(&huge_mtx);
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ memcpy(ret, ptr, copysize);
+ isqalloc(tsd, ptr, oldsize, tcache);
+ return (ret);
}
-bool
-huge_boot(void)
+void
+huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ huge_node_unset(ptr, node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ ql_remove(&arena->huge, node, ql_link);
+ malloc_mutex_unlock(&arena->huge_mtx);
+
+ huge_dalloc_junk(extent_node_addr_get(node),
+ extent_node_size_get(node));
+ arena_chunk_dalloc_huge(extent_node_arena_get(node),
+ extent_node_addr_get(node), extent_node_size_get(node));
+ idalloctm(tsd, node, tcache, true);
+}
- /* Initialize chunks data. */
- if (malloc_mutex_init(&huge_mtx))
- return (true);
- extent_tree_ad_new(&huge);
-
- if (config_stats) {
- huge_nmalloc = 0;
- huge_ndalloc = 0;
- huge_allocated = 0;
- }
+arena_t *
+huge_aalloc(const void *ptr)
+{
- return (false);
+ return (extent_node_arena_get(huge_node_get(ptr)));
}
-void
-huge_prefork(void)
+size_t
+huge_salloc(const void *ptr)
{
+ size_t size;
+ extent_node_t *node;
+ arena_t *arena;
- malloc_mutex_prefork(&huge_mtx);
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ size = extent_node_size_get(node);
+ malloc_mutex_unlock(&arena->huge_mtx);
+
+ return (size);
}
-void
-huge_postfork_parent(void)
+prof_tctx_t *
+huge_prof_tctx_get(const void *ptr)
{
+ prof_tctx_t *tctx;
+ extent_node_t *node;
+ arena_t *arena;
- malloc_mutex_postfork_parent(&huge_mtx);
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ tctx = extent_node_prof_tctx_get(node);
+ malloc_mutex_unlock(&arena->huge_mtx);
+
+ return (tctx);
}
void
-huge_postfork_child(void)
+huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
+ extent_node_t *node;
+ arena_t *arena;
- malloc_mutex_postfork_child(&huge_mtx);
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ extent_node_prof_tctx_set(node, tctx);
+ malloc_mutex_unlock(&arena->huge_mtx);
}
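
The rewritten huge.c above dispatches huge_ralloc_no_move() to one of three in-place paths (same chunk count, shrink, or expand) based purely on chunk-ceiling arithmetic, and only falls back to a copying reallocation when in-place expansion fails. A reduced sketch of that dispatch under a hypothetical 2 MiB chunk size, with the extra parameter ignored for brevity:

/* Editorial sketch, not part of the diff. */
#include <stddef.h>
#include <stdio.h>

#define TOY_CHUNK		((size_t)(2 * 1024 * 1024))	/* Hypothetical chunk size. */
#define TOY_CHUNK_CEILING(s)	(((s) + TOY_CHUNK - 1) & ~(TOY_CHUNK - 1))

typedef enum {
	RALLOC_MOVE,	/* Not huge: caller must allocate/copy/free. */
	RALLOC_SIMILAR,	/* Same chunk count: adjust bookkeeping and fill. */
	RALLOC_SHRINK,	/* Fewer chunks: split and discard the excess. */
	RALLOC_EXPAND	/* More chunks: try to extend the mapping in place. */
} toy_ralloc_path_t;

static toy_ralloc_path_t
toy_huge_ralloc_path(size_t oldsize, size_t usize)
{

	if (oldsize < TOY_CHUNK)
		return (RALLOC_MOVE);		/* Both sizes must be huge. */
	if (TOY_CHUNK_CEILING(oldsize) == TOY_CHUNK_CEILING(usize))
		return (RALLOC_SIMILAR);
	if (TOY_CHUNK_CEILING(oldsize) > TOY_CHUNK_CEILING(usize))
		return (RALLOC_SHRINK);
	return (RALLOC_EXPAND);
}

int
main(void)
{

	/* 3 MiB -> 3.5 MiB stays within two chunks, so no move is needed. */
	printf("%d\n", toy_huge_ralloc_path(3 * 1024 * 1024, 3584 * 1024));
	return (0);
}
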
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index 1ffa064..d078a1f 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -4,16 +4,12 @@
/******************************************************************************/
/* Data. */
-malloc_tsd_data(, arenas, arena_t *, NULL)
-malloc_tsd_data(, thread_allocated, thread_allocated_t,
- THREAD_ALLOCATED_INITIALIZER)
-
/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char *__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
/* Runtime configuration options. */
-const char *je_malloc_conf;
+const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
@@ -21,30 +17,152 @@ bool opt_abort =
false
#endif
;
-bool opt_junk =
+const char *opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ "true"
+#else
+ "false"
+#endif
+ ;
+bool opt_junk_alloc =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
+#endif
+ ;
+bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
true
#else
false
#endif
;
+
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
-bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;
-unsigned ncpus;
+/* Initialized to true if the process is running inside Valgrind. */
+bool in_valgrind;
-malloc_mutex_t arenas_lock;
-arena_t **arenas;
-unsigned narenas_total;
-unsigned narenas_auto;
+unsigned ncpus;
-/* Set to true once the allocator has been initialized. */
-static bool malloc_initialized = false;
+/* Protects arenas initialization (arenas, narenas_total). */
+static malloc_mutex_t arenas_lock;
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas. arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
+ */
+static arena_t **arenas;
+static unsigned narenas_total;
+static arena_t *a0; /* arenas[0]; read-only after initialization. */
+static unsigned narenas_auto; /* Read-only after initialization. */
+
+typedef enum {
+ malloc_init_uninitialized = 3,
+ malloc_init_a0_initialized = 2,
+ malloc_init_recursible = 1,
+ malloc_init_initialized = 0 /* Common case --> jnz. */
+} malloc_init_t;
+static malloc_init_t malloc_init_state = malloc_init_uninitialized;
+
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t index2size_tab[NSIZES] = {
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
+ SIZE_CLASSES
+#undef SC
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const uint8_t size2index_tab[] = {
+#if LG_TINY_MIN == 0
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_0(i) i,
+#elif LG_TINY_MIN == 1
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_1(i) i,
+#elif LG_TINY_MIN == 2
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_2(i) i,
+#elif LG_TINY_MIN == 3
+#define S2B_3(i) i,
+#elif LG_TINY_MIN == 4
+#define S2B_4(i) i,
+#elif LG_TINY_MIN == 5
+#define S2B_5(i) i,
+#elif LG_TINY_MIN == 6
+#define S2B_6(i) i,
+#elif LG_TINY_MIN == 7
+#define S2B_7(i) i,
+#elif LG_TINY_MIN == 8
+#define S2B_8(i) i,
+#elif LG_TINY_MIN == 9
+#define S2B_9(i) i,
+#elif LG_TINY_MIN == 10
+#define S2B_10(i) i,
+#elif LG_TINY_MIN == 11
+#define S2B_11(i) i,
+#else
+#error "Unsupported LG_TINY_MIN"
+#endif
+#if LG_TINY_MIN < 1
+#define S2B_1(i) S2B_0(i) S2B_0(i)
+#endif
+#if LG_TINY_MIN < 2
+#define S2B_2(i) S2B_1(i) S2B_1(i)
+#endif
+#if LG_TINY_MIN < 3
+#define S2B_3(i) S2B_2(i) S2B_2(i)
+#endif
+#if LG_TINY_MIN < 4
+#define S2B_4(i) S2B_3(i) S2B_3(i)
+#endif
+#if LG_TINY_MIN < 5
+#define S2B_5(i) S2B_4(i) S2B_4(i)
+#endif
+#if LG_TINY_MIN < 6
+#define S2B_6(i) S2B_5(i) S2B_5(i)
+#endif
+#if LG_TINY_MIN < 7
+#define S2B_7(i) S2B_6(i) S2B_6(i)
+#endif
+#if LG_TINY_MIN < 8
+#define S2B_8(i) S2B_7(i) S2B_7(i)
+#endif
+#if LG_TINY_MIN < 9
+#define S2B_9(i) S2B_8(i) S2B_8(i)
+#endif
+#if LG_TINY_MIN < 10
+#define S2B_10(i) S2B_9(i) S2B_9(i)
+#endif
+#if LG_TINY_MIN < 11
+#define S2B_11(i) S2B_10(i) S2B_10(i)
+#endif
+#define S2B_no(i)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ S2B_##lg_delta_lookup(index)
+ SIZE_CLASSES
+#undef S2B_3
+#undef S2B_4
+#undef S2B_5
+#undef S2B_6
+#undef S2B_7
+#undef S2B_8
+#undef S2B_9
+#undef S2B_10
+#undef S2B_11
+#undef S2B_no
+#undef SC
+};
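
The size2index_tab generated above gives a constant-time size-to-index mapping for small requests; at lookup time the table is indexed by (size - 1) >> LG_TINY_MIN. A toy hand-written equivalent with 8-byte granularity (the class boundaries below are illustrative and are not jemalloc's real size classes):

/* Editorial sketch, not part of the diff. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One entry per 8-byte interval; boundaries are illustrative only. */
static const uint8_t toy_size2index_tab[] = {
	0,	/* (  0,   8] */
	1,	/* (  8,  16] */
	2,	/* ( 16,  24] */
	3,	/* ( 24,  32] */
	4, 4,	/* ( 32,  48] */
	5, 5	/* ( 48,  64] */
};

static size_t
toy_size2index(size_t size)
{

	assert(size > 0 && size <= 64);
	return (toy_size2index_tab[(size - 1) >> 3]);
}

int
main(void)
{

	printf("%zu %zu\n", toy_size2index(1), toy_size2index(40));
	return (0);
}
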
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
@@ -61,6 +179,9 @@ static bool malloc_initializer = NO_INITIALIZER;
/* Used to avoid initialization races. */
#ifdef _WIN32
+#if _WIN32_WINNT >= 0x0600
+static malloc_mutex_t init_lock = SRWLOCK_INIT;
+#else
static malloc_mutex_t init_lock;
JEMALLOC_ATTR(constructor)
@@ -76,7 +197,7 @@ _init_init_lock(void)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
-
+#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
@@ -89,7 +210,7 @@ typedef struct {
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
- if (opt_utrace) { \
+ if (unlikely(opt_utrace)) { \
int utrace_serrno = errno; \
malloc_utrace_t ut; \
ut.p = (a); \
@@ -109,6 +230,7 @@ typedef struct {
* definition.
*/
+static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);
/******************************************************************************/
@@ -116,35 +238,332 @@ static bool malloc_init_hard(void);
* Begin miscellaneous support functions.
*/
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_initialized(void)
+{
+
+ return (malloc_init_state == malloc_init_initialized);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void
+malloc_thread_init(void)
+{
+
+ /*
+ * TSD initialization can't be safely done as a side effect of
+ * deallocation, because it is possible for a thread to do nothing but
+ * deallocate its TLS data via free(), in which case writing to TLS
+ * would cause write-after-free memory corruption. The quarantine
+ * facility *only* gets used as a side effect of deallocation, so make
+ * a best effort attempt at initializing its TSD by hooking all
+ * allocation events.
+ */
+ if (config_fill && unlikely(opt_quarantine))
+ quarantine_alloc_hook();
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init_a0(void)
+{
+
+ if (unlikely(malloc_init_state == malloc_init_uninitialized))
+ return (malloc_init_hard_a0());
+ return (false);
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init(void)
+{
+
+ if (unlikely(!malloc_initialized()) && malloc_init_hard())
+ return (true);
+ malloc_thread_init();
+
+ return (false);
+}
+
+/*
+ * The a0*() functions are used instead of i[mcd]alloc() in situations that
+ * cannot tolerate TLS variable access.
+ */
+
+arena_t *
+a0get(void)
+{
+
+ assert(a0 != NULL);
+ return (a0);
+}
+
+static void *
+a0ialloc(size_t size, bool zero, bool is_metadata)
+{
+
+ if (unlikely(malloc_init_a0()))
+ return (NULL);
+
+ return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
+}
+
+static void
+a0idalloc(void *ptr, bool is_metadata)
+{
+
+ idalloctm(NULL, ptr, false, is_metadata);
+}
+
+void *
+a0malloc(size_t size)
+{
+
+ return (a0ialloc(size, false, true));
+}
+
+void
+a0dalloc(void *ptr)
+{
+
+ a0idalloc(ptr, true);
+}
+
+/*
+ * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
+ * situations that cannot tolerate TLS variable access (TLS allocation and very
+ * early internal data structure initialization).
+ */
+
+void *
+bootstrap_malloc(size_t size)
+{
+
+ if (unlikely(size == 0))
+ size = 1;
+
+ return (a0ialloc(size, false, false));
+}
+
+void *
+bootstrap_calloc(size_t num, size_t size)
+{
+ size_t num_size;
+
+ num_size = num * size;
+ if (unlikely(num_size == 0)) {
+ assert(num == 0 || size == 0);
+ num_size = 1;
+ }
+
+ return (a0ialloc(num_size, true, false));
+}
+
+void
+bootstrap_free(void *ptr)
+{
+
+ if (unlikely(ptr == NULL))
+ return;
+
+ a0idalloc(ptr, false);
+}
+
/* Create a new arena and insert it into the arenas array at index ind. */
+static arena_t *
+arena_init_locked(unsigned ind)
+{
+ arena_t *arena;
+
+ /* Expand arenas if necessary. */
+ assert(ind <= narenas_total);
+ if (ind > MALLOCX_ARENA_MAX)
+ return (NULL);
+ if (ind == narenas_total) {
+ unsigned narenas_new = narenas_total + 1;
+ arena_t **arenas_new =
+ (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
+ sizeof(arena_t *)));
+ if (arenas_new == NULL)
+ return (NULL);
+ memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
+ arenas_new[ind] = NULL;
+ /*
+ * Deallocate only if arenas came from a0malloc() (not
+ * base_alloc()).
+ */
+ if (narenas_total != narenas_auto)
+ a0dalloc(arenas);
+ arenas = arenas_new;
+ narenas_total = narenas_new;
+ }
+
+ /*
+ * Another thread may have already initialized arenas[ind] if it's an
+ * auto arena.
+ */
+ arena = arenas[ind];
+ if (arena != NULL) {
+ assert(ind < narenas_auto);
+ return (arena);
+ }
+
+ /* Actually initialize the arena. */
+ arena = arenas[ind] = arena_new(ind);
+ return (arena);
+}
+
arena_t *
-arenas_extend(unsigned ind)
+arena_init(unsigned ind)
{
- arena_t *ret;
+ arena_t *arena;
+
+ malloc_mutex_lock(&arenas_lock);
+ arena = arena_init_locked(ind);
+ malloc_mutex_unlock(&arenas_lock);
+ return (arena);
+}
+
+unsigned
+narenas_total_get(void)
+{
+ unsigned narenas;
+
+ malloc_mutex_lock(&arenas_lock);
+ narenas = narenas_total;
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (narenas);
+}
+
+static void
+arena_bind_locked(tsd_t *tsd, unsigned ind)
+{
+ arena_t *arena;
- ret = (arena_t *)base_alloc(sizeof(arena_t));
- if (ret != NULL && arena_new(ret, ind) == false) {
- arenas[ind] = ret;
- return (ret);
+ arena = arenas[ind];
+ arena->nthreads++;
+
+ if (tsd_nominal(tsd))
+ tsd_arena_set(tsd, arena);
+}
+
+static void
+arena_bind(tsd_t *tsd, unsigned ind)
+{
+
+ malloc_mutex_lock(&arenas_lock);
+ arena_bind_locked(tsd, ind);
+ malloc_mutex_unlock(&arenas_lock);
+}
+
+void
+arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
+{
+ arena_t *oldarena, *newarena;
+
+ malloc_mutex_lock(&arenas_lock);
+ oldarena = arenas[oldind];
+ newarena = arenas[newind];
+ oldarena->nthreads--;
+ newarena->nthreads++;
+ malloc_mutex_unlock(&arenas_lock);
+ tsd_arena_set(tsd, newarena);
+}
+
+unsigned
+arena_nbound(unsigned ind)
+{
+ unsigned nthreads;
+
+ malloc_mutex_lock(&arenas_lock);
+ nthreads = arenas[ind]->nthreads;
+ malloc_mutex_unlock(&arenas_lock);
+ return (nthreads);
+}
+
+static void
+arena_unbind(tsd_t *tsd, unsigned ind)
+{
+ arena_t *arena;
+
+ malloc_mutex_lock(&arenas_lock);
+ arena = arenas[ind];
+ arena->nthreads--;
+ malloc_mutex_unlock(&arenas_lock);
+ tsd_arena_set(tsd, NULL);
+}
+
+arena_t *
+arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
+{
+ arena_t *arena;
+ arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
+ unsigned narenas_cache = tsd_narenas_cache_get(tsd);
+ unsigned narenas_actual = narenas_total_get();
+
+ /* Deallocate old cache if it's too small. */
+ if (arenas_cache != NULL && narenas_cache < narenas_actual) {
+ a0dalloc(arenas_cache);
+ arenas_cache = NULL;
+ narenas_cache = 0;
+ tsd_arenas_cache_set(tsd, arenas_cache);
+ tsd_narenas_cache_set(tsd, narenas_cache);
+ }
+
+ /* Allocate cache if it's missing. */
+ if (arenas_cache == NULL) {
+ bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
+ assert(ind < narenas_actual || !init_if_missing);
+ narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
+
+ if (!*arenas_cache_bypassp) {
+ *arenas_cache_bypassp = true;
+ arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
+ narenas_cache);
+ *arenas_cache_bypassp = false;
+ } else
+ arenas_cache = NULL;
+ if (arenas_cache == NULL) {
+ /*
+ * This function must always tell the truth, even if
+ * it's slow, so don't let OOM or recursive allocation
+ * avoidance (note arenas_cache_bypass check) get in the
+ * way.
+ */
+ if (ind >= narenas_actual)
+ return (NULL);
+ malloc_mutex_lock(&arenas_lock);
+ arena = arenas[ind];
+ malloc_mutex_unlock(&arenas_lock);
+ return (arena);
+ }
+ tsd_arenas_cache_set(tsd, arenas_cache);
+ tsd_narenas_cache_set(tsd, narenas_cache);
}
- /* Only reached if there is an OOM error. */
/*
- * OOM here is quite inconvenient to propagate, since dealing with it
- * would require a check for failure in the fast path. Instead, punt
- * by using arenas[0]. In practice, this is an extremely unlikely
- * failure.
+ * Copy to cache. It's possible that the actual number of arenas has
+ * increased since narenas_total_get() was called above, but that causes
+ * no correctness issues unless two threads concurrently execute the
+ * arenas.extend mallctl, which we trust mallctl synchronization to
+ * prevent.
*/
- malloc_write("<jemalloc>: Error initializing arena\n");
- if (opt_abort)
- abort();
+ malloc_mutex_lock(&arenas_lock);
+ memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
+ malloc_mutex_unlock(&arenas_lock);
+ if (narenas_cache > narenas_actual) {
+ memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
+ (narenas_cache - narenas_actual));
+ }
- return (arenas[0]);
+ /* Read the refreshed cache, and init the arena if necessary. */
+ arena = arenas_cache[ind];
+ if (init_if_missing && arena == NULL)
+ arena = arenas_cache[ind] = arena_init(ind);
+ return (arena);
}
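
arena_get_hard() above keeps a per-thread snapshot of the global arenas array so that the common lookup path never takes arenas_lock. A generic sketch of that cache-refresh pattern for a grow-only pointer array (all names below are illustrative, not jemalloc's):

/* Editorial sketch, not part of the diff. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static void **g_items;		/* Global array; only ever grows. */
static unsigned g_nitems;

/*
 * Refresh a thread-local snapshot of g_items so that the caller's fast path
 * can index it without taking g_lock.  Returns the (possibly reallocated)
 * cache, or NULL if the caller must fall back to a locked lookup.
 */
void **
toy_cache_refresh(void **cache, unsigned *ncache)
{
	unsigned n;

	pthread_mutex_lock(&g_lock);
	n = g_nitems;
	pthread_mutex_unlock(&g_lock);

	if (cache == NULL || *ncache < n) {
		free(cache);
		cache = malloc(n * sizeof(void *));
		if (cache == NULL) {
			*ncache = 0;
			return (NULL);
		}
		*ncache = n;
	}

	/*
	 * Copy the first n slots; because the array only grows, those slots
	 * remain meaningful even if more items were appended in the meantime.
	 */
	pthread_mutex_lock(&g_lock);
	if (n != 0)
		memcpy(cache, g_items, n * sizeof(void *));
	pthread_mutex_unlock(&g_lock);
	return (cache);
}
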
-/* Slow path, called only by choose_arena(). */
+/* Slow path, called only by arena_choose(). */
arena_t *
-choose_arena_hard(void)
+arena_choose_hard(tsd_t *tsd)
{
arena_t *ret;
@@ -154,7 +573,7 @@ choose_arena_hard(void)
choose = 0;
first_null = narenas_auto;
malloc_mutex_lock(&arenas_lock);
- assert(arenas[0] != NULL);
+ assert(a0get() != NULL);
for (i = 1; i < narenas_auto; i++) {
if (arenas[i] != NULL) {
/*
@@ -187,22 +606,71 @@ choose_arena_hard(void)
ret = arenas[choose];
} else {
/* Initialize a new arena. */
- ret = arenas_extend(first_null);
+ choose = first_null;
+ ret = arena_init_locked(choose);
+ if (ret == NULL) {
+ malloc_mutex_unlock(&arenas_lock);
+ return (NULL);
+ }
}
- ret->nthreads++;
+ arena_bind_locked(tsd, choose);
malloc_mutex_unlock(&arenas_lock);
} else {
- ret = arenas[0];
- malloc_mutex_lock(&arenas_lock);
- ret->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
+ ret = a0get();
+ arena_bind(tsd, 0);
}
- arenas_tsd_set(&ret);
-
return (ret);
}
+void
+thread_allocated_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+thread_deallocated_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+arena_cleanup(tsd_t *tsd)
+{
+ arena_t *arena;
+
+ arena = tsd_arena_get(tsd);
+ if (arena != NULL)
+ arena_unbind(tsd, arena->ind);
+}
+
+void
+arenas_cache_cleanup(tsd_t *tsd)
+{
+ arena_t **arenas_cache;
+
+ arenas_cache = tsd_arenas_cache_get(tsd);
+ if (arenas_cache != NULL)
+ a0dalloc(arenas_cache);
+}
+
+void
+narenas_cache_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+arenas_cache_bypass_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
static void
stats_print_atexit(void)
{
@@ -247,6 +715,19 @@ stats_print_atexit(void)
* Begin initialization functions.
*/
+#ifndef JEMALLOC_HAVE_SECURE_GETENV
+static char *
+secure_getenv(const char *name)
+{
+
+# ifdef JEMALLOC_HAVE_ISSETUGID
+ if (issetugid() != 0)
+ return (NULL);
+# endif
+ return (getenv(name));
+}
+#endif
+
static unsigned
malloc_ncpus(void)
{
@@ -262,44 +743,6 @@ malloc_ncpus(void)
return ((result == -1) ? 1 : (unsigned)result);
}
-void
-arenas_cleanup(void *arg)
-{
- arena_t *arena = *(arena_t **)arg;
-
- malloc_mutex_lock(&arenas_lock);
- arena->nthreads--;
- malloc_mutex_unlock(&arenas_lock);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void
-malloc_thread_init(void)
-{
-
- /*
- * TSD initialization can't be safely done as a side effect of
- * deallocation, because it is possible for a thread to do nothing but
- * deallocate its TLS data via free(), in which case writing to TLS
- * would cause write-after-free memory corruption. The quarantine
- * facility *only* gets used as a side effect of deallocation, so make
- * a best effort attempt at initializing its TSD by hooking all
- * allocation events.
- */
- if (config_fill && opt_quarantine)
- quarantine_alloc_hook();
-}
-
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init(void)
-{
-
- if (malloc_initialized == false && malloc_init_hard())
- return (true);
- malloc_thread_init();
-
- return (false);
-}
-
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
char const **v_p, size_t *vlen_p)
@@ -309,7 +752,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
*k_p = opts;
- for (accept = false; accept == false;) {
+ for (accept = false; !accept;) {
switch (*opts) {
case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
@@ -344,7 +787,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
}
}
- for (accept = false; accept == false;) {
+ for (accept = false; !accept;) {
switch (*opts) {
case ',':
opts++;
@@ -398,14 +841,16 @@ malloc_conf_init(void)
* valgrind option remains in jemalloc 3.x for compatibility reasons.
*/
if (config_valgrind) {
- opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
- if (config_fill && opt_valgrind) {
- opt_junk = false;
- assert(opt_zero == false);
+ in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
+ if (config_fill && unlikely(in_valgrind)) {
+ opt_junk = "false";
+ opt_junk_alloc = false;
+ opt_junk_free = false;
+ assert(!opt_zero);
opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
opt_redzone = true;
}
- if (config_tcache && opt_valgrind)
+ if (config_tcache && unlikely(in_valgrind))
opt_tcache = false;
}
@@ -445,7 +890,7 @@ malloc_conf_init(void)
if (linklen == -1) {
/* No configuration specified. */
linklen = 0;
- /* restore errno */
+ /* Restore errno. */
set_errno(saved_errno);
}
#endif
@@ -461,8 +906,7 @@ malloc_conf_init(void)
#endif
;
- if (issetugid() == 0 && (opts = getenv(envname)) !=
- NULL) {
+ if ((opts = secure_getenv(envname)) != NULL) {
/*
* Do nothing; opts is already initialized to
* the value of the MALLOC_CONF environment
@@ -480,27 +924,28 @@ malloc_conf_init(void)
opts = buf;
}
- while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
- &vlen) == false) {
-#define CONF_HANDLE_BOOL(o, n) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
- if (strncmp("true", v, vlen) == 0 && \
- vlen == sizeof("true")-1) \
+ while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
+ &vlen)) {
+#define CONF_MATCH(n) \
+ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
+#define CONF_MATCH_VALUE(n) \
+ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
+#define CONF_HANDLE_BOOL(o, n, cont) \
+ if (CONF_MATCH(n)) { \
+ if (CONF_MATCH_VALUE("true")) \
o = true; \
- else if (strncmp("false", v, vlen) == \
- 0 && vlen == sizeof("false")-1) \
+ else if (CONF_MATCH_VALUE("false")) \
o = false; \
else { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} \
- continue; \
+ if (cont) \
+ continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
+ if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
\
@@ -512,15 +957,15 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
- if (min != 0 && um < min) \
- o = min; \
- else if (um > max) \
- o = max; \
+ if ((min) != 0 && um < (min)) \
+ o = (min); \
+ else if (um > (max)) \
+ o = (max); \
else \
o = um; \
} else { \
- if ((min != 0 && um < min) || \
- um > max) { \
+ if (((min) != 0 && um < (min)) \
+ || um > (max)) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
@@ -531,8 +976,7 @@ malloc_conf_init(void)
continue; \
}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
+ if (CONF_MATCH(n)) { \
long l; \
char *end; \
\
@@ -543,8 +987,8 @@ malloc_conf_init(void)
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
- } else if (l < (ssize_t)min || l > \
- (ssize_t)max) { \
+ } else if (l < (ssize_t)(min) || l > \
+ (ssize_t)(max)) { \
malloc_conf_error( \
"Out-of-range conf value", \
k, klen, v, vlen); \
@@ -553,8 +997,7 @@ malloc_conf_init(void)
continue; \
}
#define CONF_HANDLE_CHAR_P(o, n, d) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
+ if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
sizeof(o)-1) ? vlen : \
sizeof(o)-1; \
@@ -563,17 +1006,18 @@ malloc_conf_init(void)
continue; \
}
- CONF_HANDLE_BOOL(opt_abort, "abort")
+ CONF_HANDLE_BOOL(opt_abort, "abort", true)
/*
- * Chunks always require at least one header page, plus
- * one data page in the absence of redzones, or three
- * pages in the presence of redzones. In order to
- * simplify options processing, fix the limit based on
- * config_fill.
+ * Chunks always require at least one header page,
+ * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
+ * possibly an additional page in the presence of
+ * redzones. In order to simplify options processing,
+ * use a conservative bound that accommodates all these
+ * constraints.
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
- (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
- true)
+ LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
+ (sizeof(size_t) << 3) - 1, true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
@@ -592,7 +1036,7 @@ malloc_conf_init(void)
}
}
}
- if (match == false) {
+ if (!match) {
malloc_conf_error("Invalid conf value",
k, klen, v, vlen);
}
@@ -602,47 +1046,87 @@ malloc_conf_init(void)
SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
+ CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) {
- CONF_HANDLE_BOOL(opt_junk, "junk")
+ if (CONF_MATCH("junk")) {
+ if (CONF_MATCH_VALUE("true")) {
+ opt_junk = "true";
+ opt_junk_alloc = opt_junk_free =
+ true;
+ } else if (CONF_MATCH_VALUE("false")) {
+ opt_junk = "false";
+ opt_junk_alloc = opt_junk_free =
+ false;
+ } else if (CONF_MATCH_VALUE("alloc")) {
+ opt_junk = "alloc";
+ opt_junk_alloc = true;
+ opt_junk_free = false;
+ } else if (CONF_MATCH_VALUE("free")) {
+ opt_junk = "free";
+ opt_junk_alloc = false;
+ opt_junk_free = true;
+ } else {
+ malloc_conf_error(
+ "Invalid conf value", k,
+ klen, v, vlen);
+ }
+ continue;
+ }
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
0, SIZE_T_MAX, false)
- CONF_HANDLE_BOOL(opt_redzone, "redzone")
- CONF_HANDLE_BOOL(opt_zero, "zero")
+ CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
+ CONF_HANDLE_BOOL(opt_zero, "zero", true)
}
if (config_utrace) {
- CONF_HANDLE_BOOL(opt_utrace, "utrace")
- }
- if (config_valgrind) {
- CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
+ CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
}
if (config_xmalloc) {
- CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
+ CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
}
if (config_tcache) {
- CONF_HANDLE_BOOL(opt_tcache, "tcache")
+ CONF_HANDLE_BOOL(opt_tcache, "tcache",
+ !config_valgrind || !in_valgrind)
+ if (CONF_MATCH("tcache")) {
+ assert(config_valgrind && in_valgrind);
+ if (opt_tcache) {
+ opt_tcache = false;
+ malloc_conf_error(
+ "tcache cannot be enabled "
+ "while running inside Valgrind",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
"lg_tcache_max", -1,
(sizeof(size_t) << 3) - 1)
}
if (config_prof) {
- CONF_HANDLE_BOOL(opt_prof, "prof")
+ CONF_HANDLE_BOOL(opt_prof, "prof", true)
CONF_HANDLE_CHAR_P(opt_prof_prefix,
"prof_prefix", "jeprof")
- CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
- CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
+ CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_thread_active_init,
+ "prof_thread_active_init", true)
+ CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0,
- (sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
+ (sizeof(uint64_t) << 3) - 1, true)
+ CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
+ true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
"lg_prof_interval", -1,
(sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
- CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
- CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
+ CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
+ true)
}
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
+#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
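
The CONF_MATCH/CONF_MATCH_VALUE macros introduced above replace the repeated strncmp() length checks, and the junk option now accepts "alloc" and "free" in addition to the old booleans (selected at run time via, e.g., MALLOC_CONF="junk:free"). A self-contained sketch of the matching pattern applied to one hard-coded key/value pair:

/* Editorial sketch, not part of the diff. */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* Pretend malloc_conf_next() produced this key/value pair. */
	const char *k = "junk";
	const char *v = "alloc";
	size_t klen = strlen(k);
	size_t vlen = strlen(v);

#define CONF_MATCH(n)		(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n)	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)

	if (CONF_MATCH("junk")) {
		if (CONF_MATCH_VALUE("true"))
			printf("junk both allocation and deallocation\n");
		else if (CONF_MATCH_VALUE("alloc"))
			printf("junk on allocation only\n");
		else if (CONF_MATCH_VALUE("free"))
			printf("junk on deallocation only\n");
		else
			printf("invalid value\n");
	}
#undef CONF_MATCH
#undef CONF_MATCH_VALUE
	return (0);
}
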
@@ -651,41 +1135,44 @@ malloc_conf_init(void)
}
}
+/* init_lock must be held. */
static bool
-malloc_init_hard(void)
+malloc_init_hard_needed(void)
{
- arena_t *init_arenas[1];
- malloc_mutex_lock(&init_lock);
- if (malloc_initialized || IS_INITIALIZER) {
+ if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
+ malloc_init_recursible)) {
/*
* Another thread initialized the allocator before this one
* acquired init_lock, or this thread is the initializing
* thread, and it is recursively allocating.
*/
- malloc_mutex_unlock(&init_lock);
return (false);
}
#ifdef JEMALLOC_THREADED_INIT
- if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
+ if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
/* Busy-wait until the initializing thread completes. */
do {
malloc_mutex_unlock(&init_lock);
CPU_SPINWAIT;
malloc_mutex_lock(&init_lock);
- } while (malloc_initialized == false);
- malloc_mutex_unlock(&init_lock);
+ } while (!malloc_initialized());
return (false);
}
#endif
+ return (true);
+}
+
+/* init_lock must be held. */
+static bool
+malloc_init_hard_a0_locked(void)
+{
+
malloc_initializer = INITIALIZER;
- malloc_tsd_boot();
if (config_prof)
prof_boot0();
-
malloc_conf_init();
-
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
@@ -694,94 +1181,64 @@ malloc_init_hard(void)
abort();
}
}
-
- if (base_boot()) {
- malloc_mutex_unlock(&init_lock);
+ if (base_boot())
return (true);
- }
-
- if (chunk_boot()) {
- malloc_mutex_unlock(&init_lock);
+ if (chunk_boot())
return (true);
- }
-
- if (ctl_boot()) {
- malloc_mutex_unlock(&init_lock);
+ if (ctl_boot())
return (true);
- }
-
if (config_prof)
prof_boot1();
-
- arena_boot();
-
- if (config_tcache && tcache_boot0()) {
- malloc_mutex_unlock(&init_lock);
+ if (arena_boot())
return (true);
- }
-
- if (huge_boot()) {
- malloc_mutex_unlock(&init_lock);
+ if (config_tcache && tcache_boot())
return (true);
- }
-
- if (malloc_mutex_init(&arenas_lock)) {
- malloc_mutex_unlock(&init_lock);
+ if (malloc_mutex_init(&arenas_lock))
return (true);
- }
-
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
narenas_total = narenas_auto = 1;
- arenas = init_arenas;
+ arenas = &a0;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
-
/*
* Initialize one arena here. The rest are lazily created in
- * choose_arena_hard().
+ * arena_choose_hard().
*/
- arenas_extend(0);
- if (arenas[0] == NULL) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- /* Initialize allocation counters before any allocations can occur. */
- if (config_stats && thread_allocated_tsd_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (arenas_tsd_boot()) {
- malloc_mutex_unlock(&init_lock);
+ if (arena_init(0) == NULL)
return (true);
- }
+ malloc_init_state = malloc_init_a0_initialized;
+ return (false);
+}
- if (config_tcache && tcache_boot1()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+static bool
+malloc_init_hard_a0(void)
+{
+ bool ret;
- if (config_fill && quarantine_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+ malloc_mutex_lock(&init_lock);
+ ret = malloc_init_hard_a0_locked();
+ malloc_mutex_unlock(&init_lock);
+ return (ret);
+}
- if (config_prof && prof_boot2()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+/*
+ * Initialize data structures which may trigger recursive allocation.
+ *
+ * init_lock must be held.
+ */
+static void
+malloc_init_hard_recursible(void)
+{
+ malloc_init_state = malloc_init_recursible;
malloc_mutex_unlock(&init_lock);
- /**********************************************************************/
- /* Recursive allocation may follow. */
ncpus = malloc_ncpus();
#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
- && !defined(_WIN32))
+ && !defined(_WIN32) && !defined(__native_client__))
/* LinuxThreads's pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
@@ -790,15 +1247,16 @@ malloc_init_hard(void)
abort();
}
#endif
-
- /* Done recursively allocating. */
- /**********************************************************************/
malloc_mutex_lock(&init_lock);
+}
- if (mutex_boot()) {
- malloc_mutex_unlock(&init_lock);
+/* init_lock must be held. */
+static bool
+malloc_init_hard_finish(void)
+{
+
+ if (mutex_boot())
return (true);
- }
if (opt_narenas == 0) {
/*
@@ -825,21 +1283,53 @@ malloc_init_hard(void)
/* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
- if (arenas == NULL) {
- malloc_mutex_unlock(&init_lock);
+ if (arenas == NULL)
return (true);
- }
/*
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
*/
memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */
- arenas[0] = init_arenas[0];
+ arenas[0] = a0;
- malloc_initialized = true;
- malloc_mutex_unlock(&init_lock);
+ malloc_init_state = malloc_init_initialized;
+ return (false);
+}
+
+static bool
+malloc_init_hard(void)
+{
+
+ malloc_mutex_lock(&init_lock);
+ if (!malloc_init_hard_needed()) {
+ malloc_mutex_unlock(&init_lock);
+ return (false);
+ }
+
+ if (malloc_init_state != malloc_init_a0_initialized &&
+ malloc_init_hard_a0_locked()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+ if (malloc_tsd_boot0()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+ if (config_prof && prof_boot2()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ malloc_init_hard_recursible();
+
+ if (malloc_init_hard_finish()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+ malloc_mutex_unlock(&init_lock);
+ malloc_tsd_boot1();
return (false);
}
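
malloc_init_hard() is now split into _needed, _a0_locked, _recursible, and _finish stages driven by malloc_init_state, so that the bootstrap arena can serve internal allocation early and init_lock can be dropped around work that may re-enter the allocator. A stripped-down sketch of that state machine, with the stage bodies stubbed out (all names below are illustrative):

/* Editorial sketch, not part of the diff. */
#include <pthread.h>
#include <stdbool.h>

typedef enum {
	INIT_UNINITIALIZED,
	INIT_A0_INITIALIZED,	/* Bootstrap arena can serve internal allocation. */
	INIT_RECURSIBLE,	/* Remaining init may call back into the allocator. */
	INIT_INITIALIZED
} toy_init_state_t;

static pthread_mutex_t toy_init_lock = PTHREAD_MUTEX_INITIALIZER;
static toy_init_state_t toy_init_state = INIT_UNINITIALIZED;

static bool
toy_init_a0_locked(void)
{

	/* ... bootstrap the minimal allocator state ... */
	toy_init_state = INIT_A0_INITIALIZED;
	return (false);
}

static void
toy_init_recursible(void)
{

	toy_init_state = INIT_RECURSIBLE;
	/* Drop the lock around work that may re-enter the allocator. */
	pthread_mutex_unlock(&toy_init_lock);
	/* ... e.g. count CPUs, register pthread_atfork() handlers ... */
	pthread_mutex_lock(&toy_init_lock);
}

static bool
toy_init_finish_locked(void)
{

	/* ... allocate the final arrays, flip to the common-case state ... */
	toy_init_state = INIT_INITIALIZED;
	return (false);
}

bool
toy_init_hard(void)
{
	bool err = false;

	pthread_mutex_lock(&toy_init_lock);
	if (toy_init_state == INIT_INITIALIZED) {
		/* Another thread won the race. */
		pthread_mutex_unlock(&toy_init_lock);
		return (false);
	}
	if (toy_init_state != INIT_A0_INITIALIZED && toy_init_a0_locked())
		err = true;
	if (!err) {
		toy_init_recursible();
		err = toy_init_finish_locked();
	}
	pthread_mutex_unlock(&toy_init_lock);
	return (err);
}
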
@@ -852,98 +1342,87 @@ malloc_init_hard(void)
*/
static void *
-imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = imalloc(SMALL_MAXCLASS+1);
+ if (usize <= SMALL_MAXCLASS) {
+ p = imalloc(tsd, LARGE_MINCLASS);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
- p = imalloc(usize);
+ p = imalloc(tsd, usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+imalloc_prof(tsd_t *tsd, size_t usize)
{
void *p;
+ prof_tctx_t *tctx;
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = imalloc_prof_sample(usize, cnt);
+ tctx = prof_alloc_prep(tsd, usize, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = imalloc_prof_sample(tsd, usize, tctx);
else
- p = imalloc(usize);
- if (p == NULL)
+ p = imalloc(tsd, usize);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
- prof_malloc(p, usize, cnt);
+ }
+ prof_malloc(p, usize, tctx);
return (p);
}
-/*
- * MALLOC_BODY() is a macro rather than a function because its contents are in
- * the fast path, but inlining would cause reliability issues when determining
- * how many frames to discard from heap profiling backtraces.
- */
-#define MALLOC_BODY(ret, size, usize) do { \
- if (malloc_init()) \
- ret = NULL; \
- else { \
- if (config_prof && opt_prof) { \
- prof_thr_cnt_t *cnt; \
- \
- usize = s2u(size); \
- /* \
- * Call PROF_ALLOC_PREP() here rather than in \
- * imalloc_prof() so that imalloc_prof() can be \
- * inlined without introducing uncertainty \
- * about the number of backtrace frames to \
- * ignore. imalloc_prof() is in the fast path \
- * when heap profiling is enabled, so inlining \
- * is critical to performance. (For \
- * consistency all callers of PROF_ALLOC_PREP() \
- * are structured similarly, even though e.g. \
- * realloc() isn't called enough for inlining \
- * to be critical.) \
- */ \
- PROF_ALLOC_PREP(1, usize, cnt); \
- ret = imalloc_prof(usize, cnt); \
- } else { \
- if (config_stats || (config_valgrind && \
- opt_valgrind)) \
- usize = s2u(size); \
- ret = imalloc(size); \
- } \
- } \
-} while (0)
+JEMALLOC_ALWAYS_INLINE_C void *
+imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
+{
-void *
+ if (unlikely(malloc_init()))
+ return (NULL);
+ *tsd = tsd_fetch();
+
+ if (config_prof && opt_prof) {
+ *usize = s2u(size);
+ if (unlikely(*usize == 0))
+ return (NULL);
+ return (imalloc_prof(*tsd, *usize));
+ }
+
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ *usize = s2u(size);
+ return (imalloc(*tsd, size));
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
void *ret;
+ tsd_t *tsd;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0)
size = 1;
- MALLOC_BODY(ret, size, usize);
-
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
+ ret = imalloc_body(size, &tsd, &usize);
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in malloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
- if (config_stats && ret != NULL) {
+ if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
@@ -951,107 +1430,103 @@ je_malloc(size_t size)
}
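
The rewritten je_malloc() fast path above marks its error branches with unlikely() and the success branch with likely(). As a rough sketch of how such hints are usually defined (jemalloc's own macros are configure-dependent, and checked_alloc below is an invented helper, so treat this as illustrative only), they wrap GCC/Clang's __builtin_expect so the compiler keeps the common path straight-line:

/* Sketch of branch-hint macros over __builtin_expect; real builds would
 * fall back to the bare expression when the builtin is unavailable. */
#if defined(__GNUC__) || defined(__clang__)
#  define likely(x)   __builtin_expect(!!(x), 1)
#  define unlikely(x) __builtin_expect(!!(x), 0)
#else
#  define likely(x)   (x)
#  define unlikely(x) (x)
#endif

#include <stdlib.h>

void *
checked_alloc(size_t size)
{
	void *p = malloc(size);

	/* The OOM branch is annotated as cold so the common path stays
	 * straight-line, mirroring the unlikely(ret == NULL) checks above. */
	if (unlikely(p == NULL))
		abort();
	return (p);
}
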
static void *
-imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
+ prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
- p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
- false);
+ if (usize <= SMALL_MAXCLASS) {
+ assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
+ p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
- p = ipalloc(usize, alignment, false);
+ p = ipalloc(tsd, usize, alignment, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
void *p;
+ prof_tctx_t *tctx;
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = imemalign_prof_sample(alignment, usize, cnt);
+ tctx = prof_alloc_prep(tsd, usize, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = imemalign_prof_sample(tsd, alignment, usize, tctx);
else
- p = ipalloc(usize, alignment, false);
- if (p == NULL)
+ p = ipalloc(tsd, usize, alignment, false);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
- prof_malloc(p, usize, cnt);
+ }
+ prof_malloc(p, usize, tctx);
return (p);
}
JEMALLOC_ATTR(nonnull(1))
-#ifdef JEMALLOC_PROF
-/*
- * Avoid any uncertainty as to how many backtrace frames to ignore in
- * PROF_ALLOC_PREP().
- */
-JEMALLOC_NOINLINE
-#endif
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
int ret;
+ tsd_t *tsd;
size_t usize;
void *result;
assert(min_alignment != 0);
- if (malloc_init()) {
+ if (unlikely(malloc_init())) {
result = NULL;
goto label_oom;
- } else {
- if (size == 0)
- size = 1;
-
- /* Make sure that alignment is a large enough power of 2. */
- if (((alignment - 1) & alignment) != 0
- || (alignment < min_alignment)) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error allocating "
- "aligned memory: invalid alignment\n");
- abort();
- }
- result = NULL;
- ret = EINVAL;
- goto label_return;
- }
+ }
+ tsd = tsd_fetch();
+ if (size == 0)
+ size = 1;
- usize = sa2u(size, alignment);
- if (usize == 0) {
- result = NULL;
- goto label_oom;
+ /* Make sure that alignment is a large enough power of 2. */
+ if (unlikely(((alignment - 1) & alignment) != 0
+ || (alignment < min_alignment))) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error allocating "
+ "aligned memory: invalid alignment\n");
+ abort();
}
+ result = NULL;
+ ret = EINVAL;
+ goto label_return;
+ }
- if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
- PROF_ALLOC_PREP(2, usize, cnt);
- result = imemalign_prof(alignment, usize, cnt);
- } else
- result = ipalloc(usize, alignment, false);
- if (result == NULL)
- goto label_oom;
+ usize = sa2u(size, alignment);
+ if (unlikely(usize == 0)) {
+ result = NULL;
+ goto label_oom;
}
+ if (config_prof && opt_prof)
+ result = imemalign_prof(tsd, alignment, usize);
+ else
+ result = ipalloc(tsd, usize, alignment, false);
+ if (unlikely(result == NULL))
+ goto label_oom;
+ assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
+
*memptr = result;
ret = 0;
label_return:
- if (config_stats && result != NULL) {
+ if (config_stats && likely(result != NULL)) {
assert(usize == isalloc(result, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, result);
return (ret);
label_oom:
assert(result == NULL);
- if (config_xmalloc && opt_xmalloc) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error allocating aligned memory: "
"out of memory\n");
abort();
@@ -1060,7 +1535,8 @@ label_oom:
goto label_return;
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
int ret = imemalign(memptr, alignment, size, sizeof(void *));
@@ -1069,13 +1545,15 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size)
return (ret);
}
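
imemalign() above rejects any alignment that is not a large-enough power of two using the ((alignment - 1) & alignment) != 0 test. A standalone illustration of why that expression works (the helper name is made up for the example):

#include <assert.h>
#include <stddef.h>

/* A power of two has exactly one bit set, so clearing its lowest set bit
 * (x & (x - 1)) yields zero; any other nonzero value keeps at least one bit. */
static int
is_pow2(size_t x)
{
	return (x != 0 && ((x - 1) & x) == 0);
}

int
main(void)
{
	assert(is_pow2(sizeof(void *)));	/* 4 or 8: a valid alignment. */
	assert(!is_pow2(24));			/* 24 = 0b11000: rejected. */
	assert(!is_pow2(0));			/* Zero is never a valid alignment. */
	return (0);
}
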
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
void *ret;
int err;
- if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
+ if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
ret = NULL;
set_errno(err);
}
@@ -1085,54 +1563,62 @@ je_aligned_alloc(size_t alignment, size_t size)
}
static void *
-icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = icalloc(SMALL_MAXCLASS+1);
+ if (usize <= SMALL_MAXCLASS) {
+ p = icalloc(tsd, LARGE_MINCLASS);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
- p = icalloc(usize);
+ p = icalloc(tsd, usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+icalloc_prof(tsd_t *tsd, size_t usize)
{
void *p;
+ prof_tctx_t *tctx;
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = icalloc_prof_sample(usize, cnt);
+ tctx = prof_alloc_prep(tsd, usize, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = icalloc_prof_sample(tsd, usize, tctx);
else
- p = icalloc(usize);
- if (p == NULL)
+ p = icalloc(tsd, usize);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
- prof_malloc(p, usize, cnt);
+ }
+ prof_malloc(p, usize, tctx);
return (p);
}
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
void *ret;
+ tsd_t *tsd;
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- if (malloc_init()) {
+ if (unlikely(malloc_init())) {
num_size = 0;
ret = NULL;
goto label_return;
}
+ tsd = tsd_fetch();
num_size = num * size;
- if (num_size == 0) {
+ if (unlikely(num_size == 0)) {
if (num == 0 || size == 0)
num_size = 1;
else {
@@ -1144,37 +1630,38 @@ je_calloc(size_t num, size_t size)
* overflow during multiplication if neither operand uses any of the
* most significant half of the bits in a size_t.
*/
- } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
- && (num_size / size != num)) {
+ } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
+ 2))) && (num_size / size != num))) {
/* size_t overflow. */
ret = NULL;
goto label_return;
}
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
usize = s2u(num_size);
- PROF_ALLOC_PREP(1, usize, cnt);
- ret = icalloc_prof(usize, cnt);
+ if (unlikely(usize == 0)) {
+ ret = NULL;
+ goto label_return;
+ }
+ ret = icalloc_prof(tsd, usize);
} else {
- if (config_stats || (config_valgrind && opt_valgrind))
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
usize = s2u(num_size);
- ret = icalloc(num_size);
+ ret = icalloc(tsd, num_size);
}
label_return:
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in calloc(): out of "
"memory\n");
abort();
}
set_errno(ENOMEM);
}
- if (config_stats && ret != NULL) {
+ if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, num_size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
@@ -1182,135 +1669,157 @@ label_return:
}
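
The overflow guard in je_calloc() above only falls back to a division when at least one operand touches the upper half of size_t's bits; if both operands fit in the lower half, the product cannot overflow. A self-contained sketch of the same idea, using the standard SIZE_MAX instead of jemalloc's SIZE_T_MAX (the helper name is invented for the example):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Returns true if num * size would overflow. The cheap mask test rules out
 * overflow when both operands fit in the low half of size_t; only then is
 * the exact (and slower) division check needed. */
static bool
calloc_would_overflow(size_t num, size_t size)
{
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 4);

	if (((num | size) & high_bits) == 0)
		return (false);	/* Both operands are "small": cannot overflow. */
	return (size != 0 && (num * size) / size != num);
}
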
static void *
-irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
+irealloc_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize,
+ prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloc(tsd, oldptr, old_usize, LARGE_MINCLASS, 0, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
- p = iralloc(oldptr, usize, 0, 0, false);
+ p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
+irealloc_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize)
{
void *p;
- prof_ctx_t *old_ctx;
+ prof_tctx_t *old_tctx, *tctx;
- old_ctx = prof_ctx_get(oldptr);
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = irealloc_prof_sample(oldptr, usize, cnt);
+ old_tctx = prof_tctx_get(oldptr);
+ tctx = prof_alloc_prep(tsd, usize, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = irealloc_prof_sample(tsd, oldptr, old_usize, usize, tctx);
else
- p = iralloc(oldptr, usize, 0, 0, false);
+ p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
if (p == NULL)
return (NULL);
- prof_realloc(p, usize, cnt, old_usize, old_ctx);
+ prof_realloc(tsd, p, usize, tctx, true, old_usize, old_tctx);
return (p);
}
JEMALLOC_INLINE_C void
-ifree(void *ptr)
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
assert(ptr != NULL);
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
if (config_prof && opt_prof) {
usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
+ prof_free(tsd, ptr, usize);
} else if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ if (config_valgrind && unlikely(in_valgrind))
rzsize = p2rz(ptr);
- iqalloc(ptr);
+ iqalloc(tsd, ptr, tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
-void *
+JEMALLOC_INLINE_C void
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
+{
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ if (config_prof && opt_prof)
+ prof_free(tsd, ptr, usize);
+ if (config_stats)
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ if (config_valgrind && unlikely(in_valgrind))
+ rzsize = p2rz(ptr);
+ isqalloc(tsd, ptr, usize, tcache);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
void *ret;
+ tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- if (size == 0) {
+ if (unlikely(size == 0)) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
- ifree(ptr);
+ tsd = tsd_fetch();
+ ifree(tsd, ptr, tcache_get(tsd, false));
return (NULL);
}
size = 1;
}
- if (ptr != NULL) {
- assert(malloc_initialized || IS_INITIALIZER);
+ if (likely(ptr != NULL)) {
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
+ tsd = tsd_fetch();
- if ((config_prof && opt_prof) || config_stats ||
- (config_valgrind && opt_valgrind))
- old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && opt_valgrind)
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind))
old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- ret = irealloc_prof(ptr, old_usize, usize, cnt);
+ ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
+ ptr, old_usize, usize);
} else {
- if (config_stats || (config_valgrind && opt_valgrind))
+ if (config_stats || (config_valgrind &&
+ unlikely(in_valgrind)))
usize = s2u(size);
- ret = iralloc(ptr, size, 0, 0, false);
+ ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- MALLOC_BODY(ret, size, usize);
+ ret = imalloc_body(size, &tsd, &usize);
}
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
- if (config_stats && ret != NULL) {
- thread_allocated_t *ta;
+ if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
- false);
+ JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
+ old_rzsize, true, false);
return (ret);
}
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{
UTRACE(ptr, 0, 0);
- if (ptr != NULL)
- ifree(ptr);
+ if (likely(ptr != NULL)) {
+ tsd_t *tsd = tsd_fetch();
+ ifree(tsd, ptr, tcache_get(tsd, false));
+ }
}
/*
@@ -1322,22 +1831,28 @@ je_free(void *ptr)
*/
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- imemalign(&ret, alignment, size, 1);
+ if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
+ ret = NULL;
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- imemalign(&ret, PAGE, size, 1);
+ if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
+ ret = NULL;
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
@@ -1351,7 +1866,7 @@ je_valloc(size_t size)
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
-#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
+#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
* glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
* to inconsistently reference libc's malloc(3)-compatible functions
@@ -1361,11 +1876,13 @@ je_valloc(size_t size)
* passed an extra argument for the caller return address, which will be
* ignored.
*/
-JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
-JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
+JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
+# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
+JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
+# endif
#endif
/*
@@ -1376,111 +1893,185 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
* Begin non-standard functions.
*/
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena)
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{
- assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
- alignment)));
+ if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
+ *alignment = 0;
+ *usize = s2u(size);
+ } else {
+ *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
+ *usize = sa2u(size, *alignment);
+ }
+ *zero = MALLOCX_ZERO_GET(flags);
+ if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ *tcache = NULL;
+ else
+ *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ *tcache = tcache_get(tsd, true);
+ if ((flags & MALLOCX_ARENA_MASK) != 0) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ *arena = arena_get(tsd, arena_ind, true, true);
+ if (unlikely(*arena == NULL))
+ return (true);
+ } else
+ *arena = NULL;
+ return (false);
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
+{
+
+ if (likely(flags == 0)) {
+ *usize = s2u(size);
+ assert(*usize != 0);
+ *alignment = 0;
+ *zero = false;
+ *tcache = tcache_get(tsd, true);
+ *arena = NULL;
+ return (false);
+ } else {
+ return (imallocx_flags_decode_hard(tsd, size, flags, usize,
+ alignment, zero, tcache, arena));
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena)
+{
if (alignment != 0)
- return (ipalloct(usize, alignment, zero, try_tcache, arena));
- else if (zero)
- return (icalloct(usize, try_tcache, arena));
- else
- return (imalloct(usize, try_tcache, arena));
+ return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
+ if (zero)
+ return (icalloct(tsd, usize, tcache, arena));
+ return (imalloct(tsd, usize, tcache, arena));
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_maybe_flags(tsd_t *tsd, size_t size, int flags, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
+{
+
+ if (likely(flags == 0))
+ return (imalloc(tsd, size));
+ return (imallocx_flags(tsd, usize, alignment, zero, tcache, arena));
}
static void *
-imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena, prof_thr_cnt_t *cnt)
+imallocx_prof_sample(tsd_t *tsd, size_t size, int flags, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
void *p;
- if (cnt == NULL)
- return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- size_t usize_promoted = (alignment == 0) ?
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
- assert(usize_promoted != 0);
- p = imallocx(usize_promoted, alignment, zero, try_tcache,
- arena);
+ if (usize <= SMALL_MAXCLASS) {
+ assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
+ sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
+ p = imallocx_maybe_flags(tsd, LARGE_MINCLASS, flags,
+ LARGE_MINCLASS, alignment, zero, tcache, arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
- } else
- p = imallocx(usize, alignment, zero, try_tcache, arena);
+ } else {
+ p = imallocx_maybe_flags(tsd, size, flags, usize, alignment,
+ zero, tcache, arena);
+ }
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena, prof_thr_cnt_t *cnt)
+imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
+ size_t alignment;
+ bool zero;
+ tcache_t *tcache;
+ arena_t *arena;
+ prof_tctx_t *tctx;
- if ((uintptr_t)cnt != (uintptr_t)1U) {
- p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
- arena, cnt);
+ if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
+ &zero, &tcache, &arena)))
+ return (NULL);
+ tctx = prof_alloc_prep(tsd, *usize, true);
+ if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
+ p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
+ zero, tcache, arena);
+ } else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ p = imallocx_prof_sample(tsd, size, flags, *usize, alignment,
+ zero, tcache, arena);
} else
- p = imallocx(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
+ p = NULL;
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
- prof_malloc(p, usize, cnt);
+ }
+ prof_malloc(p, *usize, tctx);
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
}
-void *
-je_mallocx(size_t size, int flags)
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
- size_t usize;
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
- bool zero = flags & MALLOCX_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ size_t alignment;
+ bool zero;
+ tcache_t *tcache;
arena_t *arena;
- bool try_tcache;
- assert(size != 0);
+ if (likely(flags == 0)) {
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ *usize = s2u(size);
+ return (imalloc(tsd, size));
+ }
- if (malloc_init())
- goto label_oom;
+ if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
+ &alignment, &zero, &tcache, &arena)))
+ return (NULL);
+ p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ return (p);
+}
- if (arena_ind != UINT_MAX) {
- arena = arenas[arena_ind];
- try_tcache = false;
- } else {
- arena = NULL;
- try_tcache = true;
- }
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_mallocx(size_t size, int flags)
+{
+ tsd_t *tsd;
+ void *p;
+ size_t usize;
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- assert(usize != 0);
+ assert(size != 0);
- if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
+ if (unlikely(malloc_init()))
+ goto label_oom;
+ tsd = tsd_fetch();
- PROF_ALLOC_PREP(1, usize, cnt);
- p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
- cnt);
- } else
- p = imallocx(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
+ if (config_prof && opt_prof)
+ p = imallocx_prof(tsd, size, flags, &usize);
+ else
+ p = imallocx_no_prof(tsd, size, flags, &usize);
+ if (unlikely(p == NULL))
goto label_oom;
if (config_stats) {
assert(usize == isalloc(p, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
+ JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
return (p);
label_oom:
- if (config_xmalloc && opt_xmalloc) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
abort();
}
@@ -1489,47 +2080,49 @@ label_oom:
}
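
The imallocx_flags_decode*() helpers above pull alignment, zeroing, thread-cache and arena selection out of the single flags word, replacing the old try_tcache/arena_ind plumbing. From the caller's side these requests are composed with the MALLOCX_* macros in the public header; a minimal usage sketch, assuming the library is linked as -ljemalloc with the default je_ prefix mapped to the plain names:

#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* A 4 KiB-aligned, zeroed allocation that bypasses the thread cache. */
	void *p = mallocx(1 << 16, MALLOCX_ALIGN(4096) | MALLOCX_ZERO |
	    MALLOCX_TCACHE_NONE);
	if (p == NULL)
		return (1);
	dallocx(p, MALLOCX_TCACHE_NONE);
	return (0);
}

An explicit arena can be requested the same way with MALLOCX_ARENA(ind), which is what the MALLOCX_ARENA_MASK branch above decodes.
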
static void *
-irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
- bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
- prof_thr_cnt_t *cnt)
+irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
+ size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
+ prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
- try_tcache_alloc, try_tcache_dalloc, arena);
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloct(tsd, oldptr, old_usize, LARGE_MINCLASS, alignment,
+ zero, tcache, arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else {
- p = iralloct(oldptr, size, 0, alignment, zero,
- try_tcache_alloc, try_tcache_dalloc, arena);
+ p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
+ tcache, arena);
}
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
- size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
- arena_t *arena, prof_thr_cnt_t *cnt)
+irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
+ size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
+ arena_t *arena)
{
void *p;
- prof_ctx_t *old_ctx;
+ prof_tctx_t *old_tctx, *tctx;
- old_ctx = prof_ctx_get(oldptr);
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
- try_tcache_alloc, try_tcache_dalloc, arena, cnt);
- else {
- p = iralloct(oldptr, size, 0, alignment, zero,
- try_tcache_alloc, try_tcache_dalloc, arena);
+ old_tctx = prof_tctx_get(oldptr);
+ tctx = prof_alloc_prep(tsd, *usize, false);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ p = irallocx_prof_sample(tsd, oldptr, old_usize, size,
+ alignment, *usize, zero, tcache, arena, tctx);
+ } else {
+ p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
+ tcache, arena);
}
- if (p == NULL)
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, false);
return (NULL);
+ }
if (p == oldptr && alignment != 0) {
/*
@@ -1542,78 +2135,79 @@ irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
*/
*usize = isalloc(p, config_prof);
}
- prof_realloc(p, *usize, cnt, old_usize, old_ctx);
+ prof_realloc(tsd, p, *usize, tctx, false, old_usize, old_tctx);
return (p);
}
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
void *p;
- size_t usize, old_usize;
+ tsd_t *tsd;
+ size_t usize;
+ size_t old_usize;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
+ tcache_t *tcache;
assert(ptr != NULL);
assert(size != 0);
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
+ tsd = tsd_fetch();
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk;
- try_tcache_alloc = false;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- try_tcache_dalloc = (chunk == ptr || chunk->arena !=
- arenas[arena_ind]);
- arena = arenas[arena_ind];
- } else {
- try_tcache_alloc = true;
- try_tcache_dalloc = true;
+ if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ arena = arena_get(tsd, arena_ind, true, true);
+ if (unlikely(arena == NULL))
+ goto label_oom;
+ } else
arena = NULL;
- }
- if ((config_prof && opt_prof) || config_stats ||
- (config_valgrind && opt_valgrind))
- old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && opt_valgrind)
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, true);
+
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
assert(usize != 0);
- PROF_ALLOC_PREP(1, usize, cnt);
- p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
- try_tcache_alloc, try_tcache_dalloc, arena, cnt);
- if (p == NULL)
+ p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
+ zero, tcache, arena);
+ if (unlikely(p == NULL))
goto label_oom;
} else {
- p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
- try_tcache_dalloc, arena);
- if (p == NULL)
+ p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
+ tcache, arena);
+ if (unlikely(p == NULL))
goto label_oom;
- if (config_stats || (config_valgrind && opt_valgrind))
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
usize = isalloc(p, config_prof);
}
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
if (config_stats) {
- thread_allocated_t *ta;
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
+ JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
+ old_rzsize, false, zero);
return (p);
label_oom:
- if (config_xmalloc && opt_xmalloc) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
abort();
}
@@ -1623,11 +2217,11 @@ label_oom:
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
- size_t alignment, bool zero, arena_t *arena)
+ size_t alignment, bool zero)
{
size_t usize;
- if (ixalloc(ptr, size, extra, alignment, zero))
+ if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
return (old_usize);
usize = isalloc(ptr, config_prof);
@@ -1636,215 +2230,227 @@ ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
- size_t alignment, size_t max_usize, bool zero, arena_t *arena,
- prof_thr_cnt_t *cnt)
+ size_t alignment, size_t max_usize, bool zero, prof_tctx_t *tctx)
{
size_t usize;
- if (cnt == NULL)
+ if (tctx == NULL)
return (old_usize);
/* Use minimum usize to determine whether promotion may happen. */
- if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
- alignment)) <= SMALL_MAXCLASS) {
- if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero))
+ if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
+ SMALL_MAXCLASS) {
+ if (ixalloc(ptr, old_usize, SMALL_MAXCLASS+1,
+ (SMALL_MAXCLASS+1 >= size+extra) ? 0 : size+extra -
+ (SMALL_MAXCLASS+1), alignment, zero))
return (old_usize);
usize = isalloc(ptr, config_prof);
- if (max_usize < PAGE)
+ if (max_usize < LARGE_MINCLASS)
arena_prof_promoted(ptr, usize);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
- zero, arena);
+ zero);
}
return (usize);
}
JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
- size_t alignment, size_t max_usize, bool zero, arena_t *arena,
- prof_thr_cnt_t *cnt)
+ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero)
{
- size_t usize;
- prof_ctx_t *old_ctx;
+ size_t max_usize, usize;
+ prof_tctx_t *old_tctx, *tctx;
- old_ctx = prof_ctx_get(ptr);
- if ((uintptr_t)cnt != (uintptr_t)1U) {
+ old_tctx = prof_tctx_get(ptr);
+ /*
+ * usize isn't knowable before ixalloc() returns when extra is non-zero.
+ * Therefore, compute its maximum possible value and use that in
+ * prof_alloc_prep() to decide whether to capture a backtrace.
+ * prof_realloc() will use the actual usize to decide whether to sample.
+ */
+ max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
+ alignment);
+ tctx = prof_alloc_prep(tsd, max_usize, false);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
- alignment, zero, max_usize, arena, cnt);
+ alignment, zero, max_usize, tctx);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
- zero, arena);
+ zero);
}
- if (usize == old_usize)
+ if (unlikely(usize == old_usize)) {
+ prof_alloc_rollback(tsd, tctx, false);
return (usize);
- prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
+ }
+ prof_realloc(tsd, ptr, usize, tctx, false, old_usize, old_tctx);
return (usize);
}
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
+ tsd_t *tsd;
size_t usize, old_usize;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- arena_t *arena;
assert(ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
-
- if (arena_ind != UINT_MAX)
- arena = arenas[arena_ind];
- else
- arena = NULL;
+ tsd = tsd_fetch();
old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && opt_valgrind)
+ if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
- /*
- * usize isn't knowable before ixalloc() returns when extra is
- * non-zero. Therefore, compute its maximum possible value and
- * use that in PROF_ALLOC_PREP() to decide whether to capture a
- * backtrace. prof_realloc() will use the actual usize to
- * decide whether to sample.
- */
- size_t max_usize = (alignment == 0) ? s2u(size+extra) :
- sa2u(size+extra, alignment);
- PROF_ALLOC_PREP(1, max_usize, cnt);
- usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
- max_usize, zero, arena, cnt);
+ usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
+ alignment, zero);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
- zero, arena);
+ zero);
}
- if (usize == old_usize)
+ if (unlikely(usize == old_usize))
goto label_not_resized;
if (config_stats) {
- thread_allocated_t *ta;
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
- JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
+ JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
+ old_rzsize, false, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
return (usize);
}
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
size_t usize;
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc)
usize = ivsalloc(ptr, config_prof);
- else {
- assert(ptr != NULL);
+ else
usize = isalloc(ptr, config_prof);
- }
return (usize);
}
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
- size_t usize;
- UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache;
+ tsd_t *tsd;
+ tcache_t *tcache;
assert(ptr != NULL);
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- try_tcache = (chunk == ptr || chunk->arena !=
- arenas[arena_ind]);
+ tsd = tsd_fetch();
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
} else
- try_tcache = true;
+ tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
- if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
- if (config_prof && opt_prof) {
- if (config_stats == false && config_valgrind == false)
- usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
- }
- if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
- rzsize = p2rz(ptr);
- iqalloct(ptr, try_tcache);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+ ifree(tsd_fetch(), ptr, tcache);
}
-size_t
-je_nallocx(size_t size, int flags)
+JEMALLOC_ALWAYS_INLINE_C size_t
+inallocx(size_t size, int flags)
+{
+ size_t usize;
+
+ if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
+ usize = s2u(size);
+ else
+ usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ assert(usize != 0);
+ return (usize);
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_sdallocx(void *ptr, size_t size, int flags)
{
+ tsd_t *tsd;
+ tcache_t *tcache;
size_t usize;
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ usize = inallocx(size, flags);
+ assert(usize == isalloc(ptr, config_prof));
+
+ tsd = tsd_fetch();
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, false);
+
+ UTRACE(ptr, 0, 0);
+ isfree(tsd, ptr, usize, tcache);
+}
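
je_sdallocx() is new in this import: a sized-deallocation counterpart to dallocx() that trusts the caller-supplied size, so the inallocx() result must land in the same size class as the original allocation (the assert above checks exactly that). A minimal usage sketch:

#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t sz = 1000;
	void *p = mallocx(sz, 0);

	if (p == NULL)
		return (1);
	/* The size passed here must map to the same size class as the
	 * allocation; the original request size or nallocx(sz, 0) both work. */
	sdallocx(p, sz, 0);
	return (0);
}
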
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_nallocx(size_t size, int flags)
+{
assert(size != 0);
- if (malloc_init())
+ if (unlikely(malloc_init()))
return (0);
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- assert(usize != 0);
- return (usize);
+ return (inallocx(size, flags));
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
- if (malloc_init())
+ if (unlikely(malloc_init()))
return (EAGAIN);
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
- if (malloc_init())
+ if (unlikely(malloc_init()))
return (EAGAIN);
return (ctl_nametomib(name, mibp, miblenp));
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
- if (malloc_init())
+ if (unlikely(malloc_init()))
return (EAGAIN);
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
@@ -1852,18 +2458,18 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
stats_print(write_cb, cbopaque, opts);
}
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc)
ret = ivsalloc(ptr, config_prof);
else
- ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+ ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
return (ret);
}
@@ -1873,9 +2479,17 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
*/
/******************************************************************************/
/*
- * Begin experimental functions.
+ * Begin compatibility functions.
*/
-#ifdef JEMALLOC_EXPERIMENTAL
+
+#define ALLOCM_LG_ALIGN(la) (la)
+#define ALLOCM_ALIGN(a) (ffsl(a)-1)
+#define ALLOCM_ZERO ((int)0x40)
+#define ALLOCM_NO_MOVE ((int)0x80)
+
+#define ALLOCM_SUCCESS 0
+#define ALLOCM_ERR_OOM 1
+#define ALLOCM_ERR_NOT_MOVED 2
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
@@ -1952,9 +2566,17 @@ je_nallocm(size_t *rsize, size_t size, int flags)
return (ALLOCM_SUCCESS);
}
-#endif
+#undef ALLOCM_LG_ALIGN
+#undef ALLOCM_ALIGN
+#undef ALLOCM_ZERO
+#undef ALLOCM_NO_MOVE
+
+#undef ALLOCM_SUCCESS
+#undef ALLOCM_ERR_OOM
+#undef ALLOCM_ERR_NOT_MOVED
+
/*
- * End experimental functions.
+ * End compatibility functions.
*/
/******************************************************************************/
/*
@@ -1971,9 +2593,9 @@ je_nallocm(size_t *rsize, size_t size, int flags)
* fork/malloc races via the following functions it registers during
* initialization using pthread_atfork(), but of course that does no good if
* the allocator isn't fully initialized at fork time. The following library
- * constructor is a partial solution to this problem. It may still possible to
- * trigger the deadlock described above, but doing so would involve forking via
- * a library constructor that runs before jemalloc's runs.
+ * constructor is a partial solution to this problem. It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
*/
JEMALLOC_ATTR(constructor)
static void
@@ -1994,10 +2616,10 @@ _malloc_prefork(void)
unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (malloc_initialized == false)
+ if (!malloc_initialized())
return;
#endif
- assert(malloc_initialized);
+ assert(malloc_initialized());
/* Acquire all mutexes in a safe order. */
ctl_prefork();
@@ -2009,7 +2631,6 @@ _malloc_prefork(void)
}
chunk_prefork();
base_prefork();
- huge_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -2023,13 +2644,12 @@ _malloc_postfork(void)
unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (malloc_initialized == false)
+ if (!malloc_initialized())
return;
#endif
- assert(malloc_initialized);
+ assert(malloc_initialized());
/* Release all mutexes, now that fork() has completed. */
- huge_postfork_parent();
base_postfork_parent();
chunk_postfork_parent();
for (i = 0; i < narenas_total; i++) {
@@ -2046,10 +2666,9 @@ jemalloc_postfork_child(void)
{
unsigned i;
- assert(malloc_initialized);
+ assert(malloc_initialized());
/* Release all mutexes, now that fork() has completed. */
- huge_postfork_child();
base_postfork_child();
chunk_postfork_child();
for (i = 0; i < narenas_total; i++) {
@@ -2069,55 +2688,3 @@ _malloc_first_thread(void)
}
/******************************************************************************/
-/*
- * The following functions are used for TLS allocation/deallocation in static
- * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
- * is that these avoid accessing TLS variables.
- */
-
-static void *
-a0alloc(size_t size, bool zero)
-{
-
- if (malloc_init())
- return (NULL);
-
- if (size == 0)
- size = 1;
-
- if (size <= arena_maxclass)
- return (arena_malloc(arenas[0], size, zero, false));
- else
- return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
-}
-
-void *
-a0malloc(size_t size)
-{
-
- return (a0alloc(size, false));
-}
-
-void *
-a0calloc(size_t num, size_t size)
-{
-
- return (a0alloc(num * size, true));
-}
-
-void
-a0free(void *ptr)
-{
- arena_chunk_t *chunk;
-
- if (ptr == NULL)
- return;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr, false);
- else
- huge_dalloc(ptr, true);
-}
-
-/******************************************************************************/
diff --git a/contrib/jemalloc/src/mutex.c b/contrib/jemalloc/src/mutex.c
index 0f90d05..934d5aa 100644
--- a/contrib/jemalloc/src/mutex.c
+++ b/contrib/jemalloc/src/mutex.c
@@ -74,8 +74,8 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
{
return (((int (*)(pthread_mutex_t *, void *(*)(size_t, size_t)))
- __libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(
- mutex, calloc_cb));
+ __libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(mutex,
+ calloc_cb));
}
#endif
@@ -84,9 +84,13 @@ malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ InitializeSRWLock(&mutex->lock);
+# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT))
return (true);
+# endif
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -94,8 +98,8 @@ malloc_mutex_init(malloc_mutex_t *mutex)
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
- if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
- 0)
+ if (_pthread_mutex_init_calloc_cb(&mutex->lock,
+ bootstrap_calloc) != 0)
return (true);
}
#else
@@ -151,7 +155,7 @@ malloc_mutex_first_thread(void)
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
- base_calloc) != 0)
+ bootstrap_calloc) != 0)
return (true);
postponed_mutexes = postponed_mutexes->postponed_next;
}
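
On the Windows side, the hunk above switches to slim reader/writer locks when targeting Vista or later (_WIN32_WINNT >= 0x0600); unlike InitializeCriticalSectionAndSpinCount(), InitializeSRWLock() returns void and cannot fail, which is why that branch has no error path. A small sketch of the same primitive (locked_work is a hypothetical helper, not jemalloc code):

#include <windows.h>

/* SRW locks need no explicit destruction and initialization cannot fail,
 * unlike critical sections with a spin count. */
static SRWLOCK lock = SRWLOCK_INIT;

static void
locked_work(void)
{
	AcquireSRWLockExclusive(&lock);
	/* ... critical section ... */
	ReleaseSRWLockExclusive(&lock);
}
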
diff --git a/contrib/jemalloc/src/pages.c b/contrib/jemalloc/src/pages.c
new file mode 100644
index 0000000..83a167f
--- /dev/null
+++ b/contrib/jemalloc/src/pages.c
@@ -0,0 +1,173 @@
+#define JEMALLOC_PAGES_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+void *
+pages_map(void *addr, size_t size)
+{
+ void *ret;
+
+ assert(size != 0);
+
+#ifdef _WIN32
+ /*
+ * If VirtualAlloc can't allocate at the given address when one is
+ * given, it fails and returns NULL.
+ */
+ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+ PAGE_READWRITE);
+#else
+ /*
+ * We don't use MAP_FIXED here, because it can cause the *replacement*
+ * of existing mappings, and we only want to create new mappings.
+ */
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
+ assert(ret != NULL);
+
+ if (ret == MAP_FAILED)
+ ret = NULL;
+ else if (addr != NULL && ret != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right place.
+ */
+ pages_unmap(ret, size);
+ ret = NULL;
+ }
+#endif
+ assert(ret == NULL || (addr == NULL && ret != addr)
+ || (addr != NULL && ret == addr));
+ return (ret);
+}
+
+void
+pages_unmap(void *addr, size_t size)
+{
+
+#ifdef _WIN32
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+ if (munmap(addr, size) == -1)
+#endif
+ {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+ "VirtualFree"
+#else
+ "munmap"
+#endif
+ "(): %s\n", buf);
+ if (opt_abort)
+ abort();
+ }
+}
+
+void *
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
+{
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+ {
+ void *new_addr;
+
+ pages_unmap(addr, alloc_size);
+ new_addr = pages_map(ret, size);
+ if (new_addr == ret)
+ return (ret);
+ if (new_addr)
+ pages_unmap(new_addr, size);
+ return (NULL);
+ }
+#else
+ {
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0)
+ pages_unmap(addr, leadsize);
+ if (trailsize != 0)
+ pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ return (ret);
+ }
+#endif
+}
+
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit)
+{
+
+#ifndef _WIN32
+ /*
+ * The following decommit/commit implementation is functional, but
+	 * always disabled because it doesn't add value beyond improved
+ * debugging (at the cost of extra system calls) on systems that
+ * overcommit.
+ */
+ if (false) {
+ int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
+ void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
+ MAP_FIXED, -1, 0);
+ if (result == MAP_FAILED)
+ return (true);
+ if (result != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right
+ * place.
+ */
+ pages_unmap(result, size);
+ return (true);
+ }
+ return (false);
+ }
+#endif
+ return (true);
+}
+
+bool
+pages_commit(void *addr, size_t size)
+{
+
+ return (pages_commit_impl(addr, size, true));
+}
+
+bool
+pages_decommit(void *addr, size_t size)
+{
+
+ return (pages_commit_impl(addr, size, false));
+}
+
+bool
+pages_purge(void *addr, size_t size)
+{
+ bool unzeroed;
+
+#ifdef _WIN32
+ VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
+ unzeroed = true;
+#elif defined(JEMALLOC_HAVE_MADVISE)
+# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# define JEMALLOC_MADV_ZEROS true
+# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
+# else
+# error "No madvise(2) flag defined for purging unused dirty pages."
+# endif
+ int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
+ unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
+# undef JEMALLOC_MADV_PURGE
+# undef JEMALLOC_MADV_ZEROS
+#else
+ /* Last resort no-op. */
+ unzeroed = true;
+#endif
+ return (unzeroed);
+}
+
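
pages_purge() above chooses between MADV_DONTNEED and MADV_FREE depending on what configure detected, and reports whether the purged pages may still hold stale data. A standalone sketch of that trade-off, assuming a POSIX-style system that exposes madvise(2) via <sys/mman.h> (the helper name is invented for the example, and the preference order here is not necessarily jemalloc's):

#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>

/* Hint that [addr, addr+size) is unused. Returns true if the pages may
 * still contain old data afterwards, mirroring pages_purge()'s "unzeroed". */
static bool
purge_hint(void *addr, size_t size)
{
#if defined(MADV_FREE)
	/* Lazy reclaim under memory pressure; contents are never guaranteed
	 * to read back as zeroes. */
	(void)madvise(addr, size, MADV_FREE);
	return (true);
#elif defined(MADV_DONTNEED)
	/* On Linux this drops anonymous private pages, so later reads fault
	 * in zero pages; a failed call leaves the data in place. */
	return (madvise(addr, size, MADV_DONTNEED) != 0);
#else
	(void)addr;
	(void)size;
	return (true);	/* No purge hint available; assume dirty. */
#endif
}
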
diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
index 7722b7b..a05792f 100644
--- a/contrib/jemalloc/src/prof.c
+++ b/contrib/jemalloc/src/prof.c
@@ -14,14 +14,13 @@
/******************************************************************************/
/* Data. */
-malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)
-
bool opt_prof = false;
bool opt_prof_active = true;
+bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
-bool opt_prof_final = true;
+bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
@@ -31,25 +30,65 @@ char opt_prof_prefix[
#endif
1];
+/*
+ * Initialized as opt_prof_active, and accessed via
+ * prof_active_[gs]et{_unlocked,}().
+ */
+bool prof_active;
+static malloc_mutex_t prof_active_mtx;
+
+/*
+ * Initialized as opt_prof_thread_active_init, and accessed via
+ * prof_thread_active_init_[gs]et().
+ */
+static bool prof_thread_active_init;
+static malloc_mutex_t prof_thread_active_init_mtx;
+
+/*
+ * Initialized as opt_prof_gdump, and accessed via
+ * prof_gdump_[gs]et{_unlocked,}().
+ */
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
+
uint64_t prof_interval = 0;
-bool prof_promote;
+
+size_t lg_prof_sample;
/*
- * Table of mutexes that are shared among ctx's. These are leaf locks, so
- * there is no problem with using them for more than one ctx at the same time.
- * The primary motivation for this sharing though is that ctx's are ephemeral,
+ * Table of mutexes that are shared among gctx's. These are leaf locks, so
+ * there is no problem with using them for more than one gctx at the same time.
+ * The primary motivation for this sharing though is that gctx's are ephemeral,
* and destroying mutexes causes complications for systems that allocate when
* creating/destroying mutexes.
*/
-static malloc_mutex_t *ctx_locks;
-static unsigned cum_ctxs; /* Atomic counter. */
+static malloc_mutex_t *gctx_locks;
+static unsigned cum_gctxs; /* Atomic counter. */
+
+/*
+ * Table of mutexes that are shared among tdata's. No operations require
+ * holding multiple tdata locks, so there is no problem with using them for more
+ * than one tdata at the same time, even though a gctx lock may be acquired
+ * while holding a tdata lock.
+ */
+static malloc_mutex_t *tdata_locks;
/*
- * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
+ * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
* structure that knows about all backtraces currently captured.
*/
-static ckh_t bt2ctx;
-static malloc_mutex_t bt2ctx_mtx;
+static ckh_t bt2gctx;
+static malloc_mutex_t bt2gctx_mtx;
+
+/*
+ * Tree of all extant prof_tdata_t structures, regardless of state,
+ * {attached,detached,expired}.
+ */
+static prof_tdata_tree_t tdatas;
+static malloc_mutex_t tdatas_mtx;
+
+static uint64_t next_thr_uid;
+static malloc_mutex_t next_thr_uid_mtx;
static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
@@ -77,120 +116,203 @@ static int prof_dump_fd;
static bool prof_booted = false;
/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
+static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
+static bool prof_tdata_should_destroy(prof_tdata_t *tdata,
+ bool even_if_attached);
+static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached);
+static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
+
+/******************************************************************************/
+/* Red-black trees. */
+
+JEMALLOC_INLINE_C int
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
+{
+ uint64_t a_thr_uid = a->thr_uid;
+ uint64_t b_thr_uid = b->thr_uid;
+ int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+ if (ret == 0) {
+ uint64_t a_tctx_uid = a->tctx_uid;
+ uint64_t b_tctx_uid = b->tctx_uid;
+ ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < b_tctx_uid);
+ }
+ return (ret);
+}
+
+rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
+ tctx_link, prof_tctx_comp)
+
+JEMALLOC_INLINE_C int
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
+{
+ unsigned a_len = a->bt.len;
+ unsigned b_len = b->bt.len;
+ unsigned comp_len = (a_len < b_len) ? a_len : b_len;
+ int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
+ if (ret == 0)
+ ret = (a_len > b_len) - (a_len < b_len);
+ return (ret);
+}
+
+rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
+ prof_gctx_comp)
+
+JEMALLOC_INLINE_C int
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
+{
+ int ret;
+ uint64_t a_uid = a->thr_uid;
+ uint64_t b_uid = b->thr_uid;
+
+ ret = ((a_uid > b_uid) - (a_uid < b_uid));
+ if (ret == 0) {
+ uint64_t a_discrim = a->thr_discrim;
+ uint64_t b_discrim = b->thr_discrim;
+
+ ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+ }
+ return (ret);
+}
+
+rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
+ prof_tdata_comp)
+
+/******************************************************************************/
void
-bt_init(prof_bt_t *bt, void **vec)
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
{
+ prof_tdata_t *tdata;
cassert(config_prof);
- bt->vec = vec;
- bt->len = 0;
+ if (updated) {
+ /*
+ * Compute a new sample threshold. This isn't very important in
+ * practice, because this function is rarely executed, so the
+ * potential for sample bias is minimal except in contrived
+ * programs.
+ */
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata != NULL)
+ prof_sample_threshold_update(tctx->tdata);
+ }
+
+ if ((uintptr_t)tctx > (uintptr_t)1U) {
+ malloc_mutex_lock(tctx->tdata->lock);
+ tctx->prepared = false;
+ if (prof_tctx_should_destroy(tctx))
+ prof_tctx_destroy(tsd, tctx);
+ else
+ malloc_mutex_unlock(tctx->tdata->lock);
+ }
}
-static void
-bt_destroy(prof_bt_t *bt)
+void
+prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
- cassert(config_prof);
+ prof_tctx_set(ptr, tctx);
- idalloc(bt);
+ malloc_mutex_lock(tctx->tdata->lock);
+ tctx->cnts.curobjs++;
+ tctx->cnts.curbytes += usize;
+ if (opt_prof_accum) {
+ tctx->cnts.accumobjs++;
+ tctx->cnts.accumbytes += usize;
+ }
+ tctx->prepared = false;
+ malloc_mutex_unlock(tctx->tdata->lock);
}
-static prof_bt_t *
-bt_dup(prof_bt_t *bt)
+void
+prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
- prof_bt_t *ret;
- cassert(config_prof);
+ malloc_mutex_lock(tctx->tdata->lock);
+ assert(tctx->cnts.curobjs > 0);
+ assert(tctx->cnts.curbytes >= usize);
+ tctx->cnts.curobjs--;
+ tctx->cnts.curbytes -= usize;
- /*
- * Create a single allocation that has space for vec immediately
- * following the prof_bt_t structure. The backtraces that get
- * stored in the backtrace caches are copied from stack-allocated
- * temporary variables, so size is known at creation time. Making this
- * a contiguous object improves cache locality.
- */
- ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
- (bt->len * sizeof(void *)));
- if (ret == NULL)
- return (NULL);
- ret->vec = (void **)((uintptr_t)ret +
- QUANTUM_CEILING(sizeof(prof_bt_t)));
- memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
- ret->len = bt->len;
+ if (prof_tctx_should_destroy(tctx))
+ prof_tctx_destroy(tsd, tctx);
+ else
+ malloc_mutex_unlock(tctx->tdata->lock);
+}
- return (ret);
+void
+bt_init(prof_bt_t *bt, void **vec)
+{
+
+ cassert(config_prof);
+
+ bt->vec = vec;
+ bt->len = 0;
}
-static inline void
-prof_enter(prof_tdata_t *prof_tdata)
+JEMALLOC_INLINE_C void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
{
cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
- assert(prof_tdata->enq == false);
- prof_tdata->enq = true;
+ if (tdata != NULL) {
+ assert(!tdata->enq);
+ tdata->enq = true;
+ }
- malloc_mutex_lock(&bt2ctx_mtx);
+ malloc_mutex_lock(&bt2gctx_mtx);
}
-static inline void
-prof_leave(prof_tdata_t *prof_tdata)
+JEMALLOC_INLINE_C void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
{
- bool idump, gdump;
cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ malloc_mutex_unlock(&bt2gctx_mtx);
- malloc_mutex_unlock(&bt2ctx_mtx);
+ if (tdata != NULL) {
+ bool idump, gdump;
- assert(prof_tdata->enq);
- prof_tdata->enq = false;
- idump = prof_tdata->enq_idump;
- prof_tdata->enq_idump = false;
- gdump = prof_tdata->enq_gdump;
- prof_tdata->enq_gdump = false;
+ assert(tdata->enq);
+ tdata->enq = false;
+ idump = tdata->enq_idump;
+ tdata->enq_idump = false;
+ gdump = tdata->enq_gdump;
+ tdata->enq_gdump = false;
- if (idump)
- prof_idump();
- if (gdump)
- prof_gdump();
+ if (idump)
+ prof_idump();
+ if (gdump)
+ prof_gdump();
+ }
}
#ifdef JEMALLOC_PROF_LIBUNWIND
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
+prof_backtrace(prof_bt_t *bt)
{
- unw_context_t uc;
- unw_cursor_t cursor;
- unsigned i;
- int err;
+ int nframes;
cassert(config_prof);
assert(bt->len == 0);
assert(bt->vec != NULL);
- unw_getcontext(&uc);
- unw_init_local(&cursor, &uc);
-
- /* Throw away (nignore+1) stack frames, if that many exist. */
- for (i = 0; i < nignore + 1; i++) {
- err = unw_step(&cursor);
- if (err <= 0)
- return;
- }
-
- /*
- * Iterate over stack frames until there are no more, or until no space
- * remains in bt.
- */
- for (i = 0; i < PROF_BT_MAX; i++) {
- unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
- bt->len++;
- err = unw_step(&cursor);
- if (err <= 0)
- break;
- }
+ nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
+ if (nframes <= 0)
+ return;
+ bt->len = nframes;
}
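
With libunwind, the rewritten prof_backtrace() above no longer steps an unwind cursor by hand or skips nignore frames; it hands the whole walk to unw_backtrace(). A minimal standalone sketch of that one call (not part of this diff; PROF_BT_MAX is reused here only as an arbitrary capacity, and the program links against -lunwind):

    #define UNW_LOCAL_ONLY
    #include <libunwind.h>
    #include <stdio.h>

    #define PROF_BT_MAX 128

    static void
    show_backtrace(void)
    {
            void *vec[PROF_BT_MAX];
            int i, nframes;

            /* Fill vec with up to PROF_BT_MAX return addresses, innermost first. */
            nframes = unw_backtrace(vec, PROF_BT_MAX);
            if (nframes <= 0)
                    return; /* Mirror the diff: treat failure as an empty trace. */
            for (i = 0; i < nframes; i++)
                    printf("frame %d: %p\n", i, vec[i]);
    }

    int
    main(void)
    {
            show_backtrace();
            return (0);
    }
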
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
@@ -206,25 +328,25 @@ static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+ void *ip;
cassert(config_prof);
- if (data->nignore > 0)
- data->nignore--;
- else {
- data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
- data->bt->len++;
- if (data->bt->len == data->max)
- return (_URC_END_OF_STACK);
- }
+ ip = (void *)_Unwind_GetIP(context);
+ if (ip == NULL)
+ return (_URC_END_OF_STACK);
+ data->bt->vec[data->bt->len] = ip;
+ data->bt->len++;
+ if (data->bt->len == data->max)
+ return (_URC_END_OF_STACK);
return (_URC_NO_REASON);
}
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
+prof_backtrace(prof_bt_t *bt)
{
- prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};
+ prof_unwind_data_t data = {bt, PROF_BT_MAX};
cassert(config_prof);
@@ -232,25 +354,22 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
}
#elif (defined(JEMALLOC_PROF_GCC))
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
+prof_backtrace(prof_bt_t *bt)
{
#define BT_FRAME(i) \
- if ((i) < nignore + PROF_BT_MAX) { \
+ if ((i) < PROF_BT_MAX) { \
void *p; \
if (__builtin_frame_address(i) == 0) \
return; \
p = __builtin_return_address(i); \
if (p == NULL) \
return; \
- if (i >= nignore) { \
- bt->vec[(i) - nignore] = p; \
- bt->len = (i) - nignore + 1; \
- } \
+ bt->vec[(i)] = p; \
+ bt->len = (i) + 1; \
} else \
return;
cassert(config_prof);
- assert(nignore <= 3);
BT_FRAME(0)
BT_FRAME(1)
@@ -392,16 +511,11 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
BT_FRAME(125)
BT_FRAME(126)
BT_FRAME(127)
-
- /* Extras to compensate for nignore. */
- BT_FRAME(128)
- BT_FRAME(129)
- BT_FRAME(130)
#undef BT_FRAME
}
#else
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
+prof_backtrace(prof_bt_t *bt)
{
cassert(config_prof);
@@ -410,256 +524,393 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
#endif
static malloc_mutex_t *
-prof_ctx_mutex_choose(void)
+prof_gctx_mutex_choose(void)
{
- unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
+ unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
- return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
+ return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
}
-static void
-prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt)
+static malloc_mutex_t *
+prof_tdata_mutex_choose(uint64_t thr_uid)
{
- ctx->bt = bt;
- ctx->lock = prof_ctx_mutex_choose();
+ return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
+}
+
+static prof_gctx_t *
+prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
+{
+ /*
+ * Create a single allocation that has space for vec of length bt->len.
+ */
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
+ vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true),
+ true, NULL);
+ if (gctx == NULL)
+ return (NULL);
+ gctx->lock = prof_gctx_mutex_choose();
/*
* Set nlimbo to 1, in order to avoid a race condition with
- * prof_ctx_merge()/prof_ctx_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- ctx->nlimbo = 1;
- ql_elm_new(ctx, dump_link);
- memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t));
- ql_new(&ctx->cnts_ql);
+ gctx->nlimbo = 1;
+ tctx_tree_new(&gctx->tctxs);
+ /* Duplicate bt. */
+ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
+ gctx->bt.vec = gctx->vec;
+ gctx->bt.len = bt->len;
+ return (gctx);
}
static void
-prof_ctx_destroy(prof_ctx_t *ctx)
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
+ prof_tdata_t *tdata)
{
- prof_tdata_t *prof_tdata;
cassert(config_prof);
/*
- * Check that ctx is still unused by any thread cache before destroying
- * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
- * condition with this function, as does prof_ctx_merge() in order to
- * avoid a race between the main body of prof_ctx_merge() and entry
+ * Check that gctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_tctx_destroy() in order to
+ * avoid a race between the main body of prof_tctx_destroy() and entry
* into this function.
*/
- prof_tdata = prof_tdata_get(false);
- assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
- prof_enter(prof_tdata);
- malloc_mutex_lock(ctx->lock);
- if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
- ctx->nlimbo == 1) {
- assert(ctx->cnt_merged.curbytes == 0);
- assert(ctx->cnt_merged.accumobjs == 0);
- assert(ctx->cnt_merged.accumbytes == 0);
- /* Remove ctx from bt2ctx. */
- if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+ prof_enter(tsd, tdata_self);
+ malloc_mutex_lock(gctx->lock);
+ assert(gctx->nlimbo != 0);
+ if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
+ /* Remove gctx from bt2gctx. */
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
not_reached();
- prof_leave(prof_tdata);
- /* Destroy ctx. */
- malloc_mutex_unlock(ctx->lock);
- bt_destroy(ctx->bt);
- idalloc(ctx);
+ prof_leave(tsd, tdata_self);
+ /* Destroy gctx. */
+ malloc_mutex_unlock(gctx->lock);
+ idalloctm(tsd, gctx, tcache_get(tsd, false), true);
} else {
/*
- * Compensate for increment in prof_ctx_merge() or
+ * Compensate for increment in prof_tctx_destroy() or
* prof_lookup().
*/
- ctx->nlimbo--;
- malloc_mutex_unlock(ctx->lock);
- prof_leave(prof_tdata);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(gctx->lock);
+ prof_leave(tsd, tdata_self);
}
}
-static void
-prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+/* tctx->tdata->lock must be held. */
+static bool
+prof_tctx_should_destroy(prof_tctx_t *tctx)
{
- bool destroy;
- cassert(config_prof);
+ if (opt_prof_accum)
+ return (false);
+ if (tctx->cnts.curobjs != 0)
+ return (false);
+ if (tctx->prepared)
+ return (false);
+ return (true);
+}
+
+static bool
+prof_gctx_should_destroy(prof_gctx_t *gctx)
+{
+
+ if (opt_prof_accum)
+ return (false);
+ if (!tctx_tree_empty(&gctx->tctxs))
+ return (false);
+ if (gctx->nlimbo != 0)
+ return (false);
+ return (true);
+}
- /* Merge cnt stats and detach from ctx. */
- malloc_mutex_lock(ctx->lock);
- ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
- ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
- ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
- ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
- ql_remove(&ctx->cnts_ql, cnt, cnts_link);
- if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
- ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
+/* tctx->tdata->lock is held upon entry, and released before return. */
+static void
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
+{
+ prof_tdata_t *tdata = tctx->tdata;
+ prof_gctx_t *gctx = tctx->gctx;
+ bool destroy_tdata, destroy_tctx, destroy_gctx;
+
+ assert(tctx->cnts.curobjs == 0);
+ assert(tctx->cnts.curbytes == 0);
+ assert(!opt_prof_accum);
+ assert(tctx->cnts.accumobjs == 0);
+ assert(tctx->cnts.accumbytes == 0);
+
+ ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+ destroy_tdata = prof_tdata_should_destroy(tdata, false);
+ malloc_mutex_unlock(tdata->lock);
+
+ malloc_mutex_lock(gctx->lock);
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ tctx_tree_remove(&gctx->tctxs, tctx);
+ destroy_tctx = true;
+ if (prof_gctx_should_destroy(gctx)) {
+ /*
+ * Increment gctx->nlimbo in order to keep another
+ * thread from winning the race to destroy gctx while
+ * this one has gctx->lock dropped. Without this, it
+ * would be possible for another thread to:
+ *
+ * 1) Sample an allocation associated with gctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_gctx_try_destroy(gctx).
+ *
+ * The result would be that gctx no longer exists by the
+ * time this thread accesses it in
+ * prof_gctx_try_destroy().
+ */
+ gctx->nlimbo++;
+ destroy_gctx = true;
+ } else
+ destroy_gctx = false;
+ break;
+ case prof_tctx_state_dumping:
/*
- * Increment ctx->nlimbo in order to keep another thread from
- * winning the race to destroy ctx while this one has ctx->lock
- * dropped. Without this, it would be possible for another
- * thread to:
- *
- * 1) Sample an allocation associated with ctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_ctx_destroy(ctx).
- *
- * The result would be that ctx no longer exists by the time
- * this thread accesses it in prof_ctx_destroy().
+ * A dumping thread needs tctx to remain valid until dumping
+ * has finished. Change state such that the dumping thread will
+ * complete destruction during a late dump iteration phase.
*/
- ctx->nlimbo++;
- destroy = true;
- } else
- destroy = false;
- malloc_mutex_unlock(ctx->lock);
- if (destroy)
- prof_ctx_destroy(ctx);
+ tctx->state = prof_tctx_state_purgatory;
+ destroy_tctx = false;
+ destroy_gctx = false;
+ break;
+ default:
+ not_reached();
+ destroy_tctx = false;
+ destroy_gctx = false;
+ }
+ malloc_mutex_unlock(gctx->lock);
+ if (destroy_gctx) {
+ prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
+ tdata);
+ }
+
+ if (destroy_tdata)
+ prof_tdata_destroy(tsd, tdata, false);
+
+ if (destroy_tctx)
+ idalloctm(tsd, tctx, tcache_get(tsd, false), true);
}
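
The nlimbo increment in the nominal case above is the same keep-alive trick used throughout this file: pin the object while its lock is dropped so a racing destroyer cannot free it first. The sketch below compresses that pattern into one function with hypothetical obj_t/obj_release() names (it is an illustration of the idiom, not jemalloc's flow); build with -lpthread, error handling omitted:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct {
            pthread_mutex_t lock;
            unsigned        nlimbo; /* Threads keeping the object in limbo. */
            bool            empty;  /* Stand-in for tctx_tree_empty(). */
    } obj_t;

    /* Called with obj->lock held; returns with it released (and obj possibly
     * freed). */
    static void
    obj_release(obj_t *obj)
    {
            bool destroy;

            obj->nlimbo++;                  /* Pin obj across the unlock. */
            pthread_mutex_unlock(&obj->lock);

            /* ... work that must not hold obj->lock goes here ... */

            pthread_mutex_lock(&obj->lock);
            obj->nlimbo--;
            destroy = (obj->empty && obj->nlimbo == 0);
            pthread_mutex_unlock(&obj->lock);
            if (destroy) {
                    pthread_mutex_destroy(&obj->lock);
                    free(obj);              /* Last reference: safe to destroy. */
            }
    }

    int
    main(void)
    {
            obj_t *obj = calloc(1, sizeof(*obj));

            pthread_mutex_init(&obj->lock, NULL);
            obj->empty = true;
            pthread_mutex_lock(&obj->lock);
            obj_release(obj);       /* empty && nlimbo == 0, so obj is freed. */
            return (0);
    }
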
static bool
-prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey,
- prof_ctx_t **p_ctx, bool *p_new_ctx)
+prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
{
union {
- prof_ctx_t *p;
+ prof_gctx_t *p;
void *v;
- } ctx;
+ } gctx;
union {
prof_bt_t *p;
void *v;
} btkey;
- bool new_ctx;
+ bool new_gctx;
- prof_enter(prof_tdata);
- if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
- ctx.v = imalloc(sizeof(prof_ctx_t));
- if (ctx.v == NULL) {
- prof_leave(prof_tdata);
- return (true);
- }
- btkey.p = bt_dup(bt);
- if (btkey.v == NULL) {
- prof_leave(prof_tdata);
- idalloc(ctx.v);
+ gctx.p = prof_gctx_create(tsd, bt);
+ if (gctx.v == NULL) {
+ prof_leave(tsd, tdata);
return (true);
}
- prof_ctx_init(ctx.p, btkey.p);
- if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
+ btkey.p = &gctx.p->bt;
+ if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */
- prof_leave(prof_tdata);
- idalloc(btkey.v);
- idalloc(ctx.v);
+ prof_leave(tsd, tdata);
+ idalloctm(tsd, gctx.v, tcache_get(tsd, false), true);
return (true);
}
- new_ctx = true;
+ new_gctx = true;
} else {
/*
* Increment nlimbo, in order to avoid a race condition with
- * prof_ctx_merge()/prof_ctx_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- malloc_mutex_lock(ctx.p->lock);
- ctx.p->nlimbo++;
- malloc_mutex_unlock(ctx.p->lock);
- new_ctx = false;
+ malloc_mutex_lock(gctx.p->lock);
+ gctx.p->nlimbo++;
+ malloc_mutex_unlock(gctx.p->lock);
+ new_gctx = false;
}
- prof_leave(prof_tdata);
+ prof_leave(tsd, tdata);
*p_btkey = btkey.v;
- *p_ctx = ctx.p;
- *p_new_ctx = new_ctx;
+ *p_gctx = gctx.p;
+ *p_new_gctx = new_gctx;
return (false);
}
-prof_thr_cnt_t *
-prof_lookup(prof_bt_t *bt)
+prof_tctx_t *
+prof_lookup(tsd_t *tsd, prof_bt_t *bt)
{
union {
- prof_thr_cnt_t *p;
+ prof_tctx_t *p;
void *v;
} ret;
- prof_tdata_t *prof_tdata;
+ prof_tdata_t *tdata;
+ bool not_found;
cassert(config_prof);
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
return (NULL);
- if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
+ malloc_mutex_lock(tdata->lock);
+ not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
+ if (!not_found) /* Note double negative! */
+ ret.p->prepared = true;
+ malloc_mutex_unlock(tdata->lock);
+ if (not_found) {
+ tcache_t *tcache;
void *btkey;
- prof_ctx_t *ctx;
- bool new_ctx;
+ prof_gctx_t *gctx;
+ bool new_gctx, error;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
- if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx))
+ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
+ &new_gctx))
return (NULL);
- /* Link a prof_thd_cnt_t into ctx for this thread. */
- if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
- assert(ckh_count(&prof_tdata->bt2cnt) > 0);
- /*
- * Flush the least recently used cnt in order to keep
- * bt2cnt from becoming too large.
- */
- ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
- assert(ret.v != NULL);
- if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
- NULL, NULL))
- not_reached();
- ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
- prof_ctx_merge(ret.p->ctx, ret.p);
- /* ret can now be re-used. */
- } else {
- assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
- /* Allocate and partially initialize a new cnt. */
- ret.v = imalloc(sizeof(prof_thr_cnt_t));
- if (ret.p == NULL) {
- if (new_ctx)
- prof_ctx_destroy(ctx);
- return (NULL);
- }
- ql_elm_new(ret.p, cnts_link);
- ql_elm_new(ret.p, lru_link);
+ /* Link a prof_tctx_t into gctx for this thread. */
+ tcache = tcache_get(tsd, true);
+ ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true,
+ NULL);
+ if (ret.p == NULL) {
+ if (new_gctx)
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ return (NULL);
}
- /* Finish initializing ret. */
- ret.p->ctx = ctx;
- ret.p->epoch = 0;
+ ret.p->tdata = tdata;
+ ret.p->thr_uid = tdata->thr_uid;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
- if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) {
- if (new_ctx)
- prof_ctx_destroy(ctx);
- idalloc(ret.v);
+ ret.p->gctx = gctx;
+ ret.p->tctx_uid = tdata->tctx_uid_next++;
+ ret.p->prepared = true;
+ ret.p->state = prof_tctx_state_initializing;
+ malloc_mutex_lock(tdata->lock);
+ error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
+ malloc_mutex_unlock(tdata->lock);
+ if (error) {
+ if (new_gctx)
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ idalloctm(tsd, ret.v, tcache, true);
return (NULL);
}
- ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
- malloc_mutex_lock(ctx->lock);
- ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
- ctx->nlimbo--;
- malloc_mutex_unlock(ctx->lock);
- } else {
- /* Move ret to the front of the LRU. */
- ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
- ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+ malloc_mutex_lock(gctx->lock);
+ ret.p->state = prof_tctx_state_nominal;
+ tctx_tree_insert(&gctx->tctxs, ret.p);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(gctx->lock);
}
return (ret.p);
}
+void
+prof_sample_threshold_update(prof_tdata_t *tdata)
+{
+ /*
+ * The body of this function is compiled out unless heap profiling is
+ * enabled, so that it is possible to compile jemalloc with floating
+ * point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a
+ * workaround for versions of glibc that don't properly save/restore
+ * floating point registers during dynamic lazy symbol loading (which
+ * internally calls into whatever malloc implementation happens to be
+ * integrated into the application). Note that some compilers (e.g.
+ * gcc 4.8) may use floating point registers for fast memory moves, so
+ * jemalloc must be compiled with such optimizations disabled (e.g.
+ * -mno-sse) in order for the workaround to be complete.
+ */
+#ifdef JEMALLOC_PROF
+ uint64_t r;
+ double u;
+
+ if (!config_prof)
+ return;
+
+ if (lg_prof_sample == 0) {
+ tdata->bytes_until_sample = 0;
+ return;
+ }
+
+ /*
+ * Compute sample interval as a geometrically distributed random
+ * variable with mean (2^lg_prof_sample).
+ *
+ * __ __
+ * | log(u) | 1
+ * tdata->bytes_until_sample = | -------- |, where p = ---------------
+ * | log(1-p) | lg_prof_sample
+ * 2
+ *
+ * For more information on the math, see:
+ *
+ * Non-Uniform Random Variate Generation
+ * Luc Devroye
+ * Springer-Verlag, New York, 1986
+ * pp 500
+ * (http://luc.devroye.org/rnbookindex.html)
+ */
+ prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
+ UINT64_C(1442695040888963407));
+ u = (double)r * (1.0/9007199254740992.0L);
+ tdata->bytes_until_sample = (uint64_t)(log(u) /
+ log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ + (uint64_t)1U;
+#endif
+}
+
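
The sampling math above can be exercised on its own. The sketch below keeps the same expression for bytes_until_sample but, as an assumption for brevity, replaces jemalloc's prng64() with drand48(), so the random stream differs while the arithmetic matches; build with -lm:

    #include <float.h>
    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static uint64_t
    sample_interval(unsigned lg_prof_sample)
    {
            double u, p;

            if (lg_prof_sample == 0)
                    return (0);     /* Sample every allocation. */
            u = drand48();          /* Uniform in [0, 1). */
            if (u == 0.0)
                    u = DBL_MIN;    /* Avoid log(0). */
            p = 1.0 / (double)((uint64_t)1U << lg_prof_sample);
            /* Geometric variate with mean 2^lg_prof_sample bytes. */
            return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
    }

    int
    main(void)
    {
            /* lg_prof_sample = 19 gives a mean interval of 512 KiB. */
            printf("interval: %llu\n",
                (unsigned long long)sample_interval(19));
            return (0);
    }
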
+#ifdef JEMALLOC_JET
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ size_t *tdata_count = (size_t *)arg;
+
+ (*tdata_count)++;
+
+ return (NULL);
+}
+
+size_t
+prof_tdata_count(void)
+{
+ size_t tdata_count = 0;
+
+ malloc_mutex_lock(&tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+ (void *)&tdata_count);
+ malloc_mutex_unlock(&tdatas_mtx);
+
+ return (tdata_count);
+}
+#endif
+
#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
size_t bt_count;
- prof_tdata_t *prof_tdata;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
return (0);
- prof_enter(prof_tdata);
- bt_count = ckh_count(&bt2ctx);
- prof_leave(prof_tdata);
+ malloc_mutex_lock(&bt2gctx_mtx);
+ bt_count = ckh_count(&bt2gctx);
+ malloc_mutex_unlock(&bt2gctx_mtx);
return (bt_count);
}
@@ -675,7 +926,7 @@ prof_dump_open(bool propagate_err, const char *filename)
int fd;
fd = creat(filename, 0644);
- if (fd == -1 && propagate_err == false) {
+ if (fd == -1 && !propagate_err) {
malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
filename);
if (opt_abort)
@@ -700,7 +951,7 @@ prof_dump_flush(bool propagate_err)
err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
- if (propagate_err == false) {
+ if (!propagate_err) {
malloc_write("<jemalloc>: write() failed during heap "
"profile flush\n");
if (opt_abort)
@@ -756,7 +1007,7 @@ prof_dump_write(bool propagate_err, const char *s)
return (false);
}
-JEMALLOC_ATTR(format(printf, 2, 3))
+JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
@@ -772,176 +1023,355 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
return (ret);
}
+/* tctx->tdata->lock is held. */
static void
-prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx,
- prof_ctx_list_t *ctx_ql)
+prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{
- prof_thr_cnt_t *thr_cnt;
- prof_cnt_t tcnt;
-
- cassert(config_prof);
-
- malloc_mutex_lock(ctx->lock);
-
- /*
- * Increment nlimbo so that ctx won't go away before dump.
- * Additionally, link ctx into the dump list so that it is included in
- * prof_dump()'s second pass.
- */
- ctx->nlimbo++;
- ql_tail_insert(ctx_ql, ctx, dump_link);
-
- memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
- ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
- volatile unsigned *epoch = &thr_cnt->epoch;
-
- while (true) {
- unsigned epoch0 = *epoch;
- /* Make sure epoch is even. */
- if (epoch0 & 1U)
- continue;
+ malloc_mutex_lock(tctx->gctx->lock);
- memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ malloc_mutex_unlock(tctx->gctx->lock);
+ return;
+ case prof_tctx_state_nominal:
+ tctx->state = prof_tctx_state_dumping;
+ malloc_mutex_unlock(tctx->gctx->lock);
- /* Terminate if epoch didn't change while reading. */
- if (*epoch == epoch0)
- break;
- }
+ memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
- ctx->cnt_summed.curobjs += tcnt.curobjs;
- ctx->cnt_summed.curbytes += tcnt.curbytes;
+ tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
if (opt_prof_accum) {
- ctx->cnt_summed.accumobjs += tcnt.accumobjs;
- ctx->cnt_summed.accumbytes += tcnt.accumbytes;
+ tdata->cnt_summed.accumobjs +=
+ tctx->dump_cnts.accumobjs;
+ tdata->cnt_summed.accumbytes +=
+ tctx->dump_cnts.accumbytes;
}
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ not_reached();
}
+}
- if (ctx->cnt_summed.curobjs != 0)
- (*leak_nctx)++;
+/* gctx->lock is held. */
+static void
+prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
+{
- /* Add to cnt_all. */
- cnt_all->curobjs += ctx->cnt_summed.curobjs;
- cnt_all->curbytes += ctx->cnt_summed.curbytes;
+ gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
if (opt_prof_accum) {
- cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
- cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
+ gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
+ gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
+ }
+}
+
+/* tctx->gctx is held. */
+static prof_tctx_t *
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
+{
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_tctx_merge_gctx(tctx, tctx->gctx);
+ break;
+ default:
+ not_reached();
}
- malloc_mutex_unlock(ctx->lock);
+ return (NULL);
}
-static bool
-prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
+/* gctx->lock is held. */
+static prof_tctx_t *
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
+ bool propagate_err = *(bool *)arg;
+
+ if (prof_dump_printf(propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes,
+ tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes))
+ return (tctx);
+ return (NULL);
+}
- if (opt_lg_prof_sample == 0) {
- if (prof_dump_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
- cnt_all->curobjs, cnt_all->curbytes,
- cnt_all->accumobjs, cnt_all->accumbytes))
- return (true);
- } else {
- if (prof_dump_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
- cnt_all->curobjs, cnt_all->curbytes,
- cnt_all->accumobjs, cnt_all->accumbytes,
- ((uint64_t)1U << opt_lg_prof_sample)))
- return (true);
+/* tctx->gctx is held. */
+static prof_tctx_t *
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
+{
+ prof_tctx_t *ret;
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ tctx->state = prof_tctx_state_nominal;
+ break;
+ case prof_tctx_state_purgatory:
+ ret = tctx;
+ goto label_return;
+ default:
+ not_reached();
}
- return (false);
+ ret = NULL;
+label_return:
+ return (ret);
}
static void
-prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
+prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
+{
+
+ cassert(config_prof);
+
+ malloc_mutex_lock(gctx->lock);
+
+ /*
+ * Increment nlimbo so that gctx won't go away before dump.
+ * Additionally, link gctx into the dump list so that it is included in
+ * prof_dump()'s second pass.
+ */
+ gctx->nlimbo++;
+ gctx_tree_insert(gctxs, gctx);
+
+ memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
+
+ malloc_mutex_unlock(gctx->lock);
+}
+
+static prof_gctx_t *
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
+ size_t *leak_ngctx = (size_t *)arg;
+
+ malloc_mutex_lock(gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
+ if (gctx->cnt_summed.curobjs != 0)
+ (*leak_ngctx)++;
+ malloc_mutex_unlock(gctx->lock);
- ctx->nlimbo--;
- ql_remove(ctx_ql, ctx, dump_link);
+ return (NULL);
}
static void
-prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
+{
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ prof_gctx_t *gctx;
+
+ /*
+ * Standard tree iteration won't work here, because as soon as we
+ * decrement gctx->nlimbo and unlock gctx, another thread can
+ * concurrently destroy it, which will corrupt the tree. Therefore,
+ * tear down the tree one node at a time during iteration.
+ */
+ while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+ gctx_tree_remove(gctxs, gctx);
+ malloc_mutex_lock(gctx->lock);
+ {
+ prof_tctx_t *next;
+
+ next = NULL;
+ do {
+ prof_tctx_t *to_destroy =
+ tctx_tree_iter(&gctx->tctxs, next,
+ prof_tctx_finish_iter, NULL);
+ if (to_destroy != NULL) {
+ next = tctx_tree_next(&gctx->tctxs,
+ to_destroy);
+ tctx_tree_remove(&gctx->tctxs,
+ to_destroy);
+ idalloctm(tsd, to_destroy,
+ tcache_get(tsd, false), true);
+ } else
+ next = NULL;
+ } while (next != NULL);
+ }
+ gctx->nlimbo--;
+ if (prof_gctx_should_destroy(gctx)) {
+ gctx->nlimbo++;
+ malloc_mutex_unlock(gctx->lock);
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ } else
+ malloc_mutex_unlock(gctx->lock);
+ }
+}
+
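
The comment at the top of prof_gctx_finish() explains why it cannot use an ordinary read-only tree walk: a node must be detached (or pinned via nlimbo) before its lock is dropped, or a concurrent destroyer can free it mid-iteration. The detach-then-release shape of that loop, reduced to a plain singly linked list with hypothetical names (an illustration, not jemalloc's rb-tree API):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node_s node_t;
    struct node_s {
            int     val;
            node_t  *next;
    };

    /* Tear the list down one detached node at a time: each node is unlinked
     * from the shared structure before it is released, so no later step ever
     * touches memory that could have been reclaimed underneath it. */
    static void
    list_finish(node_t **head)
    {
            node_t *n;

            while ((n = *head) != NULL) {
                    *head = n->next;        /* Detach first... */
                    printf("destroying %d\n", n->val);
                    free(n);                /* ...then release. */
            }
    }

    int
    main(void)
    {
            node_t *head = NULL;
            int i;

            for (i = 0; i < 3; i++) {
                    node_t *n = malloc(sizeof(*n));
                    n->val = i;
                    n->next = head;
                    head = n;
            }
            list_finish(&head);
            return (0);
    }
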
+static prof_tdata_t *
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
+
+ malloc_mutex_lock(tdata->lock);
+ if (!tdata->expired) {
+ size_t tabind;
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } tctx;
+
+ tdata->dumping = true;
+ memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
+ for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
+ &tctx.v);)
+ prof_tctx_merge_tdata(tctx.p, tdata);
+
+ cnt_all->curobjs += tdata->cnt_summed.curobjs;
+ cnt_all->curbytes += tdata->cnt_summed.curbytes;
+ if (opt_prof_accum) {
+ cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
+ cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
+ }
+ } else
+ tdata->dumping = false;
+ malloc_mutex_unlock(tdata->lock);
+
+ return (NULL);
+}
+
+static prof_tdata_t *
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ bool propagate_err = *(bool *)arg;
+
+ if (!tdata->dumping)
+ return (NULL);
+
+ if (prof_dump_printf(propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
+ tdata->thr_uid, tdata->cnt_summed.curobjs,
+ tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
+ tdata->cnt_summed.accumbytes,
+ (tdata->thread_name != NULL) ? " " : "",
+ (tdata->thread_name != NULL) ? tdata->thread_name : ""))
+ return (tdata);
+ return (NULL);
+}
+
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
+#endif
+static bool
+prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
+ bool ret;
- malloc_mutex_lock(ctx->lock);
- prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
- malloc_mutex_unlock(ctx->lock);
+ if (prof_dump_printf(propagate_err,
+ "heap_v2/%"FMTu64"\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
+ cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
+ return (true);
+
+ malloc_mutex_lock(&tdatas_mtx);
+ ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
+ (void *)&propagate_err) != NULL);
+ malloc_mutex_unlock(&tdatas_mtx);
+ return (ret);
}
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header)
+prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
+#endif
+/* gctx->lock is held. */
static bool
-prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt,
- prof_ctx_list_t *ctx_ql)
+prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
+ prof_gctx_tree_t *gctxs)
{
bool ret;
unsigned i;
cassert(config_prof);
- /*
- * Current statistics can sum to 0 as a result of unmerged per thread
- * statistics. Additionally, interval- and growth-triggered dumps can
- * occur between the time a ctx is created and when its statistics are
- * filled in. Avoid dumping any ctx that is an artifact of either
- * implementation detail.
- */
- malloc_mutex_lock(ctx->lock);
- if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
- (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
- assert(ctx->cnt_summed.curobjs == 0);
- assert(ctx->cnt_summed.curbytes == 0);
- assert(ctx->cnt_summed.accumobjs == 0);
- assert(ctx->cnt_summed.accumbytes == 0);
+	/* Avoid dumping gctx's that have no useful data. */
+ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
+ (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
+ assert(gctx->cnt_summed.curobjs == 0);
+ assert(gctx->cnt_summed.curbytes == 0);
+ assert(gctx->cnt_summed.accumobjs == 0);
+ assert(gctx->cnt_summed.accumbytes == 0);
ret = false;
goto label_return;
}
- if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @",
- ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
- ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) {
+ if (prof_dump_printf(propagate_err, "@")) {
ret = true;
goto label_return;
}
-
for (i = 0; i < bt->len; i++) {
- if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
+ if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
(uintptr_t)bt->vec[i])) {
ret = true;
goto label_return;
}
}
- if (prof_dump_write(propagate_err, "\n")) {
+ if (prof_dump_printf(propagate_err,
+ "\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
+ gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
+ ret = true;
+ goto label_return;
+ }
+
+ if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
+ (void *)&propagate_err) != NULL) {
ret = true;
goto label_return;
}
ret = false;
label_return:
- prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
- malloc_mutex_unlock(ctx->lock);
return (ret);
}
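
Taken together, prof_dump_header() and the per-gctx/per-tctx iterators above define the new heap_v2 dump layout: one header line naming the sample interval, an aggregate " t*:" line, and then for each backtrace an "@"-prefixed address list followed by its own aggregate and per-thread lines. With purely illustrative counts and addresses (and the default 2^19-byte sample interval), a fragment looks roughly like:

    heap_v2/524288
     t*: 42: 131072 [0: 0]
    @ 0x7f3a1c0412ab 0x7f3a1c03f9d0 0x4006f2
     t*: 2: 8192 [0: 0]
     t0: 1: 4096 [0: 0]
     t1: 1: 4096 [0: 0]

(The bracketed accum fields stay zero unless opt_prof_accum is enabled.)
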
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps(const char *format, ...)
+{
+ int mfd;
+ va_list ap;
+ char filename[PATH_MAX + 1];
+
+ va_start(ap, format);
+ malloc_vsnprintf(filename, sizeof(filename), format, ap);
+ va_end(ap);
+ mfd = open(filename, O_RDONLY);
+
+ return (mfd);
+}
+
static bool
prof_dump_maps(bool propagate_err)
{
bool ret;
int mfd;
- char filename[PATH_MAX + 1];
cassert(config_prof);
#ifdef __FreeBSD__
- malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
+ mfd = prof_open_maps("/proc/curproc/map");
#else
- malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
- (int)getpid());
+ {
+ int pid = getpid();
+
+ mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
+ if (mfd == -1)
+ mfd = prof_open_maps("/proc/%d/maps", pid);
+ }
#endif
- mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
@@ -977,51 +1407,85 @@ label_return:
}
static void
-prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx,
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
const char *filename)
{
if (cnt_all->curbytes != 0) {
- malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
- PRId64" object%s, %zu context%s\n",
+ malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
+ FMTu64" object%s, %zu context%s\n",
cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
- leak_nctx, (leak_nctx != 1) ? "s" : "");
+ leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
- "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+ "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
}
+static prof_gctx_t *
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
+{
+ prof_gctx_t *ret;
+ bool propagate_err = *(bool *)arg;
+
+ malloc_mutex_lock(gctx->lock);
+
+ if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
+ ret = gctx;
+ goto label_return;
+ }
+
+ ret = NULL;
+label_return:
+ malloc_mutex_unlock(gctx->lock);
+ return (ret);
+}
+
static bool
-prof_dump(bool propagate_err, const char *filename, bool leakcheck)
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
- prof_tdata_t *prof_tdata;
+ prof_tdata_t *tdata;
prof_cnt_t cnt_all;
size_t tabind;
union {
- prof_ctx_t *p;
+ prof_gctx_t *p;
void *v;
- } ctx;
- size_t leak_nctx;
- prof_ctx_list_t ctx_ql;
+ } gctx;
+ size_t leak_ngctx;
+ prof_gctx_tree_t gctxs;
cassert(config_prof);
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
return (true);
malloc_mutex_lock(&prof_dump_mtx);
+ prof_enter(tsd, tdata);
- /* Merge per thread profile stats, and sum them in cnt_all. */
+ /*
+ * Put gctx's in limbo and clear their counters in preparation for
+ * summing.
+ */
+ gctx_tree_new(&gctxs);
+ for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
+ prof_dump_gctx_prep(gctx.p, &gctxs);
+
+ /*
+ * Iterate over tdatas, and for the non-expired ones snapshot their tctx
+ * stats and merge them into the associated gctx's.
+ */
memset(&cnt_all, 0, sizeof(prof_cnt_t));
- leak_nctx = 0;
- ql_new(&ctx_ql);
- prof_enter(prof_tdata);
- for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
- prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql);
- prof_leave(prof_tdata);
+ malloc_mutex_lock(&tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
+ malloc_mutex_unlock(&tdatas_mtx);
+
+ /* Merge tctx stats into gctx's. */
+ leak_ngctx = 0;
+ gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
+
+ prof_leave(tsd, tdata);
/* Create dump file. */
if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
@@ -1031,11 +1495,10 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
if (prof_dump_header(propagate_err, &cnt_all))
goto label_write_error;
- /* Dump per ctx profile stats. */
- while ((ctx.p = ql_first(&ctx_ql)) != NULL) {
- if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql))
- goto label_write_error;
- }
+ /* Dump per gctx profile stats. */
+ if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
+ (void *)&propagate_err) != NULL)
+ goto label_write_error;
/* Dump /proc/<pid>/maps if possible. */
if (prof_dump_maps(propagate_err))
@@ -1044,17 +1507,17 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
if (prof_dump_close(propagate_err))
goto label_open_close_error;
+ prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(&prof_dump_mtx);
if (leakcheck)
- prof_leakcheck(&cnt_all, leak_nctx, filename);
+ prof_leakcheck(&cnt_all, leak_ngctx, filename);
return (false);
label_write_error:
prof_dump_close(propagate_err);
label_open_close_error:
- while ((ctx.p = ql_first(&ctx_ql)) != NULL)
- prof_dump_ctx_cleanup(ctx.p, &ctx_ql);
+ prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
@@ -1062,7 +1525,7 @@ label_open_close_error:
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
-prof_dump_filename(char *filename, char v, int64_t vseq)
+prof_dump_filename(char *filename, char v, uint64_t vseq)
{
cassert(config_prof);
@@ -1070,12 +1533,12 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c%"PRId64".heap",
+ "%s.%d.%"FMTu64".%c%"FMTu64".heap",
opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c.heap",
+ "%s.%d.%"FMTu64".%c.heap",
opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
}
prof_dump_seq++;
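
For a concrete sense of the names prof_dump_filename() produces, here is a tiny sketch of the vseq branch with illustrative pid/seq/vseq values and the usual "jeprof" default prefix (the prefix is configurable, so treat it as an assumption):

    #include <stdio.h>

    int
    main(void)
    {
            char filename[4096];

            /* Same pattern as the non-VSEQ_INVALID branch above. */
            snprintf(filename, sizeof(filename), "%s.%d.%llu.%c%llu.heap",
                "jeprof", 1234, 8ULL, 'u', 3ULL);
            puts(filename); /* jeprof.1234.8.u3.heap */
            return (0);
    }
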
@@ -1084,36 +1547,40 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
static void
prof_fdump(void)
{
+ tsd_t *tsd;
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
+ assert(opt_prof_final);
+ assert(opt_prof_prefix[0] != '\0');
- if (prof_booted == false)
+ if (!prof_booted)
return;
+ tsd = tsd_fetch();
- if (opt_prof_final && opt_prof_prefix[0] != '\0') {
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, opt_prof_leak);
- }
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'f', VSEQ_INVALID);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, opt_prof_leak);
}
void
prof_idump(void)
{
- prof_tdata_t *prof_tdata;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
char filename[PATH_MAX + 1];
cassert(config_prof);
- if (prof_booted == false)
+ if (!prof_booted)
return;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
return;
- if (prof_tdata->enq) {
- prof_tdata->enq_idump = true;
+ if (tdata->enq) {
+ tdata->enq_idump = true;
return;
}
@@ -1122,19 +1589,21 @@ prof_idump(void)
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, false);
+ prof_dump(tsd, false, filename, false);
}
}
bool
prof_mdump(const char *filename)
{
+ tsd_t *tsd;
char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
- if (opt_prof == false || prof_booted == false)
+ if (!opt_prof || !prof_booted)
return (true);
+ tsd = tsd_fetch();
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
@@ -1146,24 +1615,26 @@ prof_mdump(const char *filename)
malloc_mutex_unlock(&prof_dump_seq_mtx);
filename = filename_buf;
}
- return (prof_dump(true, filename, false));
+ return (prof_dump(tsd, true, filename, false));
}
void
prof_gdump(void)
{
- prof_tdata_t *prof_tdata;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
- if (prof_booted == false)
+ if (!prof_booted)
return;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
return;
- if (prof_tdata->enq) {
- prof_tdata->enq_gdump = true;
+ if (tdata->enq) {
+ tdata->enq_gdump = true;
return;
}
@@ -1172,7 +1643,7 @@ prof_gdump(void)
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, false);
+ prof_dump(tsd, false, filename, false);
}
}
@@ -1199,88 +1670,375 @@ prof_bt_keycomp(const void *k1, const void *k2)
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
-prof_tdata_t *
-prof_tdata_init(void)
+JEMALLOC_INLINE_C uint64_t
+prof_thr_uid_alloc(void)
{
- prof_tdata_t *prof_tdata;
+ uint64_t thr_uid;
+
+ malloc_mutex_lock(&next_thr_uid_mtx);
+ thr_uid = next_thr_uid;
+ next_thr_uid++;
+ malloc_mutex_unlock(&next_thr_uid_mtx);
+
+ return (thr_uid);
+}
+
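
prof_thr_uid_alloc() above hands out thread uids from a counter serialized by next_thr_uid_mtx, which keeps the ids unique and strictly increasing for the life of the process. Purely as a note on the design space, and not something this diff does, the same contract could be met lock-free with a C11 atomic counter:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t next_thr_uid;   /* Starts at 0, like the diff. */

    static uint64_t
    thr_uid_alloc(void)
    {
            /* fetch_add returns the pre-increment value: ids are 0, 1, 2, ... */
            return (atomic_fetch_add(&next_thr_uid, 1));
    }

    int
    main(void)
    {
            printf("%llu %llu\n", (unsigned long long)thr_uid_alloc(),
                (unsigned long long)thr_uid_alloc());   /* 0 1 */
            return (0);
    }
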
+static prof_tdata_t *
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+ char *thread_name, bool active)
+{
+ prof_tdata_t *tdata;
+ tcache_t *tcache;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
- prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
- if (prof_tdata == NULL)
+ tcache = tcache_get(tsd, true);
+ tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
+ tcache, true, NULL);
+ if (tdata == NULL)
return (NULL);
- if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
+ tdata->lock = prof_tdata_mutex_choose(thr_uid);
+ tdata->thr_uid = thr_uid;
+ tdata->thr_discrim = thr_discrim;
+ tdata->thread_name = thread_name;
+ tdata->attached = true;
+ tdata->expired = false;
+ tdata->tctx_uid_next = 0;
+
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp)) {
- idalloc(prof_tdata);
+ idalloctm(tsd, tdata, tcache, true);
return (NULL);
}
- ql_new(&prof_tdata->lru_ql);
- prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
- if (prof_tdata->vec == NULL) {
- ckh_delete(&prof_tdata->bt2cnt);
- idalloc(prof_tdata);
- return (NULL);
- }
+ tdata->prng_state = (uint64_t)(uintptr_t)tdata;
+ prof_sample_threshold_update(tdata);
- prof_tdata->prng_state = 0;
- prof_tdata->threshold = 0;
- prof_tdata->accum = 0;
+ tdata->enq = false;
+ tdata->enq_idump = false;
+ tdata->enq_gdump = false;
- prof_tdata->enq = false;
- prof_tdata->enq_idump = false;
- prof_tdata->enq_gdump = false;
+ tdata->dumping = false;
+ tdata->active = active;
- prof_tdata_tsd_set(&prof_tdata);
+ malloc_mutex_lock(&tdatas_mtx);
+ tdata_tree_insert(&tdatas, tdata);
+ malloc_mutex_unlock(&tdatas_mtx);
- return (prof_tdata);
+ return (tdata);
}
-void
-prof_tdata_cleanup(void *arg)
+prof_tdata_t *
+prof_tdata_init(tsd_t *tsd)
{
- prof_thr_cnt_t *cnt;
- prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;
- cassert(config_prof);
+ return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
+ prof_thread_active_init_get()));
+}
- if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) {
- /*
- * Another destructor deallocated memory after this destructor
- * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY
- * in order to receive another callback.
- */
- prof_tdata = PROF_TDATA_STATE_PURGATORY;
- prof_tdata_tsd_set(&prof_tdata);
- } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to PROF_TDATA_STATE_PURGATORY so that other destructors
- * wouldn't cause re-creation of the prof_tdata. This time, do
- * nothing, so that the destructor will not be called again.
- */
- } else if (prof_tdata != NULL) {
- /*
- * Delete the hash table. All of its contents can still be
- * iterated over via the LRU.
- */
- ckh_delete(&prof_tdata->bt2cnt);
+/* tdata->lock must be held. */
+static bool
+prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
+{
+
+ if (tdata->attached && !even_if_attached)
+ return (false);
+ if (ckh_count(&tdata->bt2tctx) != 0)
+ return (false);
+ return (true);
+}
+
+/* tdatas_mtx must be held. */
+static void
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached)
+{
+ tcache_t *tcache;
+
+ assert(prof_tdata_should_destroy(tdata, even_if_attached));
+ assert(tsd_prof_tdata_get(tsd) != tdata);
+
+ tdata_tree_remove(&tdatas, tdata);
+
+ tcache = tcache_get(tsd, false);
+ if (tdata->thread_name != NULL)
+ idalloctm(tsd, tdata->thread_name, tcache, true);
+ ckh_delete(tsd, &tdata->bt2tctx);
+ idalloctm(tsd, tdata, tcache, true);
+}
+
+static void
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
+{
+
+ malloc_mutex_lock(&tdatas_mtx);
+ prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
+ malloc_mutex_unlock(&tdatas_mtx);
+}
+
+static void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
+{
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tdata->lock);
+ if (tdata->attached) {
+ destroy_tdata = prof_tdata_should_destroy(tdata, true);
/*
- * Iteratively merge cnt's into the global stats and delete
- * them.
+ * Only detach if !destroy_tdata, because detaching would allow
+ * another thread to win the race to destroy tdata.
*/
- while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
- ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
- prof_ctx_merge(cnt->ctx, cnt);
- idalloc(cnt);
- }
- idalloc(prof_tdata->vec);
- idalloc(prof_tdata);
- prof_tdata = PROF_TDATA_STATE_PURGATORY;
- prof_tdata_tsd_set(&prof_tdata);
+ if (!destroy_tdata)
+ tdata->attached = false;
+ tsd_prof_tdata_set(tsd, NULL);
+ } else
+ destroy_tdata = false;
+ malloc_mutex_unlock(tdata->lock);
+ if (destroy_tdata)
+ prof_tdata_destroy(tsd, tdata, true);
+}
+
+prof_tdata_t *
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
+{
+ uint64_t thr_uid = tdata->thr_uid;
+ uint64_t thr_discrim = tdata->thr_discrim + 1;
+ char *thread_name = (tdata->thread_name != NULL) ?
+ prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
+ bool active = tdata->active;
+
+ prof_tdata_detach(tsd, tdata);
+ return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
+ active));
+}
+
+static bool
+prof_tdata_expire(prof_tdata_t *tdata)
+{
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tdata->lock);
+ if (!tdata->expired) {
+ tdata->expired = true;
+ destroy_tdata = tdata->attached ? false :
+ prof_tdata_should_destroy(tdata, false);
+ } else
+ destroy_tdata = false;
+ malloc_mutex_unlock(tdata->lock);
+
+ return (destroy_tdata);
+}
+
+static prof_tdata_t *
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+
+ return (prof_tdata_expire(tdata) ? tdata : NULL);
+}
+
+void
+prof_reset(tsd_t *tsd, size_t lg_sample)
+{
+ prof_tdata_t *next;
+
+ assert(lg_sample < (sizeof(uint64_t) << 3));
+
+ malloc_mutex_lock(&prof_dump_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
+
+ lg_prof_sample = lg_sample;
+
+ next = NULL;
+ do {
+ prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+ prof_tdata_reset_iter, NULL);
+ if (to_destroy != NULL) {
+ next = tdata_tree_next(&tdatas, to_destroy);
+ prof_tdata_destroy_locked(tsd, to_destroy, false);
+ } else
+ next = NULL;
+ } while (next != NULL);
+
+ malloc_mutex_unlock(&tdatas_mtx);
+ malloc_mutex_unlock(&prof_dump_mtx);
+}
+
+void
+prof_tdata_cleanup(tsd_t *tsd)
+{
+ prof_tdata_t *tdata;
+
+ if (!config_prof)
+ return;
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (tdata != NULL)
+ prof_tdata_detach(tsd, tdata);
+}
+
+bool
+prof_active_get(void)
+{
+ bool prof_active_current;
+
+ malloc_mutex_lock(&prof_active_mtx);
+ prof_active_current = prof_active;
+ malloc_mutex_unlock(&prof_active_mtx);
+ return (prof_active_current);
+}
+
+bool
+prof_active_set(bool active)
+{
+ bool prof_active_old;
+
+ malloc_mutex_lock(&prof_active_mtx);
+ prof_active_old = prof_active;
+ prof_active = active;
+ malloc_mutex_unlock(&prof_active_mtx);
+ return (prof_active_old);
+}
+
+const char *
+prof_thread_name_get(void)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return ("");
+ return (tdata->thread_name != NULL ? tdata->thread_name : "");
+}
+
+static char *
+prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
+{
+ char *ret;
+ size_t size;
+
+ if (thread_name == NULL)
+ return (NULL);
+
+ size = strlen(thread_name) + 1;
+ if (size == 1)
+ return ("");
+
+ ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL);
+ if (ret == NULL)
+ return (NULL);
+ memcpy(ret, thread_name, size);
+ return (ret);
+}
+
+int
+prof_thread_name_set(tsd_t *tsd, const char *thread_name)
+{
+ prof_tdata_t *tdata;
+ unsigned i;
+ char *s;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (EAGAIN);
+
+ /* Validate input. */
+ if (thread_name == NULL)
+ return (EFAULT);
+ for (i = 0; thread_name[i] != '\0'; i++) {
+ char c = thread_name[i];
+ if (!isgraph(c) && !isblank(c))
+ return (EFAULT);
+ }
+
+ s = prof_thread_name_alloc(tsd, thread_name);
+ if (s == NULL)
+ return (EAGAIN);
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
+ true);
+ tdata->thread_name = NULL;
}
+ if (strlen(s) > 0)
+ tdata->thread_name = s;
+ return (0);
+}
+
+bool
+prof_thread_active_get(void)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (false);
+ return (tdata->active);
+}
+
+bool
+prof_thread_active_set(bool active)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (true);
+ tdata->active = active;
+ return (false);
+}
+
+bool
+prof_thread_active_init_get(void)
+{
+ bool active_init;
+
+ malloc_mutex_lock(&prof_thread_active_init_mtx);
+ active_init = prof_thread_active_init;
+ malloc_mutex_unlock(&prof_thread_active_init_mtx);
+ return (active_init);
+}
+
+bool
+prof_thread_active_init_set(bool active_init)
+{
+ bool active_init_old;
+
+ malloc_mutex_lock(&prof_thread_active_init_mtx);
+ active_init_old = prof_thread_active_init;
+ prof_thread_active_init = active_init;
+ malloc_mutex_unlock(&prof_thread_active_init_mtx);
+ return (active_init_old);
+}
+
+bool
+prof_gdump_get(void)
+{
+ bool prof_gdump_current;
+
+ malloc_mutex_lock(&prof_gdump_mtx);
+ prof_gdump_current = prof_gdump_val;
+ malloc_mutex_unlock(&prof_gdump_mtx);
+ return (prof_gdump_current);
+}
+
+bool
+prof_gdump_set(bool gdump)
+{
+ bool prof_gdump_old;
+
+ malloc_mutex_lock(&prof_gdump_mtx);
+ prof_gdump_old = prof_gdump_val;
+ prof_gdump_val = gdump;
+ malloc_mutex_unlock(&prof_gdump_mtx);
+ return (prof_gdump_old);
}
void
@@ -1300,11 +2058,11 @@ prof_boot1(void)
cassert(config_prof);
/*
- * opt_prof and prof_promote must be in their final state before any
- * arenas are initialized, so this function must be executed early.
+ * opt_prof must be in its final state before any arenas are
+ * initialized, so this function must be executed early.
*/
- if (opt_prof_leak && opt_prof == false) {
+ if (opt_prof_leak && !opt_prof) {
/*
* Enable opt_prof, but in such a way that profiles are never
* automatically dumped.
@@ -1317,8 +2075,6 @@ prof_boot1(void)
opt_lg_prof_interval);
}
}
-
- prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
}
bool
@@ -1328,36 +2084,65 @@ prof_boot2(void)
cassert(config_prof);
if (opt_prof) {
+ tsd_t *tsd;
unsigned i;
- if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ lg_prof_sample = opt_lg_prof_sample;
+
+ prof_active = opt_prof_active;
+ if (malloc_mutex_init(&prof_active_mtx))
+ return (true);
+
+ prof_gdump_val = opt_prof_gdump;
+ if (malloc_mutex_init(&prof_gdump_mtx))
+ return (true);
+
+ prof_thread_active_init = opt_prof_thread_active_init;
+ if (malloc_mutex_init(&prof_thread_active_init_mtx))
+ return (true);
+
+ tsd = tsd_fetch();
+ if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);
- if (malloc_mutex_init(&bt2ctx_mtx))
+ if (malloc_mutex_init(&bt2gctx_mtx))
+ return (true);
+
+ tdata_tree_new(&tdatas);
+ if (malloc_mutex_init(&tdatas_mtx))
+ return (true);
+
+ next_thr_uid = 0;
+ if (malloc_mutex_init(&next_thr_uid_mtx))
return (true);
- if (prof_tdata_tsd_boot()) {
- malloc_write(
- "<jemalloc>: Error in pthread_key_create()\n");
- abort();
- }
if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
if (malloc_mutex_init(&prof_dump_mtx))
return (true);
- if (atexit(prof_fdump) != 0) {
+ if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
+ atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort)
abort();
}
- ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
+ gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
sizeof(malloc_mutex_t));
- if (ctx_locks == NULL)
+ if (gctx_locks == NULL)
return (true);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- if (malloc_mutex_init(&ctx_locks[i]))
+ if (malloc_mutex_init(&gctx_locks[i]))
+ return (true);
+ }
+
+ tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
+ sizeof(malloc_mutex_t));
+ if (tdata_locks == NULL)
+ return (true);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ if (malloc_mutex_init(&tdata_locks[i]))
return (true);
}
}
@@ -1382,10 +2167,14 @@ prof_prefork(void)
if (opt_prof) {
unsigned i;
- malloc_mutex_prefork(&bt2ctx_mtx);
+ malloc_mutex_prefork(&tdatas_mtx);
+ malloc_mutex_prefork(&bt2gctx_mtx);
+ malloc_mutex_prefork(&next_thr_uid_mtx);
malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_prefork(&ctx_locks[i]);
+ malloc_mutex_prefork(&gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_prefork(&tdata_locks[i]);
}
}
@@ -1396,10 +2185,14 @@ prof_postfork_parent(void)
if (opt_prof) {
unsigned i;
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_parent(&tdata_locks[i]);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_parent(&ctx_locks[i]);
+ malloc_mutex_postfork_parent(&gctx_locks[i]);
malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
- malloc_mutex_postfork_parent(&bt2ctx_mtx);
+ malloc_mutex_postfork_parent(&next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(&bt2gctx_mtx);
+ malloc_mutex_postfork_parent(&tdatas_mtx);
}
}
@@ -1410,10 +2203,14 @@ prof_postfork_child(void)
if (opt_prof) {
unsigned i;
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_child(&tdata_locks[i]);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_child(&ctx_locks[i]);
+ malloc_mutex_postfork_child(&gctx_locks[i]);
malloc_mutex_postfork_child(&prof_dump_seq_mtx);
- malloc_mutex_postfork_child(&bt2ctx_mtx);
+ malloc_mutex_postfork_child(&next_thr_uid_mtx);
+ malloc_mutex_postfork_child(&bt2gctx_mtx);
+ malloc_mutex_postfork_child(&tdatas_mtx);
}
}
diff --git a/contrib/jemalloc/src/quarantine.c b/contrib/jemalloc/src/quarantine.c
index 5431511..6c43dfc 100644
--- a/contrib/jemalloc/src/quarantine.c
+++ b/contrib/jemalloc/src/quarantine.c
@@ -2,7 +2,7 @@
#include "jemalloc/internal/jemalloc_internal.h"
/*
- * quarantine pointers close to NULL are used to encode state information that
+ * Quarantine pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
@@ -10,26 +10,25 @@
#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
/******************************************************************************/
-/* Data. */
-
-malloc_tsd_data(, quarantine, quarantine_t *, NULL)
-
-/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static quarantine_t *quarantine_grow(quarantine_t *quarantine);
-static void quarantine_drain_one(quarantine_t *quarantine);
-static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
+static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
+static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
+static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
+ size_t upper_bound);
/******************************************************************************/
-quarantine_t *
-quarantine_init(size_t lg_maxobjs)
+static quarantine_t *
+quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
{
quarantine_t *quarantine;
- quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
- ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
+ assert(tsd_nominal(tsd));
+
+ quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
+ + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false,
+ tcache_get(tsd, true), true, NULL);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
@@ -37,19 +36,36 @@ quarantine_init(size_t lg_maxobjs)
quarantine->first = 0;
quarantine->lg_maxobjs = lg_maxobjs;
- quarantine_tsd_set(&quarantine);
-
return (quarantine);
}
+void
+quarantine_alloc_hook_work(tsd_t *tsd)
+{
+ quarantine_t *quarantine;
+
+ if (!tsd_nominal(tsd))
+ return;
+
+ quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
+ /*
+ * Check again whether quarantine has been initialized, because
+ * quarantine_init() may have triggered recursive initialization.
+ */
+ if (tsd_quarantine_get(tsd) == NULL)
+ tsd_quarantine_set(tsd, quarantine);
+ else
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
+}
+
static quarantine_t *
-quarantine_grow(quarantine_t *quarantine)
+quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_t *ret;
- ret = quarantine_init(quarantine->lg_maxobjs + 1);
+ ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
if (ret == NULL) {
- quarantine_drain_one(quarantine);
+ quarantine_drain_one(tsd, quarantine);
return (quarantine);
}
@@ -71,17 +87,18 @@ quarantine_grow(quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
- idalloc(quarantine);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
+ tsd_quarantine_set(tsd, ret);
return (ret);
}
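
quarantine_grow() above doubles the ring and straightens it out: when the live span of the old ring wraps past the end of its array, it is copied into the new array in two pieces (ncopy_a, then ncopy_b) so that it starts at index 0 again. A standalone sketch of that copy with hypothetical ring_t/obj_t types (error handling omitted):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { size_t usize; } obj_t;

    typedef struct {
            size_t  first;  /* Index of oldest element. */
            size_t  count;  /* Number of live elements. */
            size_t  cap;    /* Capacity (a power of two in jemalloc). */
            obj_t   *objs;
    } ring_t;

    static ring_t *
    ring_grow(ring_t *old)
    {
            ring_t *new = malloc(sizeof(*new));

            new->cap = old->cap * 2;
            new->objs = malloc(new->cap * sizeof(obj_t));
            new->first = 0;
            new->count = old->count;
            if (old->first + old->count <= old->cap) {
                    /* Live region is contiguous: one copy suffices. */
                    memcpy(new->objs, &old->objs[old->first],
                        old->count * sizeof(obj_t));
            } else {
                    /* Live region wraps: copy tail segment, then head segment. */
                    size_t ncopy_a = old->cap - old->first;
                    size_t ncopy_b = old->count - ncopy_a;

                    memcpy(new->objs, &old->objs[old->first],
                        ncopy_a * sizeof(obj_t));
                    memcpy(&new->objs[ncopy_a], old->objs,
                        ncopy_b * sizeof(obj_t));
            }
            free(old->objs);
            free(old);
            return (new);
    }

    int
    main(void)
    {
            ring_t *r = malloc(sizeof(*r));
            size_t i;

            /* Build a full, wrapped ring of capacity 4 whose oldest slot is 3. */
            r->cap = 4;
            r->objs = malloc(r->cap * sizeof(obj_t));
            r->first = 3;
            r->count = 4;
            for (i = 0; i < r->cap; i++)
                    r->objs[(r->first + i) % r->cap].usize = i;
            r = ring_grow(r);
            for (i = 0; i < r->count; i++)
                    printf("%zu ", r->objs[i].usize);       /* 0 1 2 3 */
            printf("\n");
            free(r->objs);
            free(r);
            return (0);
    }
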
static void
-quarantine_drain_one(quarantine_t *quarantine)
+quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(obj->ptr, config_prof));
- idalloc(obj->ptr);
+ idalloctm(tsd, obj->ptr, NULL, false);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -89,15 +106,15 @@ quarantine_drain_one(quarantine_t *quarantine)
}
static void
-quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
- quarantine_drain_one(quarantine);
+ quarantine_drain_one(tsd, quarantine);
}
void
-quarantine(void *ptr)
+quarantine(tsd_t *tsd, void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(ptr, config_prof);
@@ -105,17 +122,8 @@ quarantine(void *ptr)
cassert(config_fill);
assert(opt_quarantine);
- quarantine = *quarantine_tsd_get();
- if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
- if (quarantine == QUARANTINE_STATE_PURGATORY) {
- /*
- * Make a note that quarantine() was called after
- * quarantine_cleanup() was called.
- */
- quarantine = QUARANTINE_STATE_REINCARNATED;
- quarantine_tsd_set(&quarantine);
- }
- idalloc(ptr);
+ if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
+ idalloctm(tsd, ptr, NULL, false);
return;
}
/*
@@ -125,11 +133,11 @@ quarantine(void *ptr)
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
- quarantine_drain(quarantine, upper_bound);
+ quarantine_drain(tsd, quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
- quarantine = quarantine_grow(quarantine);
+ quarantine = quarantine_grow(tsd, quarantine);
/* quarantine_grow() must free a slot if it fails to grow. */
assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
/* Append ptr if its size doesn't exceed the quarantine size. */
@@ -141,12 +149,12 @@ quarantine(void *ptr)
obj->usize = usize;
quarantine->curbytes += usize;
quarantine->curobjs++;
- if (config_fill && opt_junk) {
+ if (config_fill && unlikely(opt_junk_free)) {
/*
* Only do redzone validation if Valgrind isn't in
* operation.
*/
- if ((config_valgrind == false || opt_valgrind == false)
+ if ((!config_valgrind || likely(!in_valgrind))
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
@@ -154,46 +162,22 @@ quarantine(void *ptr)
}
} else {
assert(quarantine->curbytes == 0);
- idalloc(ptr);
+ idalloctm(tsd, ptr, NULL, false);
}
}
void
-quarantine_cleanup(void *arg)
-{
- quarantine_t *quarantine = *(quarantine_t **)arg;
-
- if (quarantine == QUARANTINE_STATE_REINCARNATED) {
- /*
- * Another destructor deallocated memory after this destructor
- * was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
- * in order to receive another callback.
- */
- quarantine = QUARANTINE_STATE_PURGATORY;
- quarantine_tsd_set(&quarantine);
- } else if (quarantine == QUARANTINE_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to QUARANTINE_STATE_PURGATORY so that other destructors
- * wouldn't cause re-creation of the quarantine. This time, do
- * nothing, so that the destructor will not be called again.
- */
- } else if (quarantine != NULL) {
- quarantine_drain(quarantine, 0);
- idalloc(quarantine);
- quarantine = QUARANTINE_STATE_PURGATORY;
- quarantine_tsd_set(&quarantine);
- }
-}
-
-bool
-quarantine_boot(void)
+quarantine_cleanup(tsd_t *tsd)
{
+ quarantine_t *quarantine;
- cassert(config_fill);
-
- if (quarantine_tsd_boot())
- return (true);
+ if (!config_fill)
+ return;
- return (false);
+ quarantine = tsd_quarantine_get(tsd);
+ if (quarantine != NULL) {
+ quarantine_drain(tsd, quarantine, 0);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
+ tsd_quarantine_set(tsd, NULL);
+ }
}
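
The reworked quarantine keeps pending deallocations in a power-of-two ring buffer and drains the oldest entries until the configured byte budget is met. Below is a minimal standalone sketch of that bookkeeping, not jemalloc's API: the q_* names are illustrative and free() stands in for idalloctm().

#include <stddef.h>
#include <stdlib.h>

typedef struct {
    void   *ptr;
    size_t  usize;
} q_obj_t;

typedef struct {
    size_t  curbytes;
    size_t  curobjs;
    size_t  first;       /* index of the oldest object */
    size_t  lg_maxobjs;  /* capacity is 1 << lg_maxobjs */
    q_obj_t objs[];
} q_ring_t;

static void
q_drain_one(q_ring_t *q)
{
    q_obj_t *obj = &q->objs[q->first];

    free(obj->ptr);                 /* stands in for idalloctm() */
    q->curbytes -= obj->usize;
    q->curobjs--;
    /* Power-of-two capacity makes the wrap a mask instead of a modulo. */
    q->first = (q->first + 1) & (((size_t)1 << q->lg_maxobjs) - 1);
}

static void
q_drain(q_ring_t *q, size_t upper_bound)
{
    while (q->curbytes > upper_bound && q->curobjs > 0)
        q_drain_one(q);
}

int
main(void)
{
    /* Capacity 1 << 2 == 4 slots. */
    q_ring_t *q = calloc(1, sizeof(*q) + 4 * sizeof(q_obj_t));

    q->lg_maxobjs = 2;
    while (q->curobjs < 4) {
        q->objs[(q->first + q->curobjs) & 3] = (q_obj_t){ malloc(64), 64 };
        q->curobjs++;
        q->curbytes += 64;
    }
    q_drain(q, 128);    /* evicts the two oldest entries */
    q_drain(q, 0);      /* empties the ring */
    free(q);
    return (0);
}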
diff --git a/contrib/jemalloc/src/rtree.c b/contrib/jemalloc/src/rtree.c
index 205957a..af0d97e 100644
--- a/contrib/jemalloc/src/rtree.c
+++ b/contrib/jemalloc/src/rtree.c
@@ -1,73 +1,74 @@
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
-rtree_t *
-rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
+static unsigned
+hmin(unsigned ha, unsigned hb)
{
- rtree_t *ret;
- unsigned bits_per_level, bits_in_leaf, height, i;
+
+ return (ha < hb ? ha : hb);
+}
+
+/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
+bool
+rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
+ rtree_node_dalloc_t *dalloc)
+{
+ unsigned bits_in_leaf, height, i;
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
- bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
- bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
+ bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
+ : (bits % RTREE_BITS_PER_LEVEL);
if (bits > bits_in_leaf) {
- height = 1 + (bits - bits_in_leaf) / bits_per_level;
- if ((height-1) * bits_per_level + bits_in_leaf != bits)
+ height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
+ if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
height++;
- } else {
+ } else
height = 1;
+ assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
+
+ rtree->alloc = alloc;
+ rtree->dalloc = dalloc;
+ rtree->height = height;
+
+ /* Root level. */
+ rtree->levels[0].subtree = NULL;
+ rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
+ bits_in_leaf;
+ rtree->levels[0].cumbits = rtree->levels[0].bits;
+ /* Interior levels. */
+ for (i = 1; i < height-1; i++) {
+ rtree->levels[i].subtree = NULL;
+ rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
+ rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
+ RTREE_BITS_PER_LEVEL;
}
- assert((height-1) * bits_per_level + bits_in_leaf >= bits);
-
- ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
- (sizeof(unsigned) * height));
- if (ret == NULL)
- return (NULL);
- memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
- height));
-
- ret->alloc = alloc;
- ret->dalloc = dalloc;
- if (malloc_mutex_init(&ret->mutex)) {
- if (dalloc != NULL)
- dalloc(ret);
- return (NULL);
- }
- ret->height = height;
+ /* Leaf level. */
if (height > 1) {
- if ((height-1) * bits_per_level + bits_in_leaf > bits) {
- ret->level2bits[0] = (bits - bits_in_leaf) %
- bits_per_level;
- } else
- ret->level2bits[0] = bits_per_level;
- for (i = 1; i < height-1; i++)
- ret->level2bits[i] = bits_per_level;
- ret->level2bits[height-1] = bits_in_leaf;
- } else
- ret->level2bits[0] = bits;
+ rtree->levels[height-1].subtree = NULL;
+ rtree->levels[height-1].bits = bits_in_leaf;
+ rtree->levels[height-1].cumbits = bits;
+ }
- ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
- if (ret->root == NULL) {
- if (dalloc != NULL)
- dalloc(ret);
- return (NULL);
+ /* Compute lookup table to be used by rtree_start_level(). */
+ for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
+ rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
+ 1);
}
- memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
- return (ret);
+ return (false);
}
static void
-rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
+rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
{
- if (level < rtree->height - 1) {
+ if (level + 1 < rtree->height) {
size_t nchildren, i;
- nchildren = ZU(1) << rtree->level2bits[level];
+ nchildren = ZU(1) << rtree->levels[level].bits;
for (i = 0; i < nchildren; i++) {
- void **child = (void **)node[i];
+ rtree_node_elm_t *child = node[i].child;
if (child != NULL)
rtree_delete_subtree(rtree, child, level + 1);
}
@@ -78,28 +79,49 @@ rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
void
rtree_delete(rtree_t *rtree)
{
+ unsigned i;
- rtree_delete_subtree(rtree, rtree->root, 0);
- rtree->dalloc(rtree);
+ for (i = 0; i < rtree->height; i++) {
+ rtree_node_elm_t *subtree = rtree->levels[i].subtree;
+ if (subtree != NULL)
+ rtree_delete_subtree(rtree, subtree, i);
+ }
}
-void
-rtree_prefork(rtree_t *rtree)
+static rtree_node_elm_t *
+rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
{
+ rtree_node_elm_t *node;
+
+ if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
+ /*
+ * Another thread is already in the process of initializing.
+ * Spin-wait until initialization is complete.
+ */
+ do {
+ CPU_SPINWAIT;
+ node = atomic_read_p((void **)elmp);
+ } while (node == RTREE_NODE_INITIALIZING);
+ } else {
+ node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
+ if (node == NULL)
+ return (NULL);
+ atomic_write_p((void **)elmp, node);
+ }
- malloc_mutex_prefork(&rtree->mutex);
+ return (node);
}
-void
-rtree_postfork_parent(rtree_t *rtree)
+rtree_node_elm_t *
+rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
{
- malloc_mutex_postfork_parent(&rtree->mutex);
+ return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
}
-void
-rtree_postfork_child(rtree_t *rtree)
+rtree_node_elm_t *
+rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
- malloc_mutex_postfork_child(&rtree->mutex);
+ return (rtree_node_init(rtree, level, &elm->child));
}
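
The rewritten rtree_new() sizes the tree from a fixed per-level bit count: every interior level consumes RTREE_BITS_PER_LEVEL key bits and the leaf level absorbs the remainder, so the height is effectively the ceiling of bits / bits-per-level. A small sketch of that arithmetic follows; the bit budget and per-level width used here are illustrative, not jemalloc's actual constants.

#include <assert.h>
#include <stdio.h>

static unsigned
rtree_height(unsigned bits, unsigned bits_per_level)
{
    unsigned bits_in_leaf, height;

    bits_in_leaf = (bits % bits_per_level == 0) ? bits_per_level :
        (bits % bits_per_level);
    if (bits > bits_in_leaf)
        height = 1 + (bits - bits_in_leaf) / bits_per_level;
    else
        height = 1;
    assert((height - 1) * bits_per_level + bits_in_leaf == bits);
    return (height);
}

int
main(void)
{
    /* E.g. a 48-bit key space split 4 bits per level -> 12 levels. */
    printf("height = %u\n", rtree_height(48, 4));
    return (0);
}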
diff --git a/contrib/jemalloc/src/stats.c b/contrib/jemalloc/src/stats.c
index bef2ab3..154c3e7 100644
--- a/contrib/jemalloc/src/stats.c
+++ b/contrib/jemalloc/src/stats.c
@@ -6,31 +6,22 @@
xmallctl(n, v, &sz, NULL, 0); \
} while (0)
-#define CTL_I_GET(n, v, t) do { \
+#define CTL_M2_GET(n, i, v, t) do { \
size_t mib[6]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
- mib[2] = i; \
+ mib[2] = (i); \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
-#define CTL_J_GET(n, v, t) do { \
+#define CTL_M2_M4_GET(n, i, j, v, t) do { \
size_t mib[6]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
- mib[2] = j; \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
-} while (0)
-
-#define CTL_IJ_GET(n, v, t) do { \
- size_t mib[6]; \
- size_t miblen = sizeof(mib) / sizeof(size_t); \
- size_t sz = sizeof(t); \
- xmallctlnametomib(n, mib, &miblen); \
- mib[2] = i; \
- mib[4] = j; \
+ mib[2] = (i); \
+ mib[4] = (j); \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
@@ -48,8 +39,10 @@ static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
+static void stats_arena_hchunks_print(
+ void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i, bool bins, bool large);
+ void *cbopaque, unsigned i, bool bins, bool large, bool huge);
/******************************************************************************/
@@ -58,100 +51,109 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
size_t page;
- bool config_tcache;
- unsigned nbins, j, gap_start;
+ bool config_tcache, in_gap;
+ unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t);
CTL_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
- " ndalloc nrequests nfills nflushes"
- " newruns reruns curruns\n");
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curruns regs"
+ " pgs util nfills nflushes newruns"
+ " reruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
- " ndalloc newruns reruns curruns\n");
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curruns regs"
+ " pgs util newruns reruns\n");
}
CTL_GET("arenas.nbins", &nbins, unsigned);
- for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
+ for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nruns;
- CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
- if (nruns == 0) {
- if (gap_start == UINT_MAX)
- gap_start = j;
- } else {
- size_t reg_size, run_size, allocated;
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
+ uint64_t);
+ if (nruns == 0)
+ in_gap = true;
+ else {
+ size_t reg_size, run_size, curregs, availregs, milli;
+ size_t curruns;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
- size_t curruns;
+ char util[6]; /* "x.yyy". */
- if (gap_start != UINT_MAX) {
- if (j > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_cprintf(write_cb, cbopaque,
- "[%u..%u]\n", gap_start,
- j - 1);
- } else {
- /* Gap of one size class. */
- malloc_cprintf(write_cb, cbopaque,
- "[%u]\n", gap_start);
- }
- gap_start = UINT_MAX;
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
}
- CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
- CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
- CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
- &allocated, size_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
+ CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
+ CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
+ CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
&nmalloc, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j,
&ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j,
+ &curregs, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
+ &nrequests, uint64_t);
if (config_tcache) {
- CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
- &nrequests, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
- &nfills, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
- &nflushes, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i,
+ j, &nfills, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
+ i, j, &nflushes, uint64_t);
}
- CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
- uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
- size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
+ &reruns, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
+ &curruns, size_t);
+
+ availregs = nregs * curruns;
+ milli = (availregs != 0) ? (1000 * curregs) / availregs
+ : 1000;
+ assert(milli <= 1000);
+ if (milli < 10) {
+ malloc_snprintf(util, sizeof(util),
+ "0.00%zu", milli);
+ } else if (milli < 100) {
+ malloc_snprintf(util, sizeof(util), "0.0%zu",
+ milli);
+ } else if (milli < 1000) {
+ malloc_snprintf(util, sizeof(util), "0.%zu",
+ milli);
+ } else
+ malloc_snprintf(util, sizeof(util), "1");
+
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
- "%13u %5zu %4u %3zu %12zu %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- j, reg_size, nregs, run_size / page,
- allocated, nmalloc, ndalloc, nrequests,
- nfills, nflushes, nruns, reruns, curruns);
+ "%20zu %3u %12zu %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12zu"
+ " %12zu %4u %3zu %-5s %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
+ reg_size, j, curregs * reg_size, nmalloc,
+ ndalloc, nrequests, curregs, curruns, nregs,
+ run_size / page, util, nfills, nflushes,
+ nruns, reruns);
} else {
malloc_cprintf(write_cb, cbopaque,
- "%13u %5zu %4u %3zu %12zu %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- j, reg_size, nregs, run_size / page,
- allocated, nmalloc, ndalloc, nruns, reruns,
- curruns);
+ "%20zu %3u %12zu %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12zu"
+ " %12zu %4u %3zu %-5s %12"FMTu64
+ " %12"FMTu64"\n",
+ reg_size, j, curregs * reg_size, nmalloc,
+ ndalloc, nrequests, curregs, curruns, nregs,
+ run_size / page, util, nruns, reruns);
}
}
}
- if (gap_start != UINT_MAX) {
- if (j > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
- gap_start, j - 1);
- } else {
- /* Gap of one size class. */
- malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
- }
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
}
}
@@ -159,110 +161,199 @@ static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
- size_t page, nlruns, j;
- ssize_t gap_start;
-
- CTL_GET("arenas.page", &page, size_t);
+ unsigned nbins, nlruns, j;
+ bool in_gap;
malloc_cprintf(write_cb, cbopaque,
- "large: size pages nmalloc ndalloc nrequests"
- " curruns\n");
- CTL_GET("arenas.nlruns", &nlruns, size_t);
- for (j = 0, gap_start = -1; j < nlruns; j++) {
+ "large: size ind allocated nmalloc ndalloc"
+ " nrequests curruns\n");
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ for (j = 0, in_gap = false; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns;
- CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
uint64_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
uint64_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
- uint64_t);
- if (nrequests == 0) {
- if (gap_start == -1)
- gap_start = j;
- } else {
- CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ if (nrequests == 0)
+ in_gap = true;
+ else {
+ CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
+ &curruns, size_t);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64" %12zu\n",
+ run_size, nbins + j, curruns * run_size, nmalloc,
+ ndalloc, nrequests, curruns);
+ }
+ }
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+}
+
+static void
+stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i)
+{
+ unsigned nbins, nlruns, nhchunks, j;
+ bool in_gap;
+
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: size ind allocated nmalloc ndalloc"
+ " nrequests curhchunks\n");
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
+ for (j = 0, in_gap = false; j < nhchunks; j++) {
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t hchunk_size, curhchunks;
+
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j,
+ &nmalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j,
+ &ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ if (nrequests == 0)
+ in_gap = true;
+ else {
+ CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
size_t);
- if (gap_start != -1) {
- malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
- j - gap_start);
- gap_start = -1;
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
+ j, &curhchunks, size_t);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
}
malloc_cprintf(write_cb, cbopaque,
- "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- run_size, run_size / page, nmalloc, ndalloc,
- nrequests, curruns);
+ "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64" %12zu\n",
+ hchunk_size, nbins + nlruns + j,
+ curhchunks * hchunk_size, nmalloc, ndalloc,
+ nrequests, curhchunks);
}
}
- if (gap_start != -1)
- malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i, bool bins, bool large)
+ unsigned i, bool bins, bool large, bool huge)
{
unsigned nthreads;
const char *dss;
+ ssize_t lg_dirty_mult;
size_t page, pactive, pdirty, mapped;
+ size_t metadata_mapped, metadata_allocated;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+ size_t huge_allocated;
+ uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
CTL_GET("arenas.page", &page, size_t);
- CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
+ CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
- CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
+ CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
dss);
- CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
- CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
- CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
- CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
- CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
+ CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
+ if (lg_dirty_mult >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "min active:dirty page ratio: %u:1\n",
+ (1U << lg_dirty_mult));
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "min active:dirty page ratio: N/A\n");
+ }
+ CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
+ CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
+ CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
+ CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
malloc_cprintf(write_cb, cbopaque,
- "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
- " %"PRIu64" madvise%s, %"PRIu64" purged\n",
- pactive, pdirty, npurge, npurge == 1 ? "" : "s",
- nmadvise, nmadvise == 1 ? "" : "s", purged);
+ "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64
+ " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge ==
+ 1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged);
malloc_cprintf(write_cb, cbopaque,
- " allocated nmalloc ndalloc nrequests\n");
- CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
- CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
+ " allocated nmalloc ndalloc"
+ " nrequests\n");
+ CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
+ uint64_t);
malloc_cprintf(write_cb, cbopaque,
- "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+ "small: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
- CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
- CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
+ uint64_t);
malloc_cprintf(write_cb, cbopaque,
- "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+ "large: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
+ CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
+ CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
+ uint64_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "total: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated + large_allocated + huge_allocated,
+ small_nmalloc + large_nmalloc + huge_nmalloc,
+ small_ndalloc + large_ndalloc + huge_ndalloc,
+ small_nrequests + large_nrequests + huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "active: %12zu\n", pactive * page);
+ CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
- "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
- small_allocated + large_allocated,
- small_nmalloc + large_nmalloc,
- small_ndalloc + large_ndalloc,
- small_nrequests + large_nrequests);
- malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
- CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
- malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
+ "mapped: %12zu\n", mapped);
+ CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
+ size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "metadata: mapped: %zu, allocated: %zu\n",
+ metadata_mapped, metadata_allocated);
if (bins)
stats_arena_bins_print(write_cb, cbopaque, i);
if (large)
stats_arena_lruns_print(write_cb, cbopaque, i);
+ if (huge)
+ stats_arena_hchunks_print(write_cb, cbopaque, i);
}
void
@@ -277,6 +368,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool unmerged = true;
bool bins = true;
bool large = true;
+ bool huge = true;
/*
* Refresh stats, in case mallctl() was called by the application.
@@ -319,6 +411,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
case 'l':
large = false;
break;
+ case 'h':
+ huge = false;
+ break;
default:;
}
}
@@ -327,7 +422,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
if (general) {
- int err;
const char *cpv;
bool bv;
unsigned uv;
@@ -346,26 +440,40 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bv ? "enabled" : "disabled");
#define OPT_WRITE_BOOL(n) \
- if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
- == 0) { \
+ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
+#define OPT_WRITE_BOOL_MUTABLE(n, m) { \
+ bool bv2; \
+ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \
+ je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %s ("#m": %s)\n", bv ? "true" \
+ : "false", bv2 ? "true" : "false"); \
+ } \
+}
#define OPT_WRITE_SIZE_T(n) \
- if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
- == 0) { \
+ if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
#define OPT_WRITE_SSIZE_T(n) \
- if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
- == 0) { \
+ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
+#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \
+ ssize_t ssv2; \
+ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \
+ je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zd ("#m": %zd)\n", \
+ ssv, ssv2); \
+ } \
+}
#define OPT_WRITE_CHAR_P(n) \
- if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
- == 0) { \
+ if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
}
@@ -376,9 +484,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
- OPT_WRITE_SSIZE_T(lg_dirty_mult)
+ OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
- OPT_WRITE_BOOL(junk)
+ OPT_WRITE_CHAR_P(junk)
OPT_WRITE_SIZE_T(quarantine)
OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
@@ -389,7 +497,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
- OPT_WRITE_BOOL(prof_active)
+ OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
+ OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
+ prof.thread_active_init)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_interval)
@@ -398,6 +508,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL
+#undef OPT_WRITE_BOOL_MUTABLE
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
@@ -411,12 +522,13 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
sizeof(void *));
CTL_GET("arenas.quantum", &sv, size_t);
- malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+ malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n",
+ sv);
CTL_GET("arenas.page", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
- CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
+ CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: %u:1\n",
@@ -425,22 +537,20 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: N/A\n");
}
- if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
- == 0) {
+ if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
malloc_cprintf(write_cb, cbopaque,
"Maximum thread-cached size class: %zu\n", sv);
}
- if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
- bv) {
- CTL_GET("opt.lg_prof_sample", &sv, size_t);
+ if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
+ CTL_GET("prof.lg_sample", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
- "Average profile sample interval: %"PRIu64
+ "Average profile sample interval: %"FMTu64
" (2^%zu)\n", (((uint64_t)1U) << sv), sv);
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
- "Average profile dump interval: %"PRIu64
+ "Average profile dump interval: %"FMTu64
" (2^%zd)\n",
(((uint64_t)1U) << ssv), ssv);
} else {
@@ -449,47 +559,27 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
}
CTL_GET("opt.lg_chunk", &sv, size_t);
- malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
- (ZU(1) << sv), sv);
+ malloc_cprintf(write_cb, cbopaque,
+ "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
}
if (config_stats) {
size_t *cactive;
- size_t allocated, active, mapped;
- size_t chunks_current, chunks_high;
- uint64_t chunks_total;
- size_t huge_allocated;
- uint64_t huge_nmalloc, huge_ndalloc;
+ size_t allocated, active, metadata, resident, mapped;
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.metadata", &metadata, size_t);
+ CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
- "Allocated: %zu, active: %zu, mapped: %zu\n",
- allocated, active, mapped);
- malloc_cprintf(write_cb, cbopaque,
- "Current active ceiling: %zu\n", atomic_read_z(cactive));
-
- /* Print chunk stats. */
- CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
- CTL_GET("stats.chunks.high", &chunks_high, size_t);
- CTL_GET("stats.chunks.current", &chunks_current, size_t);
- malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
- "highchunks curchunks\n");
- malloc_cprintf(write_cb, cbopaque,
- " %13"PRIu64" %12zu %12zu\n",
- chunks_total, chunks_high, chunks_current);
-
- /* Print huge stats. */
- CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
- CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
- CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "huge: nmalloc ndalloc allocated\n");
+ "Allocated: %zu, active: %zu, metadata: %zu,"
+ " resident: %zu, mapped: %zu\n",
+ allocated, active, metadata, resident, mapped);
malloc_cprintf(write_cb, cbopaque,
- " %12"PRIu64" %12"PRIu64" %12zu\n",
- huge_nmalloc, huge_ndalloc, huge_allocated);
+ "Current active ceiling: %zu\n",
+ atomic_read_z(cactive));
if (merged) {
unsigned narenas;
@@ -508,12 +598,12 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
ninitialized++;
}
- if (ninitialized > 1 || unmerged == false) {
+ if (ninitialized > 1 || !unmerged) {
/* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
- narenas, bins, large);
+ narenas, bins, large, huge);
}
}
}
@@ -539,7 +629,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
- cbopaque, i, bins, large);
+ cbopaque, i, bins, large,
+ huge);
}
}
}
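
The new per-bin "util" column reports curregs as a fraction of nregs * curruns in thousandths and renders it as a fixed-width "x.yyy" string. A sketch of that computation in isolation; snprintf() stands in for malloc_snprintf().

#include <assert.h>
#include <stdio.h>

static void
bin_util(size_t curregs, unsigned nregs, size_t curruns, char util[6])
{
    size_t availregs = (size_t)nregs * curruns;
    size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 1000;

    assert(milli <= 1000);
    if (milli < 10)
        snprintf(util, 6, "0.00%zu", milli);
    else if (milli < 100)
        snprintf(util, 6, "0.0%zu", milli);
    else if (milli < 1000)
        snprintf(util, 6, "0.%zu", milli);
    else
        snprintf(util, 6, "1");
}

int
main(void)
{
    char util[6];

    bin_util(127, 64, 4, util);     /* 127 of 256 regions in use */
    puts(util);                     /* prints "0.496" */
    return (0);
}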
diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c
index 6de9296..3814365 100644
--- a/contrib/jemalloc/src/tcache.c
+++ b/contrib/jemalloc/src/tcache.c
@@ -4,9 +4,6 @@
/******************************************************************************/
/* Data. */
-malloc_tsd_data(, tcache, tcache_t *, NULL)
-malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
-
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
@@ -16,6 +13,14 @@ static unsigned stack_nelms; /* Total stack elms per tcache. */
size_t nhbins;
size_t tcache_maxclass;
+tcaches_t *tcaches;
+
+/* Index of first element within tcaches that has never been used. */
+static unsigned tcaches_past;
+
+/* Head of singly linked list tracking available tcaches elements. */
+static tcaches_t *tcaches_avail;
+
/******************************************************************************/
size_t tcache_salloc(const void *ptr)
@@ -25,9 +30,9 @@ size_t tcache_salloc(const void *ptr)
}
void
-tcache_event_hard(tcache_t *tcache)
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
{
- size_t binind = tcache->next_gc_bin;
+ index_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
@@ -36,11 +41,12 @@ tcache_event_hard(tcache_t *tcache)
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (binind < NBINS) {
- tcache_bin_flush_small(tbin, binind, tbin->ncached -
- tbin->low_water + (tbin->low_water >> 2), tcache);
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ tbin->ncached - tbin->low_water + (tbin->low_water
+ >> 2));
} else {
- tcache_bin_flush_large(tbin, binind, tbin->ncached -
- tbin->low_water + (tbin->low_water >> 2), tcache);
+ tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+ - tbin->low_water + (tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that the
@@ -65,12 +71,13 @@ tcache_event_hard(tcache_t *tcache)
}
void *
-tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
+tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, index_t binind)
{
void *ret;
- arena_tcache_fill_small(tcache->arena, tbin, binind,
- config_prof ? tcache->prof_accumbytes : 0);
+ arena_tcache_fill_small(arena, tbin, binind, config_prof ?
+ tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
@@ -79,9 +86,10 @@ tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
}
void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache)
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+ index_t binind, unsigned rem)
{
+ arena_t *arena;
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
@@ -89,22 +97,24 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
assert(binind < NBINS);
assert(rem <= tbin->ncached);
+ arena = arena_choose(tsd, NULL);
+ assert(arena != NULL);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
- arena_t *arena = chunk->arena;
- arena_bin_t *bin = &arena->bins[binind];
+ arena_t *bin_arena = extent_node_arena_get(&chunk->node);
+ arena_bin_t *bin = &bin_arena->bins[binind];
- if (config_prof && arena == tcache->arena) {
+ if (config_prof && bin_arena == arena) {
if (arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
tcache->prof_accumbytes = 0;
}
malloc_mutex_lock(&bin->lock);
- if (config_stats && arena == tcache->arena) {
- assert(merged_stats == false);
+ if (config_stats && bin_arena == arena) {
+ assert(!merged_stats);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
@@ -115,17 +125,13 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk->arena == arena) {
+ if (extent_node_arena_get(&chunk->node) == bin_arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm =
- arena_mapp_get(chunk, pageind);
- if (config_fill && opt_junk) {
- arena_alloc_junk_small(ptr,
- &arena_bin_info[binind], true);
- }
- arena_dalloc_bin_locked(arena, chunk, ptr,
- mapelm);
+ arena_chunk_map_bits_t *bitselm =
+ arena_bitselm_get(chunk, pageind);
+ arena_dalloc_bin_junked_locked(bin_arena, chunk,
+ ptr, bitselm);
} else {
/*
* This object was allocated via a different
@@ -139,12 +145,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
}
malloc_mutex_unlock(&bin->lock);
}
- if (config_stats && merged_stats == false) {
+ if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- arena_bin_t *bin = &tcache->arena->bins[binind];
+ arena_bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
@@ -160,9 +166,10 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
}
void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache)
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
+ unsigned rem, tcache_t *tcache)
{
+ arena_t *arena;
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
@@ -170,17 +177,19 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
assert(binind < nhbins);
assert(rem <= tbin->ncached);
+ arena = arena_choose(tsd, NULL);
+ assert(arena != NULL);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
- arena_t *arena = chunk->arena;
+ arena_t *locked_arena = extent_node_arena_get(&chunk->node);
UNUSED bool idump;
if (config_prof)
idump = false;
- malloc_mutex_lock(&arena->lock);
- if ((config_prof || config_stats) && arena == tcache->arena) {
+ malloc_mutex_lock(&locked_arena->lock);
+ if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
tcache->prof_accumbytes);
@@ -200,9 +209,11 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk->arena == arena)
- arena_dalloc_large_locked(arena, chunk, ptr);
- else {
+ if (extent_node_arena_get(&chunk->node) ==
+ locked_arena) {
+ arena_dalloc_large_junked_locked(locked_arena,
+ chunk, ptr);
+ } else {
/*
* This object was allocated via a different
* arena than the one that is currently locked.
@@ -213,16 +224,15 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
ndeferred++;
}
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(&locked_arena->lock);
if (config_prof && idump)
prof_idump();
}
- if (config_stats && merged_stats == false) {
+ if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
@@ -249,24 +259,58 @@ tcache_arena_associate(tcache_t *tcache, arena_t *arena)
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
}
- tcache->arena = arena;
}
void
-tcache_arena_dissociate(tcache_t *tcache)
+tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
+{
+
+ tcache_arena_dissociate(tcache, oldarena);
+ tcache_arena_associate(tcache, newarena);
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
- malloc_mutex_lock(&tcache->arena->lock);
- ql_remove(&tcache->arena->tcache_ql, tcache, link);
- tcache_stats_merge(tcache, tcache->arena);
- malloc_mutex_unlock(&tcache->arena->lock);
+ malloc_mutex_lock(&arena->lock);
+ if (config_debug) {
+ bool in_ql = false;
+ tcache_t *iter;
+ ql_foreach(iter, &arena->tcache_ql, link) {
+ if (iter == tcache) {
+ in_ql = true;
+ break;
+ }
+ }
+ assert(in_ql);
+ }
+ ql_remove(&arena->tcache_ql, tcache, link);
+ tcache_stats_merge(tcache, arena);
+ malloc_mutex_unlock(&arena->lock);
}
}
tcache_t *
-tcache_create(arena_t *arena)
+tcache_get_hard(tsd_t *tsd)
+{
+ arena_t *arena;
+
+ if (!tcache_enabled_get()) {
+ if (tsd_nominal(tsd))
+ tcache_enabled_set(false); /* Memoize. */
+ return (NULL);
+ }
+ arena = arena_choose(tsd, NULL);
+ if (unlikely(arena == NULL))
+ return (NULL);
+ return (tcache_create(tsd, arena));
+}
+
+tcache_t *
+tcache_create(tsd_t *tsd, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
@@ -277,23 +321,10 @@ tcache_create(arena_t *arena)
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
- /*
- * Round up to the nearest multiple of the cacheline size, in order to
- * avoid the possibility of false cacheline sharing.
- *
- * That this works relies on the same logic as in ipalloc(), but we
- * cannot directly call ipalloc() here due to tcache bootstrapping
- * issues.
- */
- size = (size + CACHELINE_MASK) & (-CACHELINE);
-
- if (size <= SMALL_MAXCLASS)
- tcache = (tcache_t *)arena_malloc_small(arena, size, true);
- else if (size <= tcache_maxclass)
- tcache = (tcache_t *)arena_malloc_large(arena, size, true);
- else
- tcache = (tcache_t *)icalloct(size, false, arena);
+ /* Avoid false cacheline sharing. */
+ size = sa2u(size, CACHELINE);
+ tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
if (tcache == NULL)
return (NULL);
@@ -307,25 +338,23 @@ tcache_create(arena_t *arena)
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
- tcache_tsd_set(&tcache);
-
return (tcache);
}
-void
-tcache_destroy(tcache_t *tcache)
+static void
+tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
+ arena_t *arena;
unsigned i;
- size_t tcache_size;
- tcache_arena_dissociate(tcache);
+ arena = arena_choose(tsd, NULL);
+ tcache_arena_dissociate(tcache, arena);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_small(tbin, i, 0, tcache);
+ tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
if (config_stats && tbin->tstats.nrequests != 0) {
- arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
@@ -335,10 +364,9 @@ tcache_destroy(tcache_t *tcache)
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_large(tbin, i, 0, tcache);
+ tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
- arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
@@ -348,57 +376,33 @@ tcache_destroy(tcache_t *tcache)
}
if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+ arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
- tcache_size = arena_salloc(tcache, false);
- if (tcache_size <= SMALL_MAXCLASS) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
- size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
- LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
-
- arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
- } else if (tcache_size <= tcache_maxclass) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
-
- arena_dalloc_large(arena, chunk, tcache);
- } else
- idalloct(tcache, false);
+ idalloctm(tsd, tcache, false, true);
}
void
-tcache_thread_cleanup(void *arg)
+tcache_cleanup(tsd_t *tsd)
{
- tcache_t *tcache = *(tcache_t **)arg;
+ tcache_t *tcache;
- if (tcache == TCACHE_STATE_DISABLED) {
- /* Do nothing. */
- } else if (tcache == TCACHE_STATE_REINCARNATED) {
- /*
- * Another destructor called an allocator function after this
- * destructor was called. Reset tcache to
- * TCACHE_STATE_PURGATORY in order to receive another callback.
- */
- tcache = TCACHE_STATE_PURGATORY;
- tcache_tsd_set(&tcache);
- } else if (tcache == TCACHE_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
- * cause re-creation of the tcache. This time, do nothing, so
- * that the destructor will not be called again.
- */
- } else if (tcache != NULL) {
- assert(tcache != TCACHE_STATE_PURGATORY);
- tcache_destroy(tcache);
- tcache = TCACHE_STATE_PURGATORY;
- tcache_tsd_set(&tcache);
+ if (!config_tcache)
+ return;
+
+ if ((tcache = tsd_tcache_get(tsd)) != NULL) {
+ tcache_destroy(tsd, tcache);
+ tsd_tcache_set(tsd, NULL);
}
}
+void
+tcache_enabled_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
@@ -427,7 +431,67 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
}
bool
-tcache_boot0(void)
+tcaches_create(tsd_t *tsd, unsigned *r_ind)
+{
+ tcache_t *tcache;
+ tcaches_t *elm;
+
+ if (tcaches == NULL) {
+ tcaches = base_alloc(sizeof(tcache_t *) *
+ (MALLOCX_TCACHE_MAX+1));
+ if (tcaches == NULL)
+ return (true);
+ }
+
+ if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
+ return (true);
+ tcache = tcache_create(tsd, a0get());
+ if (tcache == NULL)
+ return (true);
+
+ if (tcaches_avail != NULL) {
+ elm = tcaches_avail;
+ tcaches_avail = tcaches_avail->next;
+ elm->tcache = tcache;
+ *r_ind = elm - tcaches;
+ } else {
+ elm = &tcaches[tcaches_past];
+ elm->tcache = tcache;
+ *r_ind = tcaches_past;
+ tcaches_past++;
+ }
+
+ return (false);
+}
+
+static void
+tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
+{
+
+ if (elm->tcache == NULL)
+ return;
+ tcache_destroy(tsd, elm->tcache);
+ elm->tcache = NULL;
+}
+
+void
+tcaches_flush(tsd_t *tsd, unsigned ind)
+{
+
+ tcaches_elm_flush(tsd, &tcaches[ind]);
+}
+
+void
+tcaches_destroy(tsd_t *tsd, unsigned ind)
+{
+ tcaches_t *elm = &tcaches[ind];
+ tcaches_elm_flush(tsd, elm);
+ elm->next = tcaches_avail;
+ tcaches_avail = elm;
+}
+
+bool
+tcache_boot(void)
{
unsigned i;
@@ -442,7 +506,7 @@ tcache_boot0(void)
else
tcache_maxclass = (1U << opt_lg_tcache_max);
- nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
+ nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
@@ -451,7 +515,11 @@ tcache_boot0(void)
return (true);
stack_nelms = 0;
for (i = 0; i < NBINS; i++) {
- if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
+ if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MIN;
+ } else if ((arena_bin_info[i].nregs << 1) <=
+ TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
} else {
@@ -467,13 +535,3 @@ tcache_boot0(void)
return (false);
}
-
-bool
-tcache_boot1(void)
-{
-
- if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
- return (true);
-
- return (false);
-}
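
The new tcaches_create() / tcaches_destroy() pair manages explicit tcache handles with a flat array, a high-water index that hands out never-used slots, and a singly linked free list so destroyed slots can be reused. A sketch of that slot-recycling pattern, with illustrative types and a made-up capacity in place of MALLOCX_TCACHE_MAX and base_alloc().

#include <stdbool.h>
#include <stddef.h>

#define SLOTS_MAX 64

typedef struct slot_s slot_t;
struct slot_s {
    void   *payload;    /* stands in for the tcache_t pointer */
    slot_t *next;       /* links recycled slots */
};

static slot_t   slots[SLOTS_MAX];
static unsigned slots_past;     /* first never-used index */
static slot_t  *slots_avail;    /* head of the recycled-slot list */

static bool
slot_create(void *payload, unsigned *r_ind)
{
    slot_t *elm;

    if (slots_avail != NULL) {
        elm = slots_avail;
        slots_avail = slots_avail->next;
        *r_ind = (unsigned)(elm - slots);
    } else {
        if (slots_past >= SLOTS_MAX)
            return (true);      /* no slot available */
        elm = &slots[slots_past];
        *r_ind = slots_past++;
    }
    elm->payload = payload;
    return (false);
}

static void
slot_destroy(unsigned ind)
{
    slot_t *elm = &slots[ind];

    elm->payload = NULL;        /* stands in for tcache_destroy() */
    elm->next = slots_avail;
    slots_avail = elm;
}

int
main(void)
{
    unsigned a, b;

    slot_create((void *)1, &a);     /* a == 0 */
    slot_create((void *)2, &b);     /* b == 1 */
    slot_destroy(a);
    slot_create((void *)3, &a);     /* reuses index 0 from the free list */
    return (0);
}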
diff --git a/contrib/jemalloc/src/tsd.c b/contrib/jemalloc/src/tsd.c
index 700caab..2100833 100644
--- a/contrib/jemalloc/src/tsd.c
+++ b/contrib/jemalloc/src/tsd.c
@@ -7,21 +7,22 @@
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
+
/******************************************************************************/
void *
malloc_tsd_malloc(size_t size)
{
- /* Avoid choose_arena() in order to dodge bootstrapping issues. */
- return (arena_malloc(arenas[0], size, false, false));
+ return (a0malloc(CACHELINE_CEILING(size)));
}
void
malloc_tsd_dalloc(void *wrapper)
{
- idalloct(wrapper, false);
+ a0dalloc(wrapper);
}
void
@@ -67,10 +68,58 @@ malloc_tsd_cleanup_register(bool (*f)(void))
}
void
-malloc_tsd_boot(void)
+tsd_cleanup(void *arg)
+{
+ tsd_t *tsd = (tsd_t *)arg;
+
+ switch (tsd->state) {
+ case tsd_state_nominal:
+#define O(n, t) \
+ n##_cleanup(tsd);
+MALLOC_TSD
+#undef O
+ tsd->state = tsd_state_purgatory;
+ tsd_set(tsd);
+ break;
+ case tsd_state_purgatory:
+ /*
+ * The previous time this destructor was called, we set the
+ * state to tsd_state_purgatory so that other destructors
+ * wouldn't cause re-creation of the tsd. This time, do
+ * nothing, and do not request another callback.
+ */
+ break;
+ case tsd_state_reincarnated:
+ /*
+ * Another destructor deallocated memory after this destructor
+ * was called. Reset state to tsd_state_purgatory and request
+ * another callback.
+ */
+ tsd->state = tsd_state_purgatory;
+ tsd_set(tsd);
+ break;
+ default:
+ not_reached();
+ }
+}
+
+bool
+malloc_tsd_boot0(void)
{
ncleanups = 0;
+ if (tsd_boot0())
+ return (true);
+ *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true;
+ return (false);
+}
+
+void
+malloc_tsd_boot1(void)
+{
+
+ tsd_boot1();
+ *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false;
}
#ifdef _WIN32
@@ -102,7 +151,7 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
+static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
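
tsd_cleanup() above is a small state machine: a nominal TSD runs its per-field cleanups and parks in purgatory, a reincarnated TSD (one that allocated again from a later destructor) is sent back to purgatory and requests one more callback, and purgatory itself does nothing so the destructor chain terminates. A sketch of those transitions; run_field_cleanups() and reschedule() are illustrative stubs for the MALLOC_TSD cleanup loop and tsd_set().

typedef enum {
    STATE_NOMINAL,
    STATE_PURGATORY,
    STATE_REINCARNATED
} tsd_state_t;

/* Illustrative stubs for the MALLOC_TSD cleanup loop and tsd_set(). */
static void run_field_cleanups(void) { }
static void reschedule(void) { }

static void
cleanup(tsd_state_t *state)
{
    switch (*state) {
    case STATE_NOMINAL:
        /* First visit: clean each field, then park in purgatory. */
        run_field_cleanups();
        *state = STATE_PURGATORY;
        reschedule();
        break;
    case STATE_REINCARNATED:
        /* A later destructor allocated again; request one more pass. */
        *state = STATE_PURGATORY;
        reschedule();
        break;
    case STATE_PURGATORY:
        /* Second visit: do nothing so no further callback is requested. */
        break;
    }
}

int
main(void)
{
    tsd_state_t s = STATE_NOMINAL;

    cleanup(&s);    /* runs cleanups, s becomes STATE_PURGATORY */
    cleanup(&s);    /* no-op: the destructor chain ends here */
    return (0);
}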
diff --git a/contrib/jemalloc/src/util.c b/contrib/jemalloc/src/util.c
index 70b3e45..25b61c2 100644
--- a/contrib/jemalloc/src/util.c
+++ b/contrib/jemalloc/src/util.c
@@ -97,10 +97,10 @@ buferror(int err, char *buf, size_t buflen)
{
#ifdef _WIN32
- FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, buflen, NULL);
return (0);
-#elif defined(_GNU_SOURCE)
+#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
@@ -116,7 +116,7 @@ uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
{
uintmax_t ret, digit;
- int b;
+ unsigned b;
bool neg;
const char *p, *ns;
@@ -282,7 +282,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p)
sign = '-';
switch (sign) {
case '-':
- if (neg == false)
+ if (!neg)
break;
/* Fall through. */
case ' ':
@@ -345,7 +345,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
/* Left padding. */ \
size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
(size_t)width - slen : 0); \
- if (left_justify == false && pad_len != 0) { \
+ if (!left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
@@ -397,7 +397,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
- default: not_reached(); \
+ default: \
+ not_reached(); \
+ val = 0; \
} \
} while (0)
@@ -420,19 +422,19 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
while (true) {
switch (*f) {
case '#':
- assert(alt_form == false);
+ assert(!alt_form);
alt_form = true;
break;
case '-':
- assert(left_justify == false);
+ assert(!left_justify);
left_justify = true;
break;
case ' ':
- assert(plus_space == false);
+ assert(!plus_space);
plus_space = true;
break;
case '+':
- assert(plus_plus == false);
+ assert(!plus_plus);
plus_plus = true;
break;
default: goto label_width;
@@ -564,7 +566,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
- slen = (prec < 0) ? strlen(s) : prec;
+ slen = (prec < 0) ? strlen(s) : (size_t)prec;
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
@@ -600,7 +602,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
return (ret);
}
-JEMALLOC_ATTR(format(printf, 3, 4))
+JEMALLOC_FORMAT_PRINTF(3, 4)
int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
@@ -639,7 +641,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
* Print to a callback function in such a way as to (hopefully) avoid memory
* allocation.
*/
-JEMALLOC_ATTR(format(printf, 3, 4))
+JEMALLOC_FORMAT_PRINTF(3, 4)
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, ...)
@@ -652,7 +654,7 @@ malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
}
/* Print to stderr in such a way as to avoid memory allocation. */
-JEMALLOC_ATTR(format(printf, 1, 2))
+JEMALLOC_FORMAT_PRINTF(1, 2)
void
malloc_printf(const char *format, ...)
{
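
The buferror() change above tightens the strerror_r() guard because only glibc with _GNU_SOURCE provides the GNU variant, which returns a char * that may point at a static string instead of filling buf and therefore must be copied back; elsewhere the int-returning XSI variant is assumed. A sketch of that portability split outside of jemalloc, with an illustrative wrapper name.

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int
describe_errno(int err, char *buf, size_t buflen)
{
#if defined(__GLIBC__) && defined(_GNU_SOURCE)
    /* GNU variant: returns a char * that may not point into buf. */
    char *b = strerror_r(err, buf, buflen);
    if (b != buf) {
        strncpy(buf, b, buflen);
        buf[buflen - 1] = '\0';
    }
    return (0);
#else
    /* XSI variant: fills buf and returns an int status. */
    return (strerror_r(err, buf, buflen));
#endif
}

int
main(void)
{
    char buf[128];

    describe_errno(EINVAL, buf, sizeof(buf));
    puts(buf);
    return (0);
}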