Diffstat (limited to 'contrib/jemalloc/src/arena.c')
-rw-r--r-- | contrib/jemalloc/src/arena.c | 2846
1 file changed, 1799 insertions, 1047 deletions
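The rework below replaces exact-size keying of the runs_avail tree with quantized run sizes (run_quantize(), run_quantize_first(), run_quantize_next()), so that first-best-fit run selection only has to break ties between equally useful runs by address. As a rough, self-contained sketch of that kind of floor quantization (illustrative only; it stands in for the index2size()/size2index() machinery the diff actually relies on, and ignores small_run_tab and large_pad):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative only: round a size down to a quantized size class with four
 * classes per power-of-two group (two mantissa bits), which is the general
 * shape of jemalloc's size-class spacing.  The real run_quantize() in this
 * diff additionally consults small_run_tab[] and accounts for large_pad.
 */
static size_t
quantize_floor(size_t size)
{
	size_t lg, group, delta;

	assert(size >= 16);
	/* Find the largest power of two <= size. */
	for (lg = 0; ((size_t)1 << (lg + 1)) <= size; lg++)
		;
	group = (size_t)1 << lg;	/* Start of the size-class group. */
	delta = group >> 2;		/* Spacing between classes in the group. */
	return (group + ((size - group) / delta) * delta);
}

int
main(void)
{
	size_t sizes[] = {4096, 5000, 8192, 12288, 20480};
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%zu -> %zu\n", sizes[i], quantize_floor(sizes[i]));
	return (0);
}

With run sizes snapped to a small set of classes like this, the successor search in the diff (arena_avail_tree_nsearch() with a key that compares lower than any run of equal quantized size) lands on the lowest-addressed available run whose quantized size is at least the request.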
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c index dad707b..af48b39 100644 --- a/contrib/jemalloc/src/arena.c +++ b/contrib/jemalloc/src/arena.c @@ -5,37 +5,17 @@ /* Data. */ ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; +static ssize_t lg_dirty_mult_default; arena_bin_info_t arena_bin_info[NBINS]; -JEMALLOC_ALIGNED(CACHELINE) -const uint8_t small_size2bin[] = { -#define S2B_8(i) i, -#define S2B_16(i) S2B_8(i) S2B_8(i) -#define S2B_32(i) S2B_16(i) S2B_16(i) -#define S2B_64(i) S2B_32(i) S2B_32(i) -#define S2B_128(i) S2B_64(i) S2B_64(i) -#define S2B_256(i) S2B_128(i) S2B_128(i) -#define S2B_512(i) S2B_256(i) S2B_256(i) -#define S2B_1024(i) S2B_512(i) S2B_512(i) -#define S2B_2048(i) S2B_1024(i) S2B_1024(i) -#define S2B_4096(i) S2B_2048(i) S2B_2048(i) -#define S2B_8192(i) S2B_4096(i) S2B_4096(i) -#define SIZE_CLASS(bin, delta, size) \ - S2B_##delta(bin) - SIZE_CLASSES -#undef S2B_8 -#undef S2B_16 -#undef S2B_32 -#undef S2B_64 -#undef S2B_128 -#undef S2B_256 -#undef S2B_512 -#undef S2B_1024 -#undef S2B_2048 -#undef S2B_4096 -#undef S2B_8192 -#undef SIZE_CLASS -}; +size_t map_bias; +size_t map_misc_offset; +size_t arena_maxrun; /* Max run size for arenas. */ +size_t arena_maxclass; /* Max size class for arenas. */ +static size_t small_maxrun; /* Max run size used for small size classes. */ +static bool *small_run_tab; /* Valid small run page multiples. */ +unsigned nlclasses; /* Number of large size classes. */ +unsigned nhclasses; /* Number of huge size classes. */ /******************************************************************************/ /* @@ -45,7 +25,7 @@ const uint8_t small_size2bin[] = { static void arena_purge(arena_t *arena, bool all); static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned); + bool cleaned, bool decommitted); static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, @@ -53,296 +33,327 @@ static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, /******************************************************************************/ -static inline int -arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) +#define CHUNK_MAP_KEY ((uintptr_t)0x1U) + +JEMALLOC_INLINE_C arena_chunk_map_misc_t * +arena_miscelm_key_create(size_t size) { - uintptr_t a_mapelm = (uintptr_t)a; - uintptr_t b_mapelm = (uintptr_t)b; - assert(a != NULL); - assert(b != NULL); + return ((arena_chunk_map_misc_t *)((size << CHUNK_MAP_SIZE_SHIFT) | + CHUNK_MAP_KEY)); +} + +JEMALLOC_INLINE_C bool +arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm) +{ - return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); + return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0); } -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, - u.rb_link, arena_run_comp) +#undef CHUNK_MAP_KEY -static inline int -arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) +JEMALLOC_INLINE_C size_t +arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm) { - int ret; - size_t a_size = a->bits & ~PAGE_MASK; - size_t b_size = b->bits & ~PAGE_MASK; - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_mapelm, b_mapelm; + assert(arena_miscelm_is_key(miscelm)); - if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY) - a_mapelm = (uintptr_t)a; - else { - /* - * Treat keys as though they are lower than anything - * else. 
- */ - a_mapelm = 0; - } - b_mapelm = (uintptr_t)b; + return (((uintptr_t)miscelm & CHUNK_MAP_SIZE_MASK) >> + CHUNK_MAP_SIZE_SHIFT); +} - ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); - } +JEMALLOC_INLINE_C size_t +arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk; + size_t pageind, mapbits; - return (ret); -} + assert(!arena_miscelm_is_key(miscelm)); -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, - u.rb_link, arena_avail_comp) + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + pageind = arena_miscelm_to_pageind(miscelm); + mapbits = arena_mapbits_get(chunk, pageind); + return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT); +} -static inline int -arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b) +JEMALLOC_INLINE_C int +arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) { + uintptr_t a_miscelm = (uintptr_t)a; + uintptr_t b_miscelm = (uintptr_t)b; assert(a != NULL); assert(b != NULL); - /* - * Short-circuit for self comparison. The following comparison code - * would come to the same result, but at the cost of executing the slow - * path. - */ - if (a == b) - return (0); + return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); +} + +/* Generate red-black tree functions. */ +rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, + rb_link, arena_run_comp) + +static size_t +run_quantize(size_t size) +{ + size_t qsize; + + assert(size != 0); + assert(size == PAGE_CEILING(size)); + + /* Don't change sizes that are valid small run sizes. */ + if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) + return (size); /* - * Order such that chunks with higher fragmentation are "less than" - * those with lower fragmentation -- purging order is from "least" to - * "greatest". Fragmentation is measured as: - * - * mean current avail run size - * -------------------------------- - * mean defragmented avail run size - * - * navail - * ----------- - * nruns_avail nruns_avail-nruns_adjac - * = ========================= = ----------------------- - * navail nruns_avail - * ----------------------- - * nruns_avail-nruns_adjac - * - * The following code multiplies away the denominator prior to - * comparison, in order to avoid division. - * + * Round down to the nearest run size that can actually be requested + * during normal large allocation. Add large_pad so that cache index + * randomization can offset the allocation from the page boundary. */ - { - size_t a_val = (a->nruns_avail - a->nruns_adjac) * - b->nruns_avail; - size_t b_val = (b->nruns_avail - b->nruns_adjac) * - a->nruns_avail; + qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; + if (qsize <= SMALL_MAXCLASS + large_pad) + return (run_quantize(size - large_pad)); + assert(qsize <= size); + return (qsize); +} + +static size_t +run_quantize_next(size_t size) +{ + size_t large_run_size_next; + + assert(size != 0); + assert(size == PAGE_CEILING(size)); - if (a_val < b_val) - return (1); - if (a_val > b_val) - return (-1); - } /* - * Break ties by chunk address. For fragmented chunks, report lower - * addresses as "lower", so that fragmentation reduction happens first - * at lower addresses. However, use the opposite ordering for - * unfragmented chunks, in order to increase the chances of - * re-allocating dirty runs. + * Return the next quantized size greater than the input size. 
+ * Quantized sizes comprise the union of run sizes that back small + * region runs, and run sizes that back large regions with no explicit + * alignment constraints. */ - { - uintptr_t a_chunk = (uintptr_t)a; - uintptr_t b_chunk = (uintptr_t)b; - int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk)); - if (a->nruns_adjac == 0) { - assert(b->nruns_adjac == 0); - ret = -ret; + + if (size > SMALL_MAXCLASS) { + large_run_size_next = PAGE_CEILING(index2size(size2index(size - + large_pad) + 1) + large_pad); + } else + large_run_size_next = SIZE_T_MAX; + if (size >= small_maxrun) + return (large_run_size_next); + + while (true) { + size += PAGE; + assert(size <= small_maxrun); + if (small_run_tab[size >> LG_PAGE]) { + if (large_run_size_next < size) + return (large_run_size_next); + return (size); } - return (ret); } } -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t, - dirty_link, arena_chunk_dirty_comp) - -static inline bool -arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind) +static size_t +run_quantize_first(size_t size) { - bool ret; + size_t qsize = run_quantize(size); - if (pageind-1 < map_bias) - ret = false; - else { - ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0); - assert(ret == false || arena_mapbits_dirty_get(chunk, - pageind-1) != arena_mapbits_dirty_get(chunk, pageind)); + if (qsize < size) { + /* + * Skip a quantization that may have an adequately large run, + * because under-sized runs may be mixed in. This only happens + * when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + qsize = run_quantize_next(size); } - return (ret); + return (qsize); } -static inline bool -arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages) +JEMALLOC_INLINE_C int +arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) { - bool ret; + int ret; + uintptr_t a_miscelm = (uintptr_t)a; + size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ? + arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a)); + size_t b_qsize = run_quantize(arena_miscelm_size_get(b)); - if (pageind+npages == chunk_npages) - ret = false; - else { - assert(pageind+npages < chunk_npages); - ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0); - assert(ret == false || arena_mapbits_dirty_get(chunk, pageind) - != arena_mapbits_dirty_get(chunk, pageind+npages)); + /* + * Compare based on quantized size rather than size, in order to sort + * equally useful runs only by address. + */ + ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); + if (ret == 0) { + if (!arena_miscelm_is_key(a)) { + uintptr_t b_miscelm = (uintptr_t)b; + + ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); + } else { + /* + * Treat keys as if they are lower than anything else. + */ + ret = -1; + } } + return (ret); } -static inline bool -arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages) +/* Generate red-black tree functions. 
*/ +rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, + arena_chunk_map_misc_t, rb_link, arena_avail_comp) + +static void +arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) { - return (arena_avail_adjac_pred(chunk, pageind) || - arena_avail_adjac_succ(chunk, pageind, npages)); + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, + pageind)); } static void -arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) +arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) { assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); + arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, + pageind)); +} - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be inserted is clean. - */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); - - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac++; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac++; - chunk->nruns_avail++; - assert(chunk->nruns_avail > chunk->nruns_adjac); +static void +arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) +{ + arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty += npages; - chunk->ndirty += npages; - } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); + assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == + CHUNK_MAP_DIRTY); - arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); + qr_new(&miscelm->rd, rd_link); + qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); + arena->ndirty += npages; } static void -arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) +arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) { + arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); + assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); + assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == + CHUNK_MAP_DIRTY); - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be removed is clean. 
- */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); + qr_remove(&miscelm->rd, rd_link); + assert(arena->ndirty >= npages); + arena->ndirty -= npages; +} + +static size_t +arena_chunk_dirty_npages(const extent_node_t *node) +{ - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac--; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac--; - chunk->nruns_avail--; - assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail - == 0 && chunk->nruns_adjac == 0)); + return (extent_node_size_get(node) >> LG_PAGE); +} + +void +arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) +{ - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty -= npages; - chunk->ndirty -= npages; + if (cache) { + extent_node_dirty_linkage_init(node); + extent_node_dirty_insert(node, &arena->runs_dirty, + &arena->chunks_cache); + arena->ndirty += arena_chunk_dirty_npages(node); } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); +} - arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); +void +arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) +{ + + if (dirty) { + extent_node_dirty_remove(node); + assert(arena->ndirty >= arena_chunk_dirty_npages(node)); + arena->ndirty -= arena_chunk_dirty_npages(node); + } } -static inline void * +JEMALLOC_INLINE_C void * arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) { void *ret; unsigned regind; - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); + arena_chunk_map_misc_t *miscelm; + void *rpages; assert(run->nfree > 0); - assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); + assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); - regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); - ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + + regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); + miscelm = arena_run_to_miscelm(run); + rpages = arena_miscelm_to_rpages(miscelm); + ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + (uintptr_t)(bin_info->reg_interval * regind)); run->nfree--; - if (regind == run->nextind) - run->nextind++; - assert(regind < run->nextind); return (ret); } -static inline void +JEMALLOC_INLINE_C void arena_run_reg_dalloc(arena_run_t *run, void *ptr) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t mapbits = arena_mapbits_get(chunk, pageind); - size_t binind = arena_ptr_small_binind_get(ptr, mapbits); + index_t binind = arena_ptr_small_binind_get(ptr, mapbits); arena_bin_info_t *bin_info = &arena_bin_info[binind]; unsigned regind = arena_run_regind(run, bin_info, ptr); - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); assert(run->nfree < bin_info->nregs); /* Freeing an interior pointer can cause assertion failure. */ - assert(((uintptr_t)ptr - ((uintptr_t)run + + assert(((uintptr_t)ptr - + ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + (uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_interval == 0); - assert((uintptr_t)ptr >= (uintptr_t)run + + assert((uintptr_t)ptr >= + (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + (uintptr_t)bin_info->reg0_offset); /* Freeing an unallocated pointer can cause assertion failure. 
*/ - assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); + assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); - bitmap_unset(bitmap, &bin_info->bitmap_info, regind); + bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); run->nfree++; } -static inline void +JEMALLOC_INLINE_C void arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) { - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), (npages << LG_PAGE)); + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (npages << LG_PAGE)); memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, (npages << LG_PAGE)); } -static inline void +JEMALLOC_INLINE_C void arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) { - VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), PAGE); + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind + << LG_PAGE)), PAGE); } -static inline void +JEMALLOC_INLINE_C void arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) { size_t i; @@ -358,9 +369,9 @@ arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) { if (config_stats) { - ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + - add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive - - sub_pages) << LG_PAGE); + ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages + - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << + LG_PAGE); if (cactive_diff != 0) stats_cactive_add(cactive_diff); } @@ -368,10 +379,12 @@ arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) static void arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, - size_t flag_dirty, size_t need_pages) + size_t flag_dirty, size_t flag_decommitted, size_t need_pages) { size_t total_pages, rem_pages; + assert(flag_dirty == 0 || flag_decommitted == 0); + total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> LG_PAGE; assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == @@ -379,54 +392,70 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, assert(need_pages <= total_pages); rem_pages = total_pages - need_pages; - arena_avail_remove(arena, chunk, run_ind, total_pages, true, true); + arena_avail_remove(arena, chunk, run_ind, total_pages); + if (flag_dirty != 0) + arena_run_dirty_remove(arena, chunk, run_ind, total_pages); arena_cactive_update(arena, need_pages, 0); arena->nactive += need_pages; /* Keep track of trailing unused pages for later use. */ if (rem_pages > 0) { + size_t flags = flag_dirty | flag_decommitted; + size_t flag_unzeroed_mask = (flags == 0) ? 
CHUNK_MAP_UNZEROED : + 0; + + arena_mapbits_unallocated_set(chunk, run_ind+need_pages, + (rem_pages << LG_PAGE), flags | + (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) & + flag_unzeroed_mask)); + arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1, + (rem_pages << LG_PAGE), flags | + (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) & + flag_unzeroed_mask)); if (flag_dirty != 0) { - arena_mapbits_unallocated_set(chunk, - run_ind+need_pages, (rem_pages << LG_PAGE), - flag_dirty); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - flag_dirty); - } else { - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages)); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+total_pages-1)); + arena_run_dirty_insert(arena, chunk, run_ind+need_pages, + rem_pages); } - arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages, - false, true); + arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); } } -static void +static bool arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, bool remove, bool zero) { arena_chunk_t *chunk; - size_t flag_dirty, run_ind, need_pages, i; + arena_chunk_map_misc_t *miscelm; + size_t flag_dirty, flag_decommitted, run_ind, need_pages, i; + size_t flag_unzeroed_mask; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); + miscelm = arena_run_to_miscelm(run); + run_ind = arena_miscelm_to_pageind(miscelm); flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); + flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); need_pages = (size >> LG_PAGE); assert(need_pages > 0); + if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, + run_ind << LG_PAGE, size, arena->ind)) + return (true); + if (remove) { arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - need_pages); + flag_decommitted, need_pages); } if (zero) { - if (flag_dirty == 0) { + if (flag_decommitted != 0) { + /* The run is untouched, and therefore zeroed. */ + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void + *)((uintptr_t)chunk + (run_ind << LG_PAGE)), + (need_pages << LG_PAGE)); + } else if (flag_dirty != 0) { + /* The run is dirty, so all pages must be zeroed. */ + arena_run_zero(chunk, run_ind, need_pages); + } else { /* * The run is clean, so some pages may be zeroed (i.e. * never before touched). @@ -443,12 +472,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, run_ind+i); } } - } else { - /* The run is dirty, so all pages must be zeroed. */ - arena_run_zero(chunk, run_ind, need_pages); } } else { - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); } @@ -456,68 +482,66 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, * Set the last element first, in case the run only contains one page * (i.e. both statements set the same element). */ - arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); + flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 
+ CHUNK_MAP_UNZEROED : 0; + arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + run_ind+need_pages-1))); + arena_mapbits_large_set(chunk, run_ind, size, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind))); + return (false); } -static void +static bool arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) { - arena_run_split_large_helper(arena, run, size, true, zero); + return (arena_run_split_large_helper(arena, run, size, true, zero)); } -static void +static bool arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) { - arena_run_split_large_helper(arena, run, size, false, zero); + return (arena_run_split_large_helper(arena, run, size, false, zero)); } -static void +static bool arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, - size_t binind) + index_t binind) { arena_chunk_t *chunk; - size_t flag_dirty, run_ind, need_pages, i; + arena_chunk_map_misc_t *miscelm; + size_t flag_dirty, flag_decommitted, run_ind, need_pages, i; assert(binind != BININD_INVALID); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); + miscelm = arena_run_to_miscelm(run); + run_ind = arena_miscelm_to_pageind(miscelm); flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); + flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); need_pages = (size >> LG_PAGE); assert(need_pages > 0); - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); + if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, + run_ind << LG_PAGE, size, arena->ind)) + return (true); - /* - * Propagate the dirty and unzeroed flags to the allocated small run, - * so that arena_dalloc_bin_run() has the ability to conditionally trim - * clean pages. - */ - arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); - /* - * The first page will always be dirtied during small run - * initialization, so a validation failure here would not actually - * cause an observable failure. 
- */ - if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, - run_ind) == 0) - arena_run_page_validate_zeroed(chunk, run_ind); - for (i = 1; i < need_pages - 1; i++) { - arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) + arena_run_split_remove(arena, chunk, run_ind, flag_dirty, + flag_decommitted, need_pages); + + for (i = 0; i < need_pages; i++) { + size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, + run_ind+i); + arena_mapbits_small_set(chunk, run_ind+i, i, binind, + flag_unzeroed); + if (config_debug && flag_dirty == 0 && flag_unzeroed == 0) arena_run_page_validate_zeroed(chunk, run_ind+i); } - arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1, - binind, flag_dirty); - if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages-1) == 0) - arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1); - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); + return (false); } static arena_chunk_t * @@ -533,76 +557,143 @@ arena_chunk_init_spare(arena_t *arena) assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); + arena_maxrun); assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxclass); + arena_maxrun); assert(arena_mapbits_dirty_get(chunk, map_bias) == arena_mapbits_dirty_get(chunk, chunk_npages-1)); return (chunk); } +static bool +arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) +{ + + /* + * The extent node notion of "committed" doesn't directly apply to + * arena chunks. Arbitrarily mark them as committed. The commit state + * of runs is tracked individually, and upon chunk deallocation the + * entire chunk is in a consistent commit state. + */ + extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); + extent_node_achunk_set(&chunk->node, true); + return (chunk_register(chunk, &chunk->node)); +} + static arena_chunk_t * -arena_chunk_init_hard(arena_t *arena) +arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, + bool *zero, bool *commit) { arena_chunk_t *chunk; - bool zero; - size_t unzeroed, i; - assert(arena->spare == NULL); - - zero = false; malloc_mutex_unlock(&arena->lock); - chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false, - &zero, arena->dss_prec); + + chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL, + chunksize, chunksize, zero, commit); + if (chunk != NULL && !*commit) { + /* Commit header. */ + if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << + LG_PAGE, arena->ind)) { + chunk_dalloc_wrapper(arena, chunk_hooks, + (void *)chunk, chunksize, *commit); + chunk = NULL; + } + } + if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { + if (!*commit) { + /* Undo commit of header. 
*/ + chunk_hooks->decommit(chunk, chunksize, 0, map_bias << + LG_PAGE, arena->ind); + } + chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, + chunksize, *commit); + chunk = NULL; + } + malloc_mutex_lock(&arena->lock); - if (chunk == NULL) - return (NULL); - if (config_stats) + return (chunk); +} + +static arena_chunk_t * +arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) +{ + arena_chunk_t *chunk; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + + chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize, + chunksize, zero, true); + if (chunk != NULL) { + if (arena_chunk_register(arena, chunk, *zero)) { + chunk_dalloc_cache(arena, &chunk_hooks, chunk, + chunksize, true); + return (NULL); + } + *commit = true; + } + if (chunk == NULL) { + chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks, + zero, commit); + } + + if (config_stats && chunk != NULL) { arena->stats.mapped += chunksize; + arena->stats.metadata_mapped += (map_bias << LG_PAGE); + } - chunk->arena = arena; + return (chunk); +} - /* - * Claim that no pages are in use, since the header is merely overhead. - */ - chunk->ndirty = 0; +static arena_chunk_t * +arena_chunk_init_hard(arena_t *arena) +{ + arena_chunk_t *chunk; + bool zero, commit; + size_t flag_unzeroed, flag_decommitted, i; - chunk->nruns_avail = 0; - chunk->nruns_adjac = 0; + assert(arena->spare == NULL); + + zero = false; + commit = false; + chunk = arena_chunk_alloc_internal(arena, &zero, &commit); + if (chunk == NULL) + return (NULL); /* * Initialize the map to contain one maximal free untouched run. Mark - * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. + * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted + * chunk. */ - unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; - arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, - unzeroed); + flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; + flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; + arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, + flag_unzeroed | flag_decommitted); /* * There is no need to initialize the internal page map entries unless * the chunk is not zeroed. 
*/ - if (zero == false) { - VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk, - map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, + if (!zero) { + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( + (void *)arena_bitselm_get(chunk, map_bias+1), + (size_t)((uintptr_t) arena_bitselm_get(chunk, + chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); for (i = map_bias+1; i < chunk_npages-1; i++) - arena_mapbits_unzeroed_set(chunk, i, unzeroed); + arena_mapbits_internal_set(chunk, i, flag_unzeroed); } else { - VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk, - map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void + *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) + arena_bitselm_get(chunk, chunk_npages-1) - + (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); if (config_debug) { for (i = map_bias+1; i < chunk_npages-1; i++) { assert(arena_mapbits_unzeroed_get(chunk, i) == - unzeroed); + flag_unzeroed); } } } - arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass, - unzeroed); + arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, + flag_unzeroed); return (chunk); } @@ -621,65 +712,383 @@ arena_chunk_alloc(arena_t *arena) } /* Insert the run into the runs_avail tree. */ - arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); + arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); return (chunk); } static void -arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) +arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) { + assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); + arena_maxrun); assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxclass); + arena_maxrun); assert(arena_mapbits_dirty_get(chunk, map_bias) == arena_mapbits_dirty_get(chunk, chunk_npages-1)); + assert(arena_mapbits_decommitted_get(chunk, map_bias) == + arena_mapbits_decommitted_get(chunk, chunk_npages-1)); /* * Remove run from the runs_avail tree, so that the arena does not use * it. */ - arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); + arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); if (arena->spare != NULL) { arena_chunk_t *spare = arena->spare; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + bool committed; arena->spare = chunk; - malloc_mutex_unlock(&arena->lock); - chunk_dealloc((void *)spare, chunksize, true); - malloc_mutex_lock(&arena->lock); - if (config_stats) + if (arena_mapbits_dirty_get(spare, map_bias) != 0) { + arena_run_dirty_remove(arena, spare, map_bias, + chunk_npages-map_bias); + } + + chunk_deregister(spare, &spare->node); + + committed = (arena_mapbits_decommitted_get(spare, map_bias) == + 0); + if (!committed) { + /* + * Decommit the header. Mark the chunk as decommitted + * even if header decommit fails, since treating a + * partially committed chunk as committed has a high + * potential for causing later access of decommitted + * memory. 
+ */ + chunk_hooks = chunk_hooks_get(arena); + chunk_hooks.decommit(spare, chunksize, 0, map_bias << + LG_PAGE, arena->ind); + } + + chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare, + chunksize, committed); + + if (config_stats) { arena->stats.mapped -= chunksize; + arena->stats.metadata_mapped -= (map_bias << LG_PAGE); + } } else arena->spare = chunk; } -static arena_run_t * -arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) +static void +arena_huge_malloc_stats_update(arena_t *arena, size_t usize) { - arena_run_t *run; - arena_chunk_map_t *mapelm, key; + index_t index = size2index(usize) - nlclasses - NBINS; - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = (((uintptr_t)mapelm - - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) - + map_bias; + cassert(config_stats); - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << - LG_PAGE)); - arena_run_split_large(arena, run, size, zero); - return (run); + arena->stats.nmalloc_huge++; + arena->stats.allocated_huge += usize; + arena->stats.hstats[index].nmalloc++; + arena->stats.hstats[index].curhchunks++; +} + +static void +arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) +{ + index_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.nmalloc_huge--; + arena->stats.allocated_huge -= usize; + arena->stats.hstats[index].nmalloc--; + arena->stats.hstats[index].curhchunks--; +} + +static void +arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) +{ + index_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.ndalloc_huge++; + arena->stats.allocated_huge -= usize; + arena->stats.hstats[index].ndalloc++; + arena->stats.hstats[index].curhchunks--; +} + +static void +arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) +{ + index_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.ndalloc_huge--; + arena->stats.allocated_huge += usize; + arena->stats.hstats[index].ndalloc--; + arena->stats.hstats[index].curhchunks++; +} + +static void +arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) +{ + + arena_huge_dalloc_stats_update(arena, oldsize); + arena_huge_malloc_stats_update(arena, usize); +} + +static void +arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, + size_t usize) +{ + + arena_huge_dalloc_stats_update_undo(arena, oldsize); + arena_huge_malloc_stats_update_undo(arena, usize); +} + +extent_node_t * +arena_node_alloc(arena_t *arena) +{ + extent_node_t *node; + + malloc_mutex_lock(&arena->node_cache_mtx); + node = ql_last(&arena->node_cache, ql_link); + if (node == NULL) { + malloc_mutex_unlock(&arena->node_cache_mtx); + return (base_alloc(sizeof(extent_node_t))); } + ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); + malloc_mutex_unlock(&arena->node_cache_mtx); + return (node); +} - return (NULL); +void +arena_node_dalloc(arena_t *arena, extent_node_t *node) +{ + + malloc_mutex_lock(&arena->node_cache_mtx); + ql_elm_new(node, ql_link); + ql_tail_insert(&arena->node_cache, node, ql_link); + malloc_mutex_unlock(&arena->node_cache_mtx); +} + +static void * +arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, + size_t usize, size_t alignment, bool *zero, size_t csize) +{ + void *ret; + bool commit = true; + + ret = chunk_alloc_wrapper(arena, 
chunk_hooks, NULL, csize, alignment, + zero, &commit); + if (ret == NULL) { + /* Revert optimistic stats updates. */ + malloc_mutex_lock(&arena->lock); + if (config_stats) { + arena_huge_malloc_stats_update_undo(arena, usize); + arena->stats.mapped -= usize; + } + arena->nactive -= (usize >> LG_PAGE); + malloc_mutex_unlock(&arena->lock); + } + + return (ret); +} + +void * +arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, + bool *zero) +{ + void *ret; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + size_t csize = CHUNK_CEILING(usize); + + malloc_mutex_lock(&arena->lock); + + /* Optimistically update stats. */ + if (config_stats) { + arena_huge_malloc_stats_update(arena, usize); + arena->stats.mapped += usize; + } + arena->nactive += (usize >> LG_PAGE); + + ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, + zero, true); + malloc_mutex_unlock(&arena->lock); + if (ret == NULL) { + ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, + alignment, zero, csize); + } + + if (config_stats && ret != NULL) + stats_cactive_add(usize); + return (ret); +} + +void +arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) +{ + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + size_t csize; + + csize = CHUNK_CEILING(usize); + malloc_mutex_lock(&arena->lock); + if (config_stats) { + arena_huge_dalloc_stats_update(arena, usize); + arena->stats.mapped -= usize; + stats_cactive_sub(usize); + } + arena->nactive -= (usize >> LG_PAGE); + + chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); + malloc_mutex_unlock(&arena->lock); +} + +void +arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, + size_t usize) +{ + + assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); + assert(oldsize != usize); + + malloc_mutex_lock(&arena->lock); + if (config_stats) + arena_huge_ralloc_stats_update(arena, oldsize, usize); + if (oldsize < usize) { + size_t udiff = usize - oldsize; + arena->nactive += udiff >> LG_PAGE; + if (config_stats) + stats_cactive_add(udiff); + } else { + size_t udiff = oldsize - usize; + arena->nactive -= udiff >> LG_PAGE; + if (config_stats) + stats_cactive_sub(udiff); + } + malloc_mutex_unlock(&arena->lock); +} + +void +arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, + size_t usize) +{ + size_t udiff = oldsize - usize; + size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); + + malloc_mutex_lock(&arena->lock); + if (config_stats) { + arena_huge_ralloc_stats_update(arena, oldsize, usize); + if (cdiff != 0) { + arena->stats.mapped -= cdiff; + stats_cactive_sub(udiff); + } + } + arena->nactive -= udiff >> LG_PAGE; + + if (cdiff != 0) { + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + void *nchunk = (void *)((uintptr_t)chunk + + CHUNK_CEILING(usize)); + + chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); + } + malloc_mutex_unlock(&arena->lock); +} + +static bool +arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk, + size_t udiff, size_t cdiff) +{ + bool err; + bool commit = true; + + err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize, + zero, &commit) == NULL); + if (err) { + /* Revert optimistic stats updates. 
*/ + malloc_mutex_lock(&arena->lock); + if (config_stats) { + arena_huge_ralloc_stats_update_undo(arena, oldsize, + usize); + arena->stats.mapped -= cdiff; + } + arena->nactive -= (udiff >> LG_PAGE); + malloc_mutex_unlock(&arena->lock); + } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, + cdiff, true, arena->ind)) { + chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, + true); + err = true; + } + return (err); +} + +bool +arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, + size_t usize, bool *zero) +{ + bool err; + chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); + void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); + size_t udiff = usize - oldsize; + size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); + + malloc_mutex_lock(&arena->lock); + + /* Optimistically update stats. */ + if (config_stats) { + arena_huge_ralloc_stats_update(arena, oldsize, usize); + arena->stats.mapped += cdiff; + } + arena->nactive += (udiff >> LG_PAGE); + + err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, + chunksize, zero, true) == NULL); + malloc_mutex_unlock(&arena->lock); + if (err) { + err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, + chunk, oldsize, usize, zero, nchunk, udiff, + cdiff); + } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, + cdiff, true, arena->ind)) { + chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, + true); + err = true; + } + + if (config_stats && !err) + stats_cactive_add(udiff); + return (err); +} + +/* + * Do first-best-fit run selection, i.e. select the lowest run that best fits. + * Run sizes are quantized, so not all candidate runs are necessarily exactly + * the same size. + */ +static arena_run_t * +arena_run_first_best_fit(arena_t *arena, size_t size) +{ + size_t search_size = run_quantize_first(size); + arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size); + arena_chunk_map_misc_t *miscelm = + arena_avail_tree_nsearch(&arena->runs_avail, key); + if (miscelm == NULL) + return (NULL); + return (&miscelm->run); +} + +static arena_run_t * +arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) +{ + arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); + if (run != NULL) { + if (arena_run_split_large(arena, run, size, zero)) + run = NULL; + } + return (run); } static arena_run_t * @@ -688,8 +1097,8 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero) arena_chunk_t *chunk; arena_run_t *run; - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); + assert(size <= arena_maxrun); + assert(size == PAGE_CEILING(size)); /* Search the arena's chunks for the lowest best fit. 
*/ run = arena_run_alloc_large_helper(arena, size, zero); @@ -701,8 +1110,9 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero) */ chunk = arena_chunk_alloc(arena); if (chunk != NULL) { - run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split_large(arena, run, size, zero); + run = &arena_miscelm_get(chunk, map_bias)->run; + if (arena_run_split_large(arena, run, size, zero)) + run = NULL; return (run); } @@ -715,36 +1125,24 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero) } static arena_run_t * -arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind) +arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind) { - arena_run_t *run; - arena_chunk_map_t *mapelm, key; - - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = (((uintptr_t)mapelm - - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) - + map_bias; - - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << - LG_PAGE)); - arena_run_split_small(arena, run, size, binind); - return (run); + arena_run_t *run = arena_run_first_best_fit(arena, size); + if (run != NULL) { + if (arena_run_split_small(arena, run, size, binind)) + run = NULL; } - - return (NULL); + return (run); } static arena_run_t * -arena_run_alloc_small(arena_t *arena, size_t size, size_t binind) +arena_run_alloc_small(arena_t *arena, size_t size, index_t binind) { arena_chunk_t *chunk; arena_run_t *run; - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); + assert(size <= arena_maxrun); + assert(size == PAGE_CEILING(size)); assert(binind != BININD_INVALID); /* Search the arena's chunks for the lowest best fit. */ @@ -757,8 +1155,9 @@ arena_run_alloc_small(arena_t *arena, size_t size, size_t binind) */ chunk = arena_chunk_alloc(arena); if (chunk != NULL) { - run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split_small(arena, run, size, binind); + run = &arena_miscelm_get(chunk, map_bias)->run; + if (arena_run_split_small(arena, run, size, binind)) + run = NULL; return (run); } @@ -770,313 +1169,373 @@ arena_run_alloc_small(arena_t *arena, size_t size, size_t binind) return (arena_run_alloc_small_helper(arena, size, binind)); } -static inline void +static bool +arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) +{ + + return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) + << 3)); +} + +ssize_t +arena_lg_dirty_mult_get(arena_t *arena) +{ + ssize_t lg_dirty_mult; + + malloc_mutex_lock(&arena->lock); + lg_dirty_mult = arena->lg_dirty_mult; + malloc_mutex_unlock(&arena->lock); + + return (lg_dirty_mult); +} + +bool +arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult) +{ + + if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) + return (true); + + malloc_mutex_lock(&arena->lock); + arena->lg_dirty_mult = lg_dirty_mult; + arena_maybe_purge(arena); + malloc_mutex_unlock(&arena->lock); + + return (false); +} + +void arena_maybe_purge(arena_t *arena) { - size_t npurgeable, threshold; /* Don't purge if the option is disabled. */ - if (opt_lg_dirty_mult < 0) + if (arena->lg_dirty_mult < 0) return; - /* Don't purge if all dirty pages are already being purged. */ - if (arena->ndirty <= arena->npurgatory) + /* Don't recursively purge. 
*/ + if (arena->purging) return; - npurgeable = arena->ndirty - arena->npurgatory; - threshold = (arena->nactive >> opt_lg_dirty_mult); /* - * Don't purge unless the number of purgeable pages exceeds the - * threshold. + * Iterate, since preventing recursive purging could otherwise leave too + * many dirty pages. */ - if (npurgeable <= threshold) - return; - - arena_purge(arena, false); + while (true) { + size_t threshold = (arena->nactive >> arena->lg_dirty_mult); + if (threshold < chunk_npages) + threshold = chunk_npages; + /* + * Don't purge unless the number of purgeable pages exceeds the + * threshold. + */ + if (arena->ndirty <= threshold) + return; + arena_purge(arena, false); + } } -static arena_chunk_t * -chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) +static size_t +arena_dirty_count(arena_t *arena) { - size_t *ndirty = (size_t *)arg; + size_t ndirty = 0; + arena_runs_dirty_link_t *rdelm; + extent_node_t *chunkselm; + + for (rdelm = qr_next(&arena->runs_dirty, rd_link), + chunkselm = qr_next(&arena->chunks_cache, cc_link); + rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) { + size_t npages; + + if (rdelm == &chunkselm->rd) { + npages = extent_node_size_get(chunkselm) >> LG_PAGE; + chunkselm = qr_next(chunkselm, cc_link); + } else { + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + assert(arena_mapbits_allocated_get(chunk, pageind) == + 0); + assert(arena_mapbits_large_get(chunk, pageind) == 0); + assert(arena_mapbits_dirty_get(chunk, pageind) != 0); + npages = arena_mapbits_unallocated_size_get(chunk, + pageind) >> LG_PAGE; + } + ndirty += npages; + } - assert(chunk->ndirty != 0); - *ndirty += chunk->ndirty; - return (NULL); + return (ndirty); } static size_t -arena_compute_npurgatory(arena_t *arena, bool all) +arena_compute_npurge(arena_t *arena, bool all) { - size_t npurgatory, npurgeable; + size_t npurge; /* * Compute the minimum number of pages that this thread should try to * purge. */ - npurgeable = arena->ndirty - arena->npurgatory; + if (!all) { + size_t threshold = (arena->nactive >> arena->lg_dirty_mult); + threshold = threshold < chunk_npages ? chunk_npages : threshold; - if (all == false) { - size_t threshold = (arena->nactive >> opt_lg_dirty_mult); - - npurgatory = npurgeable - threshold; + npurge = arena->ndirty - threshold; } else - npurgatory = npurgeable; + npurge = arena->ndirty; - return (npurgatory); + return (npurge); } -static void -arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all, - arena_chunk_mapelms_t *mapelms) -{ - size_t pageind, npages; - - /* - * Temporarily allocate free dirty runs within chunk. If all is false, - * only operate on dirty runs that are fragments; otherwise operate on - * all dirty runs. - */ - for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - if (arena_mapbits_allocated_get(chunk, pageind) == 0) { +static size_t +arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, + size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel, + extent_node_t *purge_chunks_sentinel) +{ + arena_runs_dirty_link_t *rdelm, *rdelm_next; + extent_node_t *chunkselm; + size_t nstashed = 0; + + /* Stash at least npurge pages. 
*/ + for (rdelm = qr_next(&arena->runs_dirty, rd_link), + chunkselm = qr_next(&arena->chunks_cache, cc_link); + rdelm != &arena->runs_dirty; rdelm = rdelm_next) { + size_t npages; + rdelm_next = qr_next(rdelm, rd_link); + + if (rdelm == &chunkselm->rd) { + extent_node_t *chunkselm_next; + bool zero; + UNUSED void *chunk; + + chunkselm_next = qr_next(chunkselm, cc_link); + /* + * Allocate. chunkselm remains valid due to the + * dalloc_node=false argument to chunk_alloc_cache(). + */ + zero = false; + chunk = chunk_alloc_cache(arena, chunk_hooks, + extent_node_addr_get(chunkselm), + extent_node_size_get(chunkselm), chunksize, &zero, + false); + assert(chunk == extent_node_addr_get(chunkselm)); + assert(zero == extent_node_zeroed_get(chunkselm)); + extent_node_dirty_insert(chunkselm, purge_runs_sentinel, + purge_chunks_sentinel); + npages = extent_node_size_get(chunkselm) >> LG_PAGE; + chunkselm = chunkselm_next; + } else { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + arena_run_t *run = &miscelm->run; size_t run_size = arena_mapbits_unallocated_size_get(chunk, pageind); npages = run_size >> LG_PAGE; + assert(pageind + npages <= chunk_npages); assert(arena_mapbits_dirty_get(chunk, pageind) == arena_mapbits_dirty_get(chunk, pageind+npages-1)); - if (arena_mapbits_dirty_get(chunk, pageind) != 0 && - (all || arena_avail_adjac(chunk, pageind, - npages))) { - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - arena_run_split_large(arena, run, run_size, - false); - /* Append to list for later processing. */ - ql_elm_new(mapelm, u.ql_link); - ql_tail_insert(mapelms, mapelm, u.ql_link); - } - } else { - /* Skip run. */ - if (arena_mapbits_large_get(chunk, pageind) != 0) { - npages = arena_mapbits_large_size_get(chunk, - pageind) >> LG_PAGE; - } else { - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - assert(arena_mapbits_small_runind_get(chunk, - pageind) == 0); - binind = arena_bin_index(arena, run->bin); - bin_info = &arena_bin_info[binind]; - npages = bin_info->run_size >> LG_PAGE; + /* + * If purging the spare chunk's run, make it available + * prior to allocation. + */ + if (chunk == arena->spare) + arena_chunk_alloc(arena); + + /* Temporarily allocate the free dirty run. */ + arena_run_split_large(arena, run, run_size, false); + /* Stash. */ + if (false) + qr_new(rdelm, rd_link); /* Redundant. 
*/ + else { + assert(qr_next(rdelm, rd_link) == rdelm); + assert(qr_prev(rdelm, rd_link) == rdelm); } + qr_meld(purge_runs_sentinel, rdelm, rd_link); } + + nstashed += npages; + if (!all && nstashed >= npurge) + break; } - assert(pageind == chunk_npages); - assert(chunk->ndirty == 0 || all == false); - assert(chunk->nruns_adjac == 0); + + return (nstashed); } static size_t -arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk, - arena_chunk_mapelms_t *mapelms) +arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, + arena_runs_dirty_link_t *purge_runs_sentinel, + extent_node_t *purge_chunks_sentinel) { - size_t npurged, pageind, npages, nmadvise; - arena_chunk_map_t *mapelm; + size_t npurged, nmadvise; + arena_runs_dirty_link_t *rdelm; + extent_node_t *chunkselm; - malloc_mutex_unlock(&arena->lock); if (config_stats) nmadvise = 0; npurged = 0; - ql_foreach(mapelm, mapelms, u.ql_link) { - bool unzeroed; - size_t flag_unzeroed, i; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - npages = arena_mapbits_large_size_get(chunk, pageind) >> - LG_PAGE; - assert(pageind + npages <= chunk_npages); - unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << - LG_PAGE)), (npages << LG_PAGE)); - flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; - /* - * Set the unzeroed flag for all pages, now that pages_purge() - * has returned whether the pages were zeroed as a side effect - * of purging. This chunk map modification is safe even though - * the arena mutex isn't currently owned by this thread, - * because the run is marked as allocated, thus protecting it - * from being modified by any other thread. As long as these - * writes don't perturb the first and last elements' - * CHUNK_MAP_ALLOCATED bits, behavior is well defined. - */ - for (i = 0; i < npages; i++) { - arena_mapbits_unzeroed_set(chunk, pageind+i, - flag_unzeroed); + + malloc_mutex_unlock(&arena->lock); + for (rdelm = qr_next(purge_runs_sentinel, rd_link), + chunkselm = qr_next(purge_chunks_sentinel, cc_link); + rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { + size_t npages; + + if (rdelm == &chunkselm->rd) { + /* + * Don't actually purge the chunk here because 1) + * chunkselm is embedded in the chunk and must remain + * valid, and 2) we deallocate the chunk in + * arena_unstash_purged(), where it is destroyed, + * decommitted, or purged, depending on chunk + * deallocation policy. + */ + size_t size = extent_node_size_get(chunkselm); + npages = size >> LG_PAGE; + chunkselm = qr_next(chunkselm, cc_link); + } else { + size_t pageind, run_size, flag_unzeroed, flags, i; + bool decommitted; + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + pageind = arena_miscelm_to_pageind(miscelm); + run_size = arena_mapbits_large_size_get(chunk, pageind); + npages = run_size >> LG_PAGE; + + assert(pageind + npages <= chunk_npages); + assert(!arena_mapbits_decommitted_get(chunk, pageind)); + assert(!arena_mapbits_decommitted_get(chunk, + pageind+npages-1)); + decommitted = !chunk_hooks->decommit(chunk, chunksize, + pageind << LG_PAGE, npages << LG_PAGE, arena->ind); + if (decommitted) { + flag_unzeroed = 0; + flags = CHUNK_MAP_DECOMMITTED; + } else { + flag_unzeroed = chunk_purge_wrapper(arena, + chunk_hooks, chunk, chunksize, pageind << + LG_PAGE, run_size) ? 
CHUNK_MAP_UNZEROED : 0; + flags = flag_unzeroed; + } + arena_mapbits_large_set(chunk, pageind+npages-1, 0, + flags); + arena_mapbits_large_set(chunk, pageind, run_size, + flags); + + /* + * Set the unzeroed flag for internal pages, now that + * chunk_purge_wrapper() has returned whether the pages + * were zeroed as a side effect of purging. This chunk + * map modification is safe even though the arena mutex + * isn't currently owned by this thread, because the run + * is marked as allocated, thus protecting it from being + * modified by any other thread. As long as these + * writes don't perturb the first and last elements' + * CHUNK_MAP_ALLOCATED bits, behavior is well defined. + */ + for (i = 1; i < npages-1; i++) { + arena_mapbits_internal_set(chunk, pageind+i, + flag_unzeroed); + } } + npurged += npages; if (config_stats) nmadvise++; } malloc_mutex_lock(&arena->lock); - if (config_stats) + + if (config_stats) { arena->stats.nmadvise += nmadvise; + arena->stats.purged += npurged; + } return (npurged); } static void -arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk, - arena_chunk_mapelms_t *mapelms) -{ - arena_chunk_map_t *mapelm; - size_t pageind; - - /* Deallocate runs. */ - for (mapelm = ql_first(mapelms); mapelm != NULL; - mapelm = ql_first(mapelms)) { - arena_run_t *run; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << - LG_PAGE)); - ql_remove(mapelms, mapelm, u.ql_link); - arena_run_dalloc(arena, run, false, true); +arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, + arena_runs_dirty_link_t *purge_runs_sentinel, + extent_node_t *purge_chunks_sentinel) +{ + arena_runs_dirty_link_t *rdelm, *rdelm_next; + extent_node_t *chunkselm; + + /* Deallocate chunks/runs. */ + for (rdelm = qr_next(purge_runs_sentinel, rd_link), + chunkselm = qr_next(purge_chunks_sentinel, cc_link); + rdelm != purge_runs_sentinel; rdelm = rdelm_next) { + rdelm_next = qr_next(rdelm, rd_link); + if (rdelm == &chunkselm->rd) { + extent_node_t *chunkselm_next = qr_next(chunkselm, + cc_link); + void *addr = extent_node_addr_get(chunkselm); + size_t size = extent_node_size_get(chunkselm); + bool zeroed = extent_node_zeroed_get(chunkselm); + bool committed = extent_node_committed_get(chunkselm); + extent_node_dirty_remove(chunkselm); + arena_node_dalloc(arena, chunkselm); + chunkselm = chunkselm_next; + chunk_dalloc_arena(arena, chunk_hooks, addr, size, + zeroed, committed); + } else { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + bool decommitted = (arena_mapbits_decommitted_get(chunk, + pageind) != 0); + arena_run_t *run = &miscelm->run; + qr_remove(rdelm, rd_link); + arena_run_dalloc(arena, run, false, true, decommitted); + } } } -static inline size_t -arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) +static void +arena_purge(arena_t *arena, bool all) { - size_t npurged; - arena_chunk_mapelms_t mapelms; - - ql_new(&mapelms); - - /* - * If chunk is the spare, temporarily re-allocate it, 1) so that its - * run is reinserted into runs_avail, and 2) so that it cannot be - * completely discarded by another thread while arena->lock is dropped - * by this thread. Note that the arena_run_dalloc() call will - * implicitly deallocate the chunk, so no explicit action is required - * in this function to deallocate the chunk. 
- * - * Note that once a chunk contains dirty pages, it cannot again contain - * a single run unless 1) it is a dirty run, or 2) this function purges - * dirty pages and causes the transition to a single clean run. Thus - * (chunk == arena->spare) is possible, but it is not possible for - * this function to be called on the spare unless it contains a dirty - * run. - */ - if (chunk == arena->spare) { - assert(arena_mapbits_dirty_get(chunk, map_bias) != 0); - assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0); - - arena_chunk_alloc(arena); - } + chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); + size_t npurge, npurgeable, npurged; + arena_runs_dirty_link_t purge_runs_sentinel; + extent_node_t purge_chunks_sentinel; - if (config_stats) - arena->stats.purged += chunk->ndirty; + arena->purging = true; /* - * Operate on all dirty runs if there is no clean/dirty run - * fragmentation. + * Calls to arena_dirty_count() are disabled even for debug builds + * because overhead grows nonlinearly as memory usage increases. */ - if (chunk->nruns_adjac == 0) - all = true; - - arena_chunk_stash_dirty(arena, chunk, all, &mapelms); - npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms); - arena_chunk_unstash_purged(arena, chunk, &mapelms); - - return (npurged); -} - -static void -arena_purge(arena_t *arena, bool all) -{ - arena_chunk_t *chunk; - size_t npurgatory; - if (config_debug) { - size_t ndirty = 0; - - arena_chunk_dirty_iter(&arena->chunks_dirty, NULL, - chunks_dirty_iter_cb, (void *)&ndirty); + if (false && config_debug) { + size_t ndirty = arena_dirty_count(arena); assert(ndirty == arena->ndirty); } - assert(arena->ndirty > arena->npurgatory || all); - assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - - arena->npurgatory) || all); + assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all); if (config_stats) arena->stats.npurge++; - /* - * Add the minimum number of pages this thread should try to purge to - * arena->npurgatory. This will keep multiple threads from racing to - * reduce ndirty below the threshold. - */ - npurgatory = arena_compute_npurgatory(arena, all); - arena->npurgatory += npurgatory; - - while (npurgatory > 0) { - size_t npurgeable, npurged, nunpurged; + npurge = arena_compute_npurge(arena, all); + qr_new(&purge_runs_sentinel, rd_link); + extent_node_dirty_linkage_init(&purge_chunks_sentinel); - /* Get next chunk with dirty pages. */ - chunk = arena_chunk_dirty_first(&arena->chunks_dirty); - if (chunk == NULL) { - /* - * This thread was unable to purge as many pages as - * originally intended, due to races with other threads - * that either did some of the purging work, or re-used - * dirty pages. - */ - arena->npurgatory -= npurgatory; - return; - } - npurgeable = chunk->ndirty; - assert(npurgeable != 0); + npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge, + &purge_runs_sentinel, &purge_chunks_sentinel); + assert(npurgeable >= npurge); + npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, + &purge_chunks_sentinel); + assert(npurged == npurgeable); + arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, + &purge_chunks_sentinel); - if (npurgeable > npurgatory && chunk->nruns_adjac == 0) { - /* - * This thread will purge all the dirty pages in chunk, - * so set npurgatory to reflect this thread's intent to - * purge the pages. 
This tends to reduce the chances - * of the following scenario: - * - * 1) This thread sets arena->npurgatory such that - * (arena->ndirty - arena->npurgatory) is at the - * threshold. - * 2) This thread drops arena->lock. - * 3) Another thread causes one or more pages to be - * dirtied, and immediately determines that it must - * purge dirty pages. - * - * If this scenario *does* play out, that's okay, - * because all of the purging work being done really - * needs to happen. - */ - arena->npurgatory += npurgeable - npurgatory; - npurgatory = npurgeable; - } - - /* - * Keep track of how many pages are purgeable, versus how many - * actually get purged, and adjust counters accordingly. - */ - arena->npurgatory -= npurgeable; - npurgatory -= npurgeable; - npurged = arena_chunk_purge(arena, chunk, all); - nunpurged = npurgeable - npurged; - arena->npurgatory += nunpurged; - npurgatory += nunpurged; - } + arena->purging = false; } void @@ -1090,7 +1549,8 @@ arena_purge_all(arena_t *arena) static void arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, - size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) + size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, + size_t flag_decommitted) { size_t size = *p_size; size_t run_ind = *p_run_ind; @@ -1099,7 +1559,9 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, /* Try to coalesce forward. */ if (run_ind + run_pages < chunk_npages && arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && - arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { + arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && + arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == + flag_decommitted) { size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages); size_t nrun_pages = nrun_size >> LG_PAGE; @@ -1112,8 +1574,18 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, run_ind+run_pages+nrun_pages-1) == nrun_size); assert(arena_mapbits_dirty_get(chunk, run_ind+run_pages+nrun_pages-1) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages, - false, true); + assert(arena_mapbits_decommitted_get(chunk, + run_ind+run_pages+nrun_pages-1) == flag_decommitted); + arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); + + /* + * If the successor is dirty, remove it from the set of dirty + * pages. + */ + if (flag_dirty != 0) { + arena_run_dirty_remove(arena, chunk, run_ind+run_pages, + nrun_pages); + } size += nrun_size; run_pages += nrun_pages; @@ -1126,7 +1598,8 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, /* Try to coalesce backward. 
*/ if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == - flag_dirty) { + flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == + flag_decommitted) { size_t prun_size = arena_mapbits_unallocated_size_get(chunk, run_ind-1); size_t prun_pages = prun_size >> LG_PAGE; @@ -1140,8 +1613,18 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == prun_size); assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind, prun_pages, true, - false); + assert(arena_mapbits_decommitted_get(chunk, run_ind) == + flag_decommitted); + arena_avail_remove(arena, chunk, run_ind, prun_pages); + + /* + * If the predecessor is dirty, remove it from the set of dirty + * pages. + */ + if (flag_dirty != 0) { + arena_run_dirty_remove(arena, chunk, run_ind, + prun_pages); + } size += prun_size; run_pages += prun_pages; @@ -1156,26 +1639,53 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, *p_run_pages = run_pages; } -static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) +static size_t +arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + size_t run_ind) { - arena_chunk_t *chunk; - size_t size, run_ind, run_pages, flag_dirty; + size_t size; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); assert(run_ind >= map_bias); assert(run_ind < chunk_npages); + if (arena_mapbits_large_get(chunk, run_ind) != 0) { size = arena_mapbits_large_size_get(chunk, run_ind); - assert(size == PAGE || - arena_mapbits_large_size_get(chunk, + assert(size == PAGE || arena_mapbits_large_size_get(chunk, run_ind+(size>>LG_PAGE)-1) == 0); } else { - size_t binind = arena_bin_index(arena, run->bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; + arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; size = bin_info->run_size; } + + return (size); +} + +static bool +arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run) +{ + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + size_t run_ind = arena_miscelm_to_pageind(miscelm); + size_t offset = run_ind << LG_PAGE; + size_t length = arena_run_size_get(arena, chunk, run, run_ind); + + return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length, + arena->ind)); +} + +static void +arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, + bool decommitted) +{ + arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + run_ind = arena_miscelm_to_pageind(miscelm); + assert(run_ind >= map_bias); + assert(run_ind < chunk_npages); + size = arena_run_size_get(arena, chunk, run, run_ind); run_pages = (size >> LG_PAGE); arena_cactive_update(arena, 0, run_pages); arena->nactive -= run_pages; @@ -1187,16 +1697,18 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) */ assert(arena_mapbits_dirty_get(chunk, run_ind) == arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0) + if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) + != 0) dirty = true; flag_dirty = dirty ? 
CHUNK_MAP_DIRTY : 0; + flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; /* Mark pages as unallocated in the chunk map. */ - if (dirty) { - arena_mapbits_unallocated_set(chunk, run_ind, size, - CHUNK_MAP_DIRTY); + if (dirty || decommitted) { + size_t flags = flag_dirty | flag_decommitted; + arena_mapbits_unallocated_set(chunk, run_ind, size, flags); arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - CHUNK_MAP_DIRTY); + flags); } else { arena_mapbits_unallocated_set(chunk, run_ind, size, arena_mapbits_unzeroed_get(chunk, run_ind)); @@ -1205,20 +1717,25 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) } arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, - flag_dirty); + flag_dirty, flag_decommitted); /* Insert into runs_avail, now that coalescing is complete. */ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); assert(arena_mapbits_dirty_get(chunk, run_ind) == arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - arena_avail_insert(arena, chunk, run_ind, run_pages, true, true); + assert(arena_mapbits_decommitted_get(chunk, run_ind) == + arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); + arena_avail_insert(arena, chunk, run_ind, run_pages); + + if (dirty) + arena_run_dirty_insert(arena, chunk, run_ind, run_pages); /* Deallocate chunk if it is now completely unused. */ - if (size == arena_maxclass) { + if (size == arena_maxrun) { assert(run_ind == map_bias); - assert(run_pages == (arena_maxclass >> LG_PAGE)); - arena_chunk_dealloc(arena, chunk); + assert(run_pages == (arena_maxrun >> LG_PAGE)); + arena_chunk_dalloc(arena, chunk); } /* @@ -1233,12 +1750,25 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) } static void +arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run) +{ + bool committed = arena_run_decommit(arena, chunk, run); + + arena_run_dalloc(arena, run, committed, false, !committed); +} + +static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize) { - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + size_t pageind = arena_miscelm_to_pageind(miscelm); size_t head_npages = (oldsize - newsize) >> LG_PAGE; size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); + size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); + size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? + CHUNK_MAP_UNZEROED : 0; assert(oldsize > newsize); @@ -1248,8 +1778,11 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, * run first, in case of single-page runs. 
*/ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); + arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages-1))); + arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); if (config_debug) { UNUSED size_t tail_npages = newsize >> LG_PAGE; @@ -1259,18 +1792,25 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, pageind+head_npages+tail_npages-1) == flag_dirty); } arena_mapbits_large_set(chunk, pageind+head_npages, newsize, - flag_dirty); + flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages))); - arena_run_dalloc(arena, run, false, false); + arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0)); } static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) { - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + size_t pageind = arena_miscelm_to_pageind(miscelm); size_t head_npages = newsize >> LG_PAGE; size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); + size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); + size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? + CHUNK_MAP_UNZEROED : 0; + arena_chunk_map_misc_t *tail_miscelm; + arena_run_t *tail_run; assert(oldsize > newsize); @@ -1280,8 +1820,11 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, * run first, in case of single-page runs. 
*/ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); + arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages-1))); + arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); if (config_debug) { UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; @@ -1291,29 +1834,21 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, pageind+head_npages+tail_npages-1) == flag_dirty); } arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, - flag_dirty); + flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages))); - arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), - dirty, false); + tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); + tail_run = &tail_miscelm->run; + arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted != + 0)); } static arena_run_t * arena_bin_runs_first(arena_bin_t *bin) { - arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs); - if (mapelm != NULL) { - arena_chunk_t *chunk; - size_t pageind; - arena_run_t *run; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); - pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t))) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << - LG_PAGE)); - return (run); - } + arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); + if (miscelm != NULL) + return (&miscelm->run); return (NULL); } @@ -1321,25 +1856,21 @@ arena_bin_runs_first(arena_bin_t *bin) static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - assert(arena_run_tree_search(&bin->runs, mapelm) == NULL); + assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); - arena_run_tree_insert(&bin->runs, mapelm); + arena_run_tree_insert(&bin->runs, miscelm); } static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - assert(arena_run_tree_search(&bin->runs, mapelm) != NULL); + assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); - arena_run_tree_remove(&bin->runs, mapelm); + arena_run_tree_remove(&bin->runs, miscelm); } static arena_run_t * @@ -1358,7 +1889,7 @@ static arena_run_t * arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) { arena_run_t *run; - size_t binind; + index_t binind; arena_bin_info_t *bin_info; /* Look for a usable run. */ @@ -1376,14 +1907,10 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) malloc_mutex_lock(&arena->lock); run = arena_run_alloc_small(arena, bin_info->run_size, binind); if (run != NULL) { - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - /* Initialize run internals. 
*/ - run->bin = bin; - run->nextind = 0; + run->binind = binind; run->nfree = bin_info->nregs; - bitmap_init(bitmap, &bin_info->bitmap_info); + bitmap_init(run->bitmap, &bin_info->bitmap_info); } malloc_mutex_unlock(&arena->lock); /********************************/ @@ -1413,7 +1940,7 @@ static void * arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) { void *ret; - size_t binind; + index_t binind; arena_bin_info_t *bin_info; arena_run_t *run; @@ -1459,7 +1986,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) } void -arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, +arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; @@ -1479,9 +2006,20 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); else ptr = arena_bin_malloc_hard(arena, bin); - if (ptr == NULL) + if (ptr == NULL) { + /* + * OOM. tbin->avail isn't yet filled down to its first + * element, so the successful allocations (if any) must + * be moved to the base of tbin->avail before bailing + * out. + */ + if (i > 0) { + memmove(tbin->avail, &tbin->avail[nfill - i], + i * sizeof(void *)); + } break; - if (config_fill && opt_junk) { + } + if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ptr, &arena_bin_info[binind], true); } @@ -1489,9 +2027,9 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, tbin->avail[nfill - 1 - i] = ptr; } if (config_stats) { - bin->stats.allocated += i * arena_bin_info[binind].reg_size; bin->stats.nmalloc += i; bin->stats.nrequests += tbin->tstats.nrequests; + bin->stats.curregs += i; bin->stats.nfills++; tbin->tstats.nrequests = 0; } @@ -1543,24 +2081,29 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) size_t i; bool error = false; - for (i = 1; i <= redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, false, i, *byte); - if (reset) - *byte = 0xa5; + if (opt_junk_alloc) { + for (i = 1; i <= redzone_size; i++) { + uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); + if (*byte != 0xa5) { + error = true; + arena_redzone_corruption(ptr, size, false, i, + *byte); + if (reset) + *byte = 0xa5; + } } - } - for (i = 0; i < redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, true, i, *byte); - if (reset) - *byte = 0xa5; + for (i = 0; i < redzone_size; i++) { + uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); + if (*byte != 0xa5) { + error = true; + arena_redzone_corruption(ptr, size, true, i, + *byte); + if (reset) + *byte = 0xa5; + } } } + if (opt_abort && error) abort(); } @@ -1588,14 +2131,14 @@ arena_dalloc_junk_small_t *arena_dalloc_junk_small = void arena_quarantine_junk_small(void *ptr, size_t usize) { - size_t binind; + index_t binind; arena_bin_info_t *bin_info; cassert(config_fill); - assert(opt_junk); + assert(opt_junk_free); assert(opt_quarantine); assert(usize <= SMALL_MAXCLASS); - binind = SMALL_SIZE2BIN(usize); + binind = size2index(usize); bin_info = &arena_bin_info[binind]; arena_redzones_validate(ptr, bin_info, true); } @@ -1606,12 +2149,12 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero) void *ret; arena_bin_t *bin; arena_run_t *run; - size_t binind; + index_t binind; - binind = SMALL_SIZE2BIN(size); + binind = 
size2index(size); assert(binind < NBINS); bin = &arena->bins[binind]; - size = arena_bin_info[binind].reg_size; + size = index2size(binind); malloc_mutex_lock(&bin->lock); if ((run = bin->runcur) != NULL && run->nfree > 0) @@ -1625,29 +2168,29 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero) } if (config_stats) { - bin->stats.allocated += size; bin->stats.nmalloc++; bin->stats.nrequests++; + bin->stats.curregs++; } malloc_mutex_unlock(&bin->lock); - if (config_prof && isthreaded == false && arena_prof_accum(arena, size)) + if (config_prof && !isthreaded && arena_prof_accum(arena, size)) prof_idump(); - if (zero == false) { + if (!zero) { if (config_fill) { - if (opt_junk) { + if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); - } else if (opt_zero) + } else if (unlikely(opt_zero)) memset(ret, 0, size); } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); } else { - if (config_fill && opt_junk) { + if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); memset(ret, 0, size); } @@ -1658,36 +2201,59 @@ void * arena_malloc_large(arena_t *arena, size_t size, bool zero) { void *ret; + size_t usize; + uintptr_t random_offset; + arena_run_t *run; + arena_chunk_map_misc_t *miscelm; UNUSED bool idump; /* Large allocation. */ - size = PAGE_CEILING(size); + usize = s2u(size); malloc_mutex_lock(&arena->lock); - ret = (void *)arena_run_alloc_large(arena, size, zero); - if (ret == NULL) { + if (config_cache_oblivious) { + uint64_t r; + + /* + * Compute a uniformly distributed offset within the first page + * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 + * for 4 KiB pages and 64-byte cachelines. + */ + prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state, + UINT64_C(6364136223846793009), + UINT64_C(1442695040888963409)); + random_offset = ((uintptr_t)r) << LG_CACHELINE; + } else + random_offset = 0; + run = arena_run_alloc_large(arena, usize + large_pad, zero); + if (run == NULL) { malloc_mutex_unlock(&arena->lock); return (NULL); } + miscelm = arena_run_to_miscelm(run); + ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + + random_offset); if (config_stats) { + index_t index = size2index(usize) - NBINS; + arena->stats.nmalloc_large++; arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + arena->stats.allocated_large += usize; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; } if (config_prof) - idump = arena_prof_accum_locked(arena, size); + idump = arena_prof_accum_locked(arena, usize); malloc_mutex_unlock(&arena->lock); if (config_prof && idump) prof_idump(); - if (zero == false) { + if (!zero) { if (config_fill) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); + if (unlikely(opt_junk_alloc)) + memset(ret, 0xa5, usize); + else if (unlikely(opt_zero)) + memset(ret, 0, usize); } } @@ -1695,18 +2261,25 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero) } /* Only handles large allocations that require more than page alignment. 
*/ -void * -arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) +static void * +arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, + bool zero) { void *ret; size_t alloc_size, leadsize, trailsize; arena_run_t *run; arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + void *rpages; + + assert(usize == PAGE_CEILING(usize)); - assert((size & PAGE_MASK) == 0); + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) + return (NULL); alignment = PAGE_CEILING(alignment); - alloc_size = size + alignment - PAGE; + alloc_size = usize + large_pad + alignment - PAGE; malloc_mutex_lock(&arena->lock); run = arena_run_alloc_large(arena, alloc_size, false); @@ -1715,37 +2288,94 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) return (NULL); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + rpages = arena_miscelm_to_rpages(miscelm); - leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) - - (uintptr_t)run; - assert(alloc_size >= leadsize + size); - trailsize = alloc_size - leadsize - size; - ret = (void *)((uintptr_t)run + leadsize); + leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - + (uintptr_t)rpages; + assert(alloc_size >= leadsize + usize); + trailsize = alloc_size - leadsize - usize - large_pad; if (leadsize != 0) { - arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size - - leadsize); + arena_chunk_map_misc_t *head_miscelm = miscelm; + arena_run_t *head_run = run; + + miscelm = arena_miscelm_get(chunk, + arena_miscelm_to_pageind(head_miscelm) + (leadsize >> + LG_PAGE)); + run = &miscelm->run; + + arena_run_trim_head(arena, chunk, head_run, alloc_size, + alloc_size - leadsize); } if (trailsize != 0) { - arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, - false); + arena_run_trim_tail(arena, chunk, run, usize + large_pad + + trailsize, usize + large_pad, false); + } + if (arena_run_init_large(arena, run, usize + large_pad, zero)) { + size_t run_ind = + arena_miscelm_to_pageind(arena_run_to_miscelm(run)); + bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); + bool decommitted = (arena_mapbits_decommitted_get(chunk, + run_ind) != 0); + + assert(decommitted); /* Cause of OOM. 
*/ + arena_run_dalloc(arena, run, dirty, false, decommitted); + malloc_mutex_unlock(&arena->lock); + return (NULL); } - arena_run_init_large(arena, (arena_run_t *)ret, size, zero); + ret = arena_miscelm_to_rpages(miscelm); if (config_stats) { + index_t index = size2index(usize) - NBINS; + arena->stats.nmalloc_large++; arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + arena->stats.allocated_large += usize; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; } malloc_mutex_unlock(&arena->lock); - if (config_fill && zero == false) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); + if (config_fill && !zero) { + if (unlikely(opt_junk_alloc)) + memset(ret, 0xa5, usize); + else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + return (ret); +} + +void * +arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, + bool zero, tcache_t *tcache) +{ + void *ret; + + if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE + && (usize & PAGE_MASK) == 0))) { + /* Small; alignment doesn't require special run placement. */ + ret = arena_malloc(tsd, arena, usize, zero, tcache); + } else if (usize <= arena_maxclass && alignment <= PAGE) { + /* + * Large; alignment doesn't require special run placement. + * However, the cached pointer may be at a random offset from + * the base of the run, so do some bit manipulation to retrieve + * the base. + */ + ret = arena_malloc(tsd, arena, usize, zero, tcache); + if (config_cache_oblivious) + ret = (void *)((uintptr_t)ret & ~PAGE_MASK); + } else { + if (likely(usize <= arena_maxclass)) { + ret = arena_palloc_large(tsd, arena, usize, alignment, + zero); + } else if (likely(alignment <= chunksize)) + ret = huge_malloc(tsd, arena, usize, zero, tcache); + else { + ret = huge_palloc(tsd, arena, usize, alignment, zero, + tcache); + } } return (ret); } @@ -1754,22 +2384,23 @@ void arena_prof_promoted(const void *ptr, size_t size) { arena_chunk_t *chunk; - size_t pageind, binind; + size_t pageind; + index_t binind; cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(ptr, false) == PAGE); - assert(isalloc(ptr, true) == PAGE); + assert(isalloc(ptr, false) == LARGE_MINCLASS); + assert(isalloc(ptr, true) == LARGE_MINCLASS); assert(size <= SMALL_MAXCLASS); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - binind = SMALL_SIZE2BIN(size); + binind = size2index(size); assert(binind < NBINS); arena_mapbits_large_binind_set(chunk, pageind, binind); - assert(isalloc(ptr, false) == PAGE); + assert(isalloc(ptr, false) == LARGE_MINCLASS); assert(isalloc(ptr, true) == size); } @@ -1782,7 +2413,8 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, if (run == bin->runcur) bin->runcur = NULL; else { - size_t binind = arena_bin_index(chunk->arena, bin); + index_t binind = arena_bin_index(extent_node_arena_get( + &chunk->node), bin); arena_bin_info_t *bin_info = &arena_bin_info[binind]; if (bin_info->nregs != 1) { @@ -1800,46 +2432,15 @@ static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { - size_t binind; - arena_bin_info_t *bin_info; - size_t npages, run_ind, past; assert(run != 
bin->runcur); - assert(arena_run_tree_search(&bin->runs, - arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) - == NULL); - - binind = arena_bin_index(chunk->arena, run->bin); - bin_info = &arena_bin_info[binind]; + assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == + NULL); malloc_mutex_unlock(&bin->lock); /******************************/ - npages = bin_info->run_size >> LG_PAGE; - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - past = (size_t)(PAGE_CEILING((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * - bin_info->reg_interval - bin_info->redzone_size) - - (uintptr_t)chunk) >> LG_PAGE); malloc_mutex_lock(&arena->lock); - - /* - * If the run was originally clean, and some pages were never touched, - * trim the clean pages before deallocating the dirty portion of the - * run. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+npages-1)); - if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind < - npages) { - /* Trim clean pages. Convert to large run beforehand. */ - assert(npages > 0); - arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0); - arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0); - arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE), - ((past - run_ind) << LG_PAGE), false); - /* npages = past - run_ind; */ - } - arena_run_dalloc(arena, run, true, false); + arena_run_dalloc_decommit(arena, chunk, run); malloc_mutex_unlock(&arena->lock); /****************************/ malloc_mutex_lock(&bin->lock); @@ -1868,26 +2469,24 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_runs_insert(bin, run); } -void -arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_t *mapelm) +static void +arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, + arena_chunk_map_bits_t *bitselm, bool junked) { - size_t pageind; + size_t pageind, rpages_ind; arena_run_t *run; arena_bin_t *bin; arena_bin_info_t *bin_info; - size_t size, binind; + index_t binind; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; - binind = arena_ptr_small_binind_get(ptr, mapelm->bits); + rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); + run = &arena_miscelm_get(chunk, rpages_ind)->run; + binind = run->binind; + bin = &arena->bins[binind]; bin_info = &arena_bin_info[binind]; - if (config_fill || config_stats) - size = bin_info->reg_size; - if (config_fill && opt_junk) + if (!junked && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_small(ptr, bin_info); arena_run_reg_dalloc(run, ptr); @@ -1898,23 +2497,32 @@ arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_bin_lower_run(arena, chunk, run, bin); if (config_stats) { - bin->stats.allocated -= size; bin->stats.ndalloc++; + bin->stats.curregs--; } } void +arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, + arena_chunk_map_bits_t *bitselm) +{ + + arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); +} + +void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_t *mapelm) + size_t pageind, arena_chunk_map_bits_t *bitselm) { arena_run_t *run; arena_bin_t *bin; + size_t rpages_ind; - run = (arena_run_t 
*)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; + rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); + run = &arena_miscelm_get(chunk, rpages_ind)->run; + bin = &arena->bins[run->binind]; malloc_mutex_lock(&bin->lock); - arena_dalloc_bin_locked(arena, chunk, ptr, mapelm); + arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); malloc_mutex_unlock(&bin->lock); } @@ -1922,26 +2530,26 @@ void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind) { - arena_chunk_map_t *mapelm; + arena_chunk_map_bits_t *bitselm; if (config_debug) { /* arena_ptr_small_binind_get() does extra sanity checking. */ assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind)) != BININD_INVALID); } - mapelm = arena_mapp_get(chunk, pageind); - arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm); + bitselm = arena_bitselm_get(chunk, pageind); + arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_large #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) #endif -static void +void arena_dalloc_junk_large(void *ptr, size_t usize) { - if (config_fill && opt_junk) + if (config_fill && unlikely(opt_junk_free)) memset(ptr, 0x5a, usize); } #ifdef JEMALLOC_JET @@ -1952,23 +2560,38 @@ arena_dalloc_junk_large_t *arena_dalloc_junk_large = #endif void -arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) +arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, + void *ptr, bool junked) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_run_t *run = &miscelm->run; if (config_fill || config_stats) { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t usize = arena_mapbits_large_size_get(chunk, pageind); + size_t usize = arena_mapbits_large_size_get(chunk, pageind) - + large_pad; - arena_dalloc_junk_large(ptr, usize); + if (!junked) + arena_dalloc_junk_large(ptr, usize); if (config_stats) { + index_t index = size2index(usize) - NBINS; + arena->stats.ndalloc_large++; arena->stats.allocated_large -= usize; - arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--; + arena->stats.lstats[index].ndalloc++; + arena->stats.lstats[index].curruns--; } } - arena_run_dalloc(arena, (arena_run_t *)ptr, true, false); + arena_run_dalloc_decommit(arena, chunk, run); +} + +void +arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, + void *ptr) +{ + + arena_dalloc_large_locked_impl(arena, chunk, ptr, true); } void @@ -1976,7 +2599,7 @@ arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) { malloc_mutex_lock(&arena->lock); - arena_dalloc_large_locked(arena, chunk, ptr); + arena_dalloc_large_locked_impl(arena, chunk, ptr, false); malloc_mutex_unlock(&arena->lock); } @@ -1984,6 +2607,9 @@ static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t size) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_run_t *run = &miscelm->run; assert(size < oldsize); @@ -1992,20 +2618,23 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, * allocations. 
*/ malloc_mutex_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, - true); + arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + + large_pad, true); if (config_stats) { + index_t oldindex = size2index(oldsize) - NBINS; + index_t index = size2index(size) - NBINS; + arena->stats.ndalloc_large++; arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; + arena->stats.lstats[oldindex].ndalloc++; + arena->stats.lstats[oldindex].curruns--; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; } malloc_mutex_unlock(&arena->lock); } @@ -2015,31 +2644,42 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t npages = oldsize >> LG_PAGE; + size_t npages = (oldsize + large_pad) >> LG_PAGE; size_t followsize; + size_t usize_min = s2u(size); - assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); + assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - + large_pad); /* Try to extend the run. */ - assert(size + extra > oldsize); + assert(usize_min > oldsize); malloc_mutex_lock(&arena->lock); - if (pageind + npages < chunk_npages && + if (pageind+npages < chunk_npages && arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && (followsize = arena_mapbits_unallocated_size_get(chunk, - pageind+npages)) >= size - oldsize) { + pageind+npages)) >= usize_min - oldsize) { /* * The next run is available and sufficiently large. Split the * following run, then merge the first part with the existing * allocation. */ - size_t flag_dirty; - size_t splitsize = (oldsize + followsize <= size + extra) - ? followsize : size + extra - oldsize; - arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk + - ((pageind+npages) << LG_PAGE)), splitsize, zero); + arena_run_t *run; + size_t flag_dirty, flag_unzeroed_mask, splitsize, usize; + + usize = s2u(size + extra); + while (oldsize + followsize < usize) + usize = index2size(size2index(usize)-1); + assert(usize >= usize_min); + splitsize = usize - oldsize; + + run = &arena_miscelm_get(chunk, pageind+npages)->run; + if (arena_run_split_large(arena, run, splitsize, zero)) { + malloc_mutex_unlock(&arena->lock); + return (true); + } size = oldsize + splitsize; - npages = size >> LG_PAGE; + npages = (size + large_pad) >> LG_PAGE; /* * Mark the extended run as dirty if either portion of the run @@ -2051,21 +2691,29 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, */ flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | arena_mapbits_dirty_get(chunk, pageind+npages-1); - arena_mapbits_large_set(chunk, pageind, size, flag_dirty); - arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); + flag_unzeroed_mask = flag_dirty == 0 ? 
CHUNK_MAP_UNZEROED : 0; + arena_mapbits_large_set(chunk, pageind, size + large_pad, + flag_dirty | (flag_unzeroed_mask & + arena_mapbits_unzeroed_get(chunk, pageind))); + arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+npages-1))); if (config_stats) { + index_t oldindex = size2index(oldsize) - NBINS; + index_t index = size2index(size) - NBINS; + arena->stats.ndalloc_large++; arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; + arena->stats.lstats[oldindex].ndalloc++; + arena->stats.lstats[oldindex].curruns--; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; } malloc_mutex_unlock(&arena->lock); return (false); @@ -2083,7 +2731,7 @@ static void arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) { - if (config_fill && opt_junk) { + if (config_fill && unlikely(opt_junk_free)) { memset((void *)((uintptr_t)ptr + usize), 0x5a, old_usize - usize); } @@ -2103,10 +2751,14 @@ static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { - size_t psize; + size_t usize; - psize = PAGE_CEILING(size + extra); - if (psize == oldsize) { + /* Make sure extra can't cause size_t overflow. */ + if (unlikely(extra >= arena_maxclass)) + return (true); + + usize = s2u(size + extra); + if (usize == oldsize) { /* Same size class. */ return (false); } else { @@ -2114,24 +2766,23 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, arena_t *arena; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; + arena = extent_node_arena_get(&chunk->node); - if (psize < oldsize) { + if (usize < oldsize) { /* Fill before shrinking in order avoid a race. */ - arena_ralloc_junk_large(ptr, oldsize, psize); + arena_ralloc_junk_large(ptr, oldsize, usize); arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, - psize); + usize); return (false); } else { bool ret = arena_ralloc_large_grow(arena, chunk, ptr, - oldsize, PAGE_CEILING(size), - psize - PAGE_CEILING(size), zero); - if (config_fill && ret == false && zero == false) { - if (opt_junk) { + oldsize, size, extra, zero); + if (config_fill && !ret && !zero) { + if (unlikely(opt_junk_alloc)) { memset((void *)((uintptr_t)ptr + oldsize), 0xa5, isalloc(ptr, config_prof) - oldsize); - } else if (opt_zero) { + } else if (unlikely(opt_zero)) { memset((void *)((uintptr_t)ptr + oldsize), 0, isalloc(ptr, config_prof) - oldsize); @@ -2147,84 +2798,103 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { - /* - * Avoid moving the allocation if the size class can be left the same. 
- */ - if (oldsize <= arena_maxclass) { - if (oldsize <= SMALL_MAXCLASS) { - assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size - == oldsize); - if ((size + extra <= SMALL_MAXCLASS && - SMALL_SIZE2BIN(size + extra) == - SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && - size + extra >= oldsize)) - return (false); - } else { - assert(size <= arena_maxclass); - if (size + extra > SMALL_MAXCLASS) { - if (arena_ralloc_large(ptr, oldsize, size, - extra, zero) == false) + if (likely(size <= arena_maxclass)) { + /* + * Avoid moving the allocation if the size class can be left the + * same. + */ + if (likely(oldsize <= arena_maxclass)) { + if (oldsize <= SMALL_MAXCLASS) { + assert( + arena_bin_info[size2index(oldsize)].reg_size + == oldsize); + if ((size + extra <= SMALL_MAXCLASS && + size2index(size + extra) == + size2index(oldsize)) || (size <= oldsize && + size + extra >= oldsize)) return (false); + } else { + assert(size <= arena_maxclass); + if (size + extra > SMALL_MAXCLASS) { + if (!arena_ralloc_large(ptr, oldsize, + size, extra, zero)) + return (false); + } } } - } - /* Reallocation would require a move. */ - return (true); + /* Reallocation would require a move. */ + return (true); + } else + return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero)); } void * -arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, - bool try_tcache_dalloc) +arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache) { void *ret; - size_t copysize; - /* Try to avoid moving the allocation. */ - if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false) - return (ptr); + if (likely(size <= arena_maxclass)) { + size_t copysize; - /* - * size and oldsize are different enough that we need to move the - * object. In that case, fall back to allocating new space and - * copying. - */ - if (alignment != 0) { - size_t usize = sa2u(size + extra, alignment); - if (usize == 0) - return (NULL); - ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); - } else - ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc); + /* Try to avoid moving the allocation. */ + if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero)) + return (ptr); - if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, this time without extra. */ + /* + * size and oldsize are different enough that we need to move + * the object. In that case, fall back to allocating new space + * and copying. + */ if (alignment != 0) { - size_t usize = sa2u(size, alignment); + size_t usize = sa2u(size + extra, alignment); if (usize == 0) return (NULL); - ret = ipalloct(usize, alignment, zero, try_tcache_alloc, + ret = ipalloct(tsd, usize, alignment, zero, tcache, arena); - } else - ret = arena_malloc(arena, size, zero, try_tcache_alloc); + } else { + ret = arena_malloc(tsd, arena, size + extra, zero, + tcache); + } - if (ret == NULL) - return (NULL); - } + if (ret == NULL) { + if (extra == 0) + return (NULL); + /* Try again, this time without extra. */ + if (alignment != 0) { + size_t usize = sa2u(size, alignment); + if (usize == 0) + return (NULL); + ret = ipalloct(tsd, usize, alignment, zero, + tcache, arena); + } else { + ret = arena_malloc(tsd, arena, size, zero, + tcache); + } - /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). 
*/ + if (ret == NULL) + return (NULL); + } - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); - memcpy(ret, ptr, copysize); - iqalloct(ptr, try_tcache_dalloc); + /* + * Junk/zero-filling were already done by + * ipalloc()/arena_malloc(). + */ + + /* + * Copy at most size bytes (not size+extra), since the caller + * has no expectation that the extra bytes will be reliably + * preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); + memcpy(ret, ptr, copysize); + isqalloc(tsd, ptr, oldsize, tcache); + } else { + ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra, + alignment, zero, tcache); + } return (ret); } @@ -2239,24 +2909,46 @@ arena_dss_prec_get(arena_t *arena) return (ret); } -void +bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { + if (!have_dss) + return (dss_prec != dss_prec_disabled); malloc_mutex_lock(&arena->lock); arena->dss_prec = dss_prec; malloc_mutex_unlock(&arena->lock); + return (false); +} + +ssize_t +arena_lg_dirty_mult_default_get(void) +{ + + return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); +} + +bool +arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) +{ + + if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) + return (true); + atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); + return (false); } void -arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, - size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats) +arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, + size_t *nactive, size_t *ndirty, arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, + malloc_huge_stats_t *hstats) { unsigned i; malloc_mutex_lock(&arena->lock); *dss = dss_prec_names[arena->dss_prec]; + *lg_dirty_mult = arena->lg_dirty_mult; *nactive += arena->nactive; *ndirty += arena->ndirty; @@ -2264,10 +2956,15 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, astats->npurge += arena->stats.npurge; astats->nmadvise += arena->stats.nmadvise; astats->purged += arena->stats.purged; + astats->metadata_mapped += arena->stats.metadata_mapped; + astats->metadata_allocated += arena_metadata_allocated_get(arena); astats->allocated_large += arena->stats.allocated_large; astats->nmalloc_large += arena->stats.nmalloc_large; astats->ndalloc_large += arena->stats.ndalloc_large; astats->nrequests_large += arena->stats.nrequests_large; + astats->allocated_huge += arena->stats.allocated_huge; + astats->nmalloc_huge += arena->stats.nmalloc_huge; + astats->ndalloc_huge += arena->stats.ndalloc_huge; for (i = 0; i < nlclasses; i++) { lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; @@ -2275,16 +2972,22 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, lstats[i].nrequests += arena->stats.lstats[i].nrequests; lstats[i].curruns += arena->stats.lstats[i].curruns; } + + for (i = 0; i < nhclasses; i++) { + hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; + hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; + hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; + } malloc_mutex_unlock(&arena->lock); for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; malloc_mutex_lock(&bin->lock); - bstats[i].allocated += bin->stats.allocated; 
 bstats[i].nmalloc += bin->stats.nmalloc;
 bstats[i].ndalloc += bin->stats.ndalloc;
 bstats[i].nrequests += bin->stats.nrequests;
+ bstats[i].curregs += bin->stats.curregs;
 if (config_tcache) {
 bstats[i].nfills += bin->stats.nfills;
 bstats[i].nflushes += bin->stats.nflushes;
@@ -2296,27 +2999,42 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
 }
 }
 
-bool
-arena_new(arena_t *arena, unsigned ind)
+arena_t *
+arena_new(unsigned ind)
 {
+ arena_t *arena;
 unsigned i;
 arena_bin_t *bin;
 
+ /*
+ * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
+ * because there is no way to clean up if base_alloc() OOMs.
+ */
+ if (config_stats) {
+ arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
+ + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
+ nhclasses) * sizeof(malloc_huge_stats_t));
+ } else
+ arena = (arena_t *)base_alloc(sizeof(arena_t));
+ if (arena == NULL)
+ return (NULL);
+
 arena->ind = ind;
 arena->nthreads = 0;
-
 if (malloc_mutex_init(&arena->lock))
- return (true);
+ return (NULL);
 
 if (config_stats) {
 memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats =
- (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (arena->stats.lstats == NULL)
- return (true);
+ arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena +
+ CACHELINE_CEILING(sizeof(arena_t)));
 memset(arena->stats.lstats, 0, nlclasses *
 sizeof(malloc_large_stats_t));
+ arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena +
+ CACHELINE_CEILING(sizeof(arena_t)) +
+ QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
+ memset(arena->stats.hstats, 0, nhclasses *
+ sizeof(malloc_huge_stats_t));
 if (config_tcache)
 ql_new(&arena->tcache_ql);
 }
@@ -2324,56 +3042,76 @@ arena_new(arena_t *arena, unsigned ind)
 if (config_prof)
 arena->prof_accumbytes = 0;
 
+ if (config_cache_oblivious) {
+ /*
+ * A nondeterministic seed based on the address of arena reduces
+ * the likelihood of lockstep non-uniform cache index
+ * utilization among identical concurrent processes, but at the
+ * cost of test repeatability. For debug builds, instead use a
+ * deterministic seed.
+ */
+ arena->offset_state = config_debug ? ind :
+ (uint64_t)(uintptr_t)arena;
+ }
+
 arena->dss_prec = chunk_dss_prec_get();
 
- /* Initialize chunks. */
- arena_chunk_dirty_new(&arena->chunks_dirty);
 arena->spare = NULL;
 
+ arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
+ arena->purging = false;
 arena->nactive = 0;
 arena->ndirty = 0;
- arena->npurgatory = 0;
 
 arena_avail_tree_new(&arena->runs_avail);
+ qr_new(&arena->runs_dirty, rd_link);
+ qr_new(&arena->chunks_cache, cc_link);
+
+ ql_new(&arena->huge);
+ if (malloc_mutex_init(&arena->huge_mtx))
+ return (NULL);
+
+ extent_tree_szad_new(&arena->chunks_szad_cached);
+ extent_tree_ad_new(&arena->chunks_ad_cached);
+ extent_tree_szad_new(&arena->chunks_szad_retained);
+ extent_tree_ad_new(&arena->chunks_ad_retained);
+ if (malloc_mutex_init(&arena->chunks_mtx))
+ return (NULL);
+ ql_new(&arena->node_cache);
+ if (malloc_mutex_init(&arena->node_cache_mtx))
+ return (NULL);
+
+ arena->chunk_hooks = chunk_hooks_default;
 
 /* Initialize bins. */
 for (i = 0; i < NBINS; i++) {
 bin = &arena->bins[i];
 if (malloc_mutex_init(&bin->lock))
- return (true);
+ return (NULL);
 bin->runcur = NULL;
 arena_run_tree_new(&bin->runs);
 if (config_stats)
 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 }
 
- return (false);
+ return (arena);
 }
 
 /*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
- * *) bin_info->run_size >= min_run_size
- * *) bin_info->run_size <= arena_maxclass
- * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
+ * *) bin_info->run_size <= arena_maxrun
 * *) bin_info->nregs <= RUN_MAXREGS
 *
- * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
- * calculated here, since these settings are all interdependent.
+ * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
+ * these settings are all interdependent.
 */
-static size_t
-bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
+static void
+bin_info_run_size_calc(arena_bin_info_t *bin_info)
 {
 size_t pad_size;
- size_t try_run_size, good_run_size;
- uint32_t try_nregs, good_nregs;
- uint32_t try_hdr_size, good_hdr_size;
- uint32_t try_bitmap_offset, good_bitmap_offset;
- uint32_t try_ctx0_offset, good_ctx0_offset;
- uint32_t try_redzone0_offset, good_redzone0_offset;
-
- assert(min_run_size >= PAGE);
- assert(min_run_size <= arena_maxclass);
+ size_t try_run_size, perfect_run_size, actual_run_size;
+ uint32_t try_nregs, perfect_nregs, actual_nregs;
 
 /*
 * Determine redzone size based on minimum alignment and minimum
@@ -2382,8 +3120,9 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 * minimum alignment; without the padding, each redzone would have to
 * be twice as large in order to maintain alignment.
 */
- if (config_fill && opt_redzone) {
- size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
+ if (config_fill && unlikely(opt_redzone)) {
+ size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
+ 1);
 if (align_min <= REDZONE_MINSIZE) {
 bin_info->redzone_size = REDZONE_MINSIZE;
 pad_size = 0;
@@ -2399,127 +3138,114 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 (bin_info->redzone_size << 1);
 
 /*
- * Calculate known-valid settings before entering the run_size
- * expansion loop, so that the first part of the loop always copies
- * valid settings.
- *
- * The do..while loop iteratively reduces the number of regions until
- * the run header and the regions no longer overlap. A closed formula
- * would be quite messy, since there is an interdependency between the
- * header's mask length and the number of regions.
+ * Compute run size under ideal conditions (no redzones, no limit on run
+ * size).
 */
- try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /* Add space for one (prof_ctx_t *) per region. */
- try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
- } else
- try_ctx0_offset = 0;
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
-
- /* run_size expansion loop. */
+ try_run_size = PAGE;
+ try_nregs = try_run_size / bin_info->reg_size;
 do {
- /*
- * Copy valid settings before trying more aggressive settings.
- */
- good_run_size = try_run_size;
- good_nregs = try_nregs;
- good_hdr_size = try_hdr_size;
- good_bitmap_offset = try_bitmap_offset;
- good_ctx0_offset = try_ctx0_offset;
- good_redzone0_offset = try_redzone0_offset;
-
- /* Try more aggressive settings. */
+ perfect_run_size = try_run_size;
+ perfect_nregs = try_nregs;
 try_run_size += PAGE;
- try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /*
- * Add space for one (prof_ctx_t *) per region.
- */
- try_hdr_size += try_nregs *
- sizeof(prof_ctx_t *);
- }
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
- } while (try_run_size <= arena_maxclass
- && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
- RUN_MAX_OVRHD_RELAX
- && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
- && try_nregs < RUN_MAXREGS);
+ try_nregs = try_run_size / bin_info->reg_size;
+ } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
+ assert(perfect_nregs <= RUN_MAXREGS);
+
+ actual_run_size = perfect_run_size;
+ actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
+
+ /*
+ * Redzones can require enough padding that not even a single region can
+ * fit within the number of pages that would normally be dedicated to a
+ * run for this size class. Increase the run size until at least one
+ * region fits.
+ */
+ while (actual_nregs == 0) {
+ assert(config_fill && unlikely(opt_redzone));
-
- assert(good_hdr_size <= good_redzone0_offset);
+ actual_run_size += PAGE;
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
+ }
+
+ /*
+ * Make sure that the run will fit within an arena chunk.
+ */
+ while (actual_run_size > arena_maxrun) {
+ actual_run_size -= PAGE;
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
+ }
+ assert(actual_nregs > 0);
+ assert(actual_run_size == s2u(actual_run_size));
 
 /* Copy final settings. */
- bin_info->run_size = good_run_size;
- bin_info->nregs = good_nregs;
- bin_info->bitmap_offset = good_bitmap_offset;
- bin_info->ctx0_offset = good_ctx0_offset;
- bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
+ bin_info->run_size = actual_run_size;
+ bin_info->nregs = actual_nregs;
+ bin_info->reg0_offset = actual_run_size - (actual_nregs *
+ bin_info->reg_interval) - pad_size + bin_info->redzone_size;
+
+ if (actual_run_size > small_maxrun)
+ small_maxrun = actual_run_size;
 
 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs *
 bin_info->reg_interval) + pad_size == bin_info->run_size);
-
- return (good_run_size);
 }
 
 static void
 bin_info_init(void)
 {
 arena_bin_info_t *bin_info;
- size_t prev_run_size = PAGE;
 
-#define SIZE_CLASS(bin, delta, size) \
- bin_info = &arena_bin_info[bin]; \
+#define BIN_INFO_INIT_bin_yes(index, size) \
+ bin_info = &arena_bin_info[index]; \
 bin_info->reg_size = size; \
- prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
+ bin_info_run_size_calc(bin_info); \
 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+#define BIN_INFO_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
 SIZE_CLASSES
-#undef SIZE_CLASS
+#undef BIN_INFO_INIT_bin_yes
+#undef BIN_INFO_INIT_bin_no
+#undef SC
 }
 
-void
+static bool
+small_run_size_init(void)
+{
+
+ assert(small_maxrun != 0);
+
+ small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
+ LG_PAGE));
+ if (small_run_tab == NULL)
+ return (true);
+
+#define TAB_INIT_bin_yes(index, size) { \
+ arena_bin_info_t *bin_info = &arena_bin_info[index]; \
+ small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
+ }
+#define TAB_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
+ SIZE_CLASSES
+#undef TAB_INIT_bin_yes
+#undef TAB_INIT_bin_no
+#undef SC
+
+ return (false);
+}
+
+bool
 arena_boot(void)
 {
 size_t header_size;
 unsigned i;
 
+ arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
+
 /*
 * Compute the header size such that it is large enough to contain the
 * page map. The page map is biased to omit entries for the header
@@ -2534,16 +3260,33 @@ arena_boot(void)
 */
 map_bias = 0;
 for (i = 0; i < 3; i++) {
- header_size = offsetof(arena_chunk_t, map) +
- (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
- map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
- != 0);
+ header_size = offsetof(arena_chunk_t, map_bits) +
+ ((sizeof(arena_chunk_map_bits_t) +
+ sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
+ map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
 }
 assert(map_bias > 0);
 
- arena_maxclass = chunksize - (map_bias << LG_PAGE);
+ map_misc_offset = offsetof(arena_chunk_t, map_bits) +
+ sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
+
+ arena_maxrun = chunksize - (map_bias << LG_PAGE);
+ assert(arena_maxrun > 0);
+ arena_maxclass = index2size(size2index(chunksize)-1);
+ if (arena_maxclass > arena_maxrun) {
+ /*
+ * For small chunk sizes it's possible for there to be fewer
+ * non-header pages available than are necessary to serve the
+ * size classes just below chunksize.
+ */
+ arena_maxclass = arena_maxrun;
+ }
+ assert(arena_maxclass > 0);
+ nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
+ nhclasses = NSIZES - nlclasses - NBINS;
 
 bin_info_init();
+ return (small_run_size_init());
 }
 
 void
@@ -2552,6 +3295,9 @@ arena_prefork(arena_t *arena)
 unsigned i;
 
 malloc_mutex_prefork(&arena->lock);
+ malloc_mutex_prefork(&arena->huge_mtx);
+ malloc_mutex_prefork(&arena->chunks_mtx);
+ malloc_mutex_prefork(&arena->node_cache_mtx);
 for (i = 0; i < NBINS; i++)
 malloc_mutex_prefork(&arena->bins[i].lock);
 }
@@ -2563,6 +3309,9 @@ arena_postfork_parent(arena_t *arena)
 
 for (i = 0; i < NBINS; i++)
 malloc_mutex_postfork_parent(&arena->bins[i].lock);
+ malloc_mutex_postfork_parent(&arena->node_cache_mtx);
+ malloc_mutex_postfork_parent(&arena->chunks_mtx);
+ malloc_mutex_postfork_parent(&arena->huge_mtx);
 malloc_mutex_postfork_parent(&arena->lock);
 }
 
@@ -2573,5 +3322,8 @@ arena_postfork_child(arena_t *arena)
 
 for (i = 0; i < NBINS; i++)
 malloc_mutex_postfork_child(&arena->bins[i].lock);
+ malloc_mutex_postfork_child(&arena->node_cache_mtx);
+ malloc_mutex_postfork_child(&arena->chunks_mtx);
+ malloc_mutex_postfork_child(&arena->huge_mtx);
 malloc_mutex_postfork_child(&arena->lock);
 }
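The rewritten bin_info_run_size_calc() above replaces the old header-overhead expansion loop with a simpler search: the "perfect" run size is the smallest multiple of the page size that is also an exact multiple of the region size, computed before redzone padding and the arena_maxrun cap are applied. The following standalone sketch (not part of the patch) reproduces just that search; the 4 KiB PAGE value and the sample region sizes are assumed, and reg_interval/redzone handling is omitted.

#include <stdio.h>
#include <stddef.h>

/* Assumed page size for illustration only. */
#define PAGE ((size_t)4096)

/* Smallest multiple of PAGE that is an exact multiple of reg_size. */
static size_t
perfect_run_size(size_t reg_size)
{
	size_t try_run_size = PAGE;
	size_t try_nregs = try_run_size / reg_size;
	size_t run_size, nregs;

	do {
		run_size = try_run_size;
		nregs = try_nregs;
		try_run_size += PAGE;
		try_nregs = try_run_size / reg_size;
	} while (run_size != nregs * reg_size);
	return (run_size);
}

int
main(void)
{
	size_t sizes[] = {8, 96, 160, 448, 2560};	/* illustrative classes */
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t rs = perfect_run_size(sizes[i]);
		printf("reg_size %4zu -> run_size %6zu (%zu regions)\n",
		    sizes[i], rs, rs / sizes[i]);
	}
	return (0);
}

For example, a 96-byte class settles on a 12 KiB run holding 128 regions, the smallest page multiple with no trailing waste; the patch then shrinks or grows that value only to satisfy redzone padding and the arena_maxrun limit.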
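arena_boot() above sizes the chunk header with a short fixed-point iteration: each pass sizes the page map for the pages not consumed by the header, then rounds the header up to whole pages, and three passes are enough for the value to settle. The sketch below is a standalone illustration under assumed placeholder sizes (4 KiB pages, a 512-page chunk, 24-byte combined map entries, and a 128-byte fixed header); in the real code these come from arena_chunk_t and the map_bits/map_misc types.

#include <stdio.h>
#include <stddef.h>

#define LG_PAGE      12
#define PAGE         ((size_t)1 << LG_PAGE)
#define PAGE_MASK    (PAGE - 1)
#define CHUNK_NPAGES ((size_t)512)	/* assumed: 2 MiB chunk of 4 KiB pages */
#define MAP_ENTRY    ((size_t)(8 + 16))	/* assumed bits + misc entry size */
#define HDR_FIXED    ((size_t)128)	/* assumed offsetof(arena_chunk_t, map_bits) */

int
main(void)
{
	size_t map_bias = 0;
	unsigned i;

	/*
	 * Each pass sizes the header for (chunk_npages - map_bias) map
	 * entries, then recomputes map_bias as that header rounded up to
	 * whole pages; shrinking the map can only shrink the header, so
	 * the value converges within three iterations.
	 */
	for (i = 0; i < 3; i++) {
		size_t header_size = HDR_FIXED +
		    MAP_ENTRY * (CHUNK_NPAGES - map_bias);
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
		printf("pass %u: header=%zu bytes, map_bias=%zu pages\n",
		    i, header_size, map_bias);
	}
	return (0);
}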
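The arena_new() change above carves arena->stats.lstats and arena->stats.hstats out of the same base_alloc() block as the arena itself: lstats starts at the first cache-line boundary past the arena struct, and hstats at the next quantum boundary past the lstats array. This sketch only computes those offsets under assumed struct sizes, class counts, and alignment values; it illustrates the layout and is not jemalloc code.

#include <stdio.h>
#include <stddef.h>

#define CACHELINE 64
#define QUANTUM   16
#define CACHELINE_CEILING(s) (((s) + (CACHELINE - 1)) & ~((size_t)CACHELINE - 1))
#define QUANTUM_CEILING(s)   (((s) + (QUANTUM - 1)) & ~((size_t)QUANTUM - 1))

int
main(void)
{
	size_t arena_sz = 1000;	/* assumed sizeof(arena_t) */
	size_t lstat_sz = 40;	/* assumed sizeof(malloc_large_stats_t) */
	size_t hstat_sz = 24;	/* assumed sizeof(malloc_huge_stats_t) */
	size_t nlclasses = 28, nhclasses = 8;	/* assumed class counts */

	/* Offsets of the two stats arrays within the single allocation. */
	size_t lstats_off = CACHELINE_CEILING(arena_sz);
	size_t hstats_off = lstats_off +
	    QUANTUM_CEILING(nlclasses * lstat_sz);
	size_t total = hstats_off + nhclasses * hstat_sz;

	printf("lstats at +%zu, hstats at +%zu, total %zu bytes\n",
	    lstats_off, hstats_off, total);
	return (0);
}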