diff options
author:    jasone <jasone@FreeBSD.org>  2015-10-24 23:18:05 +0000
committer: jasone <jasone@FreeBSD.org>  2015-10-24 23:18:05 +0000
commit:    edc319ed79837a53b421d478b95557670e810469 (patch)
tree:      d226a6911ce385e8bb7af573d89a5267efbd78b3 /contrib/jemalloc/src
parent:    6ca81b14be12ea716987d633de1ca7d563af72f6 (diff)
download:  FreeBSD-src-edc319ed79837a53b421d478b95557670e810469.zip
           FreeBSD-src-edc319ed79837a53b421d478b95557670e810469.tar.gz
Update jemalloc to version 4.0.4.
Diffstat (limited to 'contrib/jemalloc/src')
-rw-r--r--  contrib/jemalloc/src/arena.c | 16
-rw-r--r--  contrib/jemalloc/src/huge.c  | 30
-rw-r--r--  contrib/jemalloc/src/prof.c  | 22
-rw-r--r--  contrib/jemalloc/src/tsd.c   |  3
4 files changed, 52 insertions, 19 deletions
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c index 7f4a6ca..43733cc 100644 --- a/contrib/jemalloc/src/arena.c +++ b/contrib/jemalloc/src/arena.c @@ -2679,6 +2679,22 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, if (arena_run_split_large(arena, run, splitsize, zero)) goto label_fail; + if (config_cache_oblivious && zero) { + /* + * Zero the trailing bytes of the original allocation's + * last page, since they are in an indeterminate state. + * There will always be trailing bytes, because ptr's + * offset from the beginning of the run is a multiple of + * CACHELINE in [0 .. PAGE). + */ + void *zbase = (void *)((uintptr_t)ptr + oldsize); + void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + + PAGE)); + size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; + assert(nzero > 0); + memset(zbase, 0, nzero); + } + size = oldsize + splitsize; npages = (size + large_pad) >> LG_PAGE; diff --git a/contrib/jemalloc/src/huge.c b/contrib/jemalloc/src/huge.c index f8778db..1e9a665 100644 --- a/contrib/jemalloc/src/huge.c +++ b/contrib/jemalloc/src/huge.c @@ -133,7 +133,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min, extent_node_t *node; arena_t *arena; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - bool zeroed; + bool pre_zeroed, post_zeroed; /* Increase usize to incorporate extra. */ for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1)) @@ -145,26 +145,27 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min, node = huge_node_get(ptr); arena = extent_node_arena_get(node); + pre_zeroed = extent_node_zeroed_get(node); /* Fill if necessary (shrinking). 
*/ if (oldsize > usize) { size_t sdiff = oldsize - usize; if (config_fill && unlikely(opt_junk_free)) { memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff); - zeroed = false; + post_zeroed = false; } else { - zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr, - CHUNK_CEILING(oldsize), usize, sdiff); + post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, + ptr, CHUNK_CEILING(oldsize), usize, sdiff); } } else - zeroed = true; + post_zeroed = pre_zeroed; malloc_mutex_lock(&arena->huge_mtx); /* Update the size of the huge allocation. */ assert(extent_node_size_get(node) != usize); extent_node_size_set(node, usize); - /* Clear node's zeroed field if zeroing failed above. */ - extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed); + /* Update zeroed. */ + extent_node_zeroed_set(node, post_zeroed); malloc_mutex_unlock(&arena->huge_mtx); arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize); @@ -172,7 +173,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min, /* Fill if necessary (growing). 
*/ if (oldsize < usize) { if (zero || (config_fill && unlikely(opt_zero))) { - if (!zeroed) { + if (!pre_zeroed) { memset((void *)((uintptr_t)ptr + oldsize), 0, usize - oldsize); } @@ -190,10 +191,11 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) arena_t *arena; chunk_hooks_t chunk_hooks; size_t cdiff; - bool zeroed; + bool pre_zeroed, post_zeroed; node = huge_node_get(ptr); arena = extent_node_arena_get(node); + pre_zeroed = extent_node_zeroed_get(node); chunk_hooks = chunk_hooks_get(arena); assert(oldsize > usize); @@ -209,21 +211,21 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) if (config_fill && unlikely(opt_junk_free)) { huge_dalloc_junk((void *)((uintptr_t)ptr + usize), sdiff); - zeroed = false; + post_zeroed = false; } else { - zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, + post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr + usize), CHUNK_CEILING(oldsize), CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); } } else - zeroed = true; + post_zeroed = pre_zeroed; malloc_mutex_lock(&arena->huge_mtx); /* Update the size of the huge allocation. */ extent_node_size_set(node, usize); - /* Clear node's zeroed field if zeroing failed above. */ - extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed); + /* Update zeroed. */ + extent_node_zeroed_set(node, post_zeroed); malloc_mutex_unlock(&arena->huge_mtx); /* Zap the excess chunks. 
*/ diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c index 0a08062..5d2b959 100644 --- a/contrib/jemalloc/src/prof.c +++ b/contrib/jemalloc/src/prof.c @@ -1102,11 +1102,23 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { bool propagate_err = *(bool *)arg; - if (prof_dump_printf(propagate_err, - " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", - tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, - tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes)) - return (tctx); + switch (tctx->state) { + case prof_tctx_state_initializing: + case prof_tctx_state_nominal: + /* Not captured by this dump. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + if (prof_dump_printf(propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " + "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, + tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, + tctx->dump_cnts.accumbytes)) + return (tctx); + break; + default: + not_reached(); + } return (NULL); } diff --git a/contrib/jemalloc/src/tsd.c b/contrib/jemalloc/src/tsd.c index 2100833..9ffe9af 100644 --- a/contrib/jemalloc/src/tsd.c +++ b/contrib/jemalloc/src/tsd.c @@ -73,6 +73,9 @@ tsd_cleanup(void *arg) tsd_t *tsd = (tsd_t *)arg; switch (tsd->state) { + case tsd_state_uninitialized: + /* Do nothing. */ + break; case tsd_state_nominal: #define O(n, t) \ n##_cleanup(tsd); |