author      jasone <jasone@FreeBSD.org>    2008-02-08 08:02:34 +0000
committer   jasone <jasone@FreeBSD.org>    2008-02-08 08:02:34 +0000
commit      f6ce9fe6014b706e334f76614a12ffd1cb901eaa (patch)
tree        cab241ad5db25b09f06bf77cf47b731032ef5b2d /lib/libc/stdlib
parent      4fa28da3c9c38d1bd9b235fa85b264caf475fe4c (diff)
download    FreeBSD-src-f6ce9fe6014b706e334f76614a12ffd1cb901eaa.zip
            FreeBSD-src-f6ce9fe6014b706e334f76614a12ffd1cb901eaa.tar.gz
Fix a bug in lazy deallocation that was introduced when
arena_dalloc_lazy_hard() was split out of arena_dalloc_lazy() in revision
1.162.
Reduce thundering herd problems in lazy deallocation by randomly varying
how many probes a thread does before taking the slow path.
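Taken together, the two changes work as follows: the fast path now makes a
randomized number of lock-free probes into the per-arena free cache, and the
slow path receives the last slot the fast path probed instead of redeclaring
`slot` locally, which the revision 1.162 split had evidently left
uninitialized (the diff below adds the parameter and drops the local
declaration). Here is a minimal, self-contained sketch of the scheme,
assuming C11 <stdatomic.h> in place of FreeBSD's atomic_cmpset_ptr() and a
toy xorshift generator in place of malloc.c's PRN() macro; all names and
sizes are illustrative, not the actual malloc.c code:

/*
 * Sketch: randomized CAS probing into a small free cache, with the last
 * probed slot handed to the slow path.  Stand-ins: <stdatomic.h> for
 * atomic_cmpset_ptr(), xorshift for PRN().  Hypothetical throughout.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define	NPROBES_2POW_MIN	2
#define	NPROBES_2POW_MAX	3
#define	CACHE_2POW		4		/* 16-slot free cache */

static _Atomic(void *) free_cache[1U << CACHE_2POW];

/* Toy xorshift PRNG; prn(bits) returns a bits-wide pseudo-random value. */
static unsigned prn_state = 42;

static unsigned
prn(unsigned bits)
{

	prn_state ^= prn_state << 13;
	prn_state ^= prn_state >> 17;
	prn_state ^= prn_state << 5;
	return (prn_state & ((1U << bits) - 1));
}

/*
 * Slow path.  Receiving the caller's last-probed slot mirrors the fix:
 * before it, the split-out function redeclared `slot` and read it
 * uninitialized.
 */
static void
dalloc_hard(void *ptr, unsigned slot)
{

	printf("slow path: free %p (last probed slot %u)\n", ptr, slot);
	/* Real code would lock the arena, deallocate, maintain the cache. */
}

/* Fast path: a randomized number of CAS probes before giving up. */
static void
dalloc_lazy(void *ptr)
{
	unsigned i, nprobes, slot = 0;	/* loop below always assigns slot */

	/* 2^MIN probes plus a random extra: 4 or 5 with MIN=2, MAX=3. */
	nprobes = (1U << NPROBES_2POW_MIN) +
	    prn(NPROBES_2POW_MAX - NPROBES_2POW_MIN);
	for (i = 0; i < nprobes; i++) {
		void *expected = NULL;

		slot = prn(CACHE_2POW);
		if (atomic_compare_exchange_strong(&free_cache[slot],
		    &expected, ptr))
			return;		/* parked in an empty slot */
	}
	dalloc_hard(ptr, slot);		/* every probe hit a full slot */
}

int
main(void)
{
	static int objs[32];

	/* More frees than cache slots, so some must take the slow path. */
	for (int j = 0; j < 32; j++)
		dalloc_lazy(&objs[j]);
	return (0);
}

With LAZY_FREE_NPROBES_2POW_MIN = 2 and LAZY_FREE_NPROBES_2POW_MAX = 3, and
assuming PRN(lazy_free, k) yields a k-bit value (as its use for slot
selection suggests), nprobes works out to 4 + PRN(lazy_free, 1), i.e. 4 or 5
probes per deallocation, so concurrent threads no longer exhaust a fixed 5
probes and pile onto the arena lock in lockstep.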
Diffstat (limited to 'lib/libc/stdlib')
-rw-r--r--  lib/libc/stdlib/malloc.c  17
1 file changed, 10 insertions, 7 deletions
diff --git a/lib/libc/stdlib/malloc.c b/lib/libc/stdlib/malloc.c
index 0bfa17b..6039998 100644
--- a/lib/libc/stdlib/malloc.c
+++ b/lib/libc/stdlib/malloc.c
@@ -315,7 +315,8 @@ __FBSDID("$FreeBSD$");
  * trials (each deallocation is a trial), so the actual average threshold
  * for clearing the cache is somewhat lower.
  */
-#  define LAZY_FREE_NPROBES 5
+#  define LAZY_FREE_NPROBES_2POW_MIN 2
+#  define LAZY_FREE_NPROBES_2POW_MAX 3
 #endif
 
 /*
@@ -931,7 +932,7 @@ static void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
 static size_t arena_salloc(const void *ptr);
 #ifdef MALLOC_LAZY_FREE
 static void arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr, size_t pageind, arena_chunk_map_t *mapelm);
+    void *ptr, size_t pageind, arena_chunk_map_t *mapelm, unsigned slot);
 #endif
 static void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
@@ -3348,7 +3349,7 @@ arena_dalloc_lazy(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t pageind, arena_chunk_map_t *mapelm)
 {
 	void **free_cache = arena->free_cache;
-	unsigned i, slot;
+	unsigned i, nprobes, slot;
 
 	if (__isthreaded == false || opt_lazy_free_2pow < 0) {
 		malloc_spin_lock(&arena->lock);
@@ -3357,7 +3358,9 @@ arena_dalloc_lazy(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 		return;
 	}
 
-	for (i = 0; i < LAZY_FREE_NPROBES; i++) {
+	nprobes = (1U << LAZY_FREE_NPROBES_2POW_MIN) + PRN(lazy_free,
+	    (LAZY_FREE_NPROBES_2POW_MAX - LAZY_FREE_NPROBES_2POW_MIN));
+	for (i = 0; i < nprobes; i++) {
 		slot = PRN(lazy_free, opt_lazy_free_2pow);
 		if (atomic_cmpset_ptr((uintptr_t *)&free_cache[slot],
 		    (uintptr_t)NULL, (uintptr_t)ptr)) {
@@ -3365,15 +3368,15 @@ arena_dalloc_lazy(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 		}
 	}
 
-	arena_dalloc_lazy_hard(arena, chunk, ptr, pageind, mapelm);
+	arena_dalloc_lazy_hard(arena, chunk, ptr, pageind, mapelm, slot);
 }
 
 static void
 arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind, arena_chunk_map_t *mapelm)
+    size_t pageind, arena_chunk_map_t *mapelm, unsigned slot)
 {
 	void **free_cache = arena->free_cache;
-	unsigned i, slot;
+	unsigned i;
 
 	malloc_spin_lock(&arena->lock);
 	arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);