diff options
author | mav <mav@FreeBSD.org> | 2013-11-19 10:51:46 +0000 |
---|---|---|
committer | mav <mav@FreeBSD.org> | 2013-11-19 10:51:46 +0000 |
commit | ff33031e0d56cfc04c1d9b1404b0984d9432b718 (patch) | |
tree | 83779a74d9879380f8fca40f5771563d53483fab /sys/vm | |
parent | 073851700eea204c35444e48223db6c06db43f25 (diff) | |
download | FreeBSD-src-ff33031e0d56cfc04c1d9b1404b0984d9432b718.zip FreeBSD-src-ff33031e0d56cfc04c1d9b1404b0984d9432b718.tar.gz |
Implement mechanism to safely but slowly purge UMA per-CPU caches.
This is a last resort for very low memory conditions, in case other measures
to free memory were ineffective. Sequentially cycle through all CPUs and
extract per-CPU cache buckets into the zone cache, from where they can be freed.
Diffstat (limited to 'sys/vm')
-rw-r--r-- | sys/vm/uma_core.c | 77 |
1 file changed, 77 insertions, 0 deletions
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c index 20a1bf6..2b32a3e 100644 --- a/sys/vm/uma_core.c +++ b/sys/vm/uma_core.c @@ -75,6 +75,7 @@ __FBSDID("$FreeBSD$"); #include <sys/proc.h> #include <sys/rwlock.h> #include <sys/sbuf.h> +#include <sys/sched.h> #include <sys/smp.h> #include <sys/vmmeter.h> @@ -684,6 +685,78 @@ cache_drain(uma_zone_t zone) ZONE_UNLOCK(zone); } +static void +cache_shrink(uma_zone_t zone) +{ + + if (zone->uz_flags & UMA_ZFLAG_INTERNAL) + return; + + ZONE_LOCK(zone); + zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2; + ZONE_UNLOCK(zone); +} + +static void +cache_drain_safe_cpu(uma_zone_t zone) +{ + uma_cache_t cache; + + if (zone->uz_flags & UMA_ZFLAG_INTERNAL) + return; + + ZONE_LOCK(zone); + critical_enter(); + cache = &zone->uz_cpu[curcpu]; + if (cache->uc_allocbucket) { + LIST_INSERT_HEAD(&zone->uz_buckets, cache->uc_allocbucket, + ub_link); + cache->uc_allocbucket = NULL; + } + if (cache->uc_freebucket) { + LIST_INSERT_HEAD(&zone->uz_buckets, cache->uc_freebucket, + ub_link); + cache->uc_freebucket = NULL; + } + critical_exit(); + ZONE_UNLOCK(zone); +} + +/* + * Safely drain per-CPU caches of a zone(s) to alloc bucket. + * This is an expensive call because it needs to bind to all CPUs + * one by one and enter a critical section on each of them in order + * to safely access their cache buckets. + * Zone lock must not be held when calling this function. + */ +static void +cache_drain_safe(uma_zone_t zone) +{ + int cpu; + + /* + * Polite bucket size shrinking was not enough, shrink aggressively. + */ + if (zone) + cache_shrink(zone); + else + zone_foreach(cache_shrink); + + CPU_FOREACH(cpu) { + thread_lock(curthread); + sched_bind(curthread, cpu); + thread_unlock(curthread); + + if (zone) + cache_drain_safe_cpu(zone); + else + zone_foreach(cache_drain_safe_cpu); + } + thread_lock(curthread); + sched_unbind(curthread); + thread_unlock(curthread); +} + /* * Drain the cached buckets from a zone. Expects a locked zone on entry. 
*/ @@ -3068,6 +3141,10 @@ uma_reclaim(void) #endif bucket_enable(); zone_foreach(zone_drain); + if (vm_page_count_min()) { + cache_drain_safe(NULL); + zone_foreach(zone_drain); + } /* * Some slabs may have been freed but this zone will be visited early * we visit again so that we can free pages that are empty once other |