author      jeff <jeff@FreeBSD.org>        2003-07-30 05:59:17 +0000
committer   jeff <jeff@FreeBSD.org>        2003-07-30 05:59:17 +0000
commit      8512070a52bb32100e8c97967590e21122064fa5 (patch)
tree        14bf87a82f8dea97f3cc358d0f483c8a7b310eec
parent      50d6e1a8229852e208beb80ffd69bec00453dbea (diff)
- Get rid of the ill-conceived uz_cachefree member of uma_zone.
- In sysctl_vm_zone use the per cpu locks to read the current cache statistics; this makes them more accurate while under heavy load.

Submitted by: tegge
-rw-r--r--  sys/vm/uma_core.c | 39
-rw-r--r--  sys/vm/uma_int.h  |  1
2 files changed, 24 insertions, 16 deletions
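For reference, the pattern this commit introduces in sysctl_vm_zone() (see the diff below) is: take every per-CPU cache lock, sum the items sitting in each CPU's alloc and free buckets, add the zone's full buckets under the zone lock, and only then compute the total. The following is a minimal userland sketch of that aggregation pattern, not FreeBSD code: struct cpu_cache, struct zone, count_zone_free() and the pthread mutexes are hypothetical stand-ins for the kernel's uma_cache_t, struct uma_zone, CPU_LOCK()/ZONE_LOCK() and the uz_full_bucket list.

/*
 * Minimal sketch (hypothetical names) of the free-item aggregation
 * pattern added to sysctl_vm_zone(): lock every per-CPU cache, sum
 * the items in its alloc/free buckets, then add the zone's full
 * buckets while holding the zone lock.
 */
#include <pthread.h>
#include <stdio.h>

#define NCPU 4

struct bucket {
    int ub_ptr;                 /* index of last valid item; -1 when empty */
};

struct cpu_cache {              /* stand-in for the kernel's uma_cache_t */
    pthread_mutex_t lock;       /* stand-in for CPU_LOCK(cpu) */
    struct bucket *allocbucket;
    struct bucket *freebucket;
};

struct zone {                   /* stand-in for struct uma_zone */
    pthread_mutex_t lock;       /* stand-in for ZONE_LOCK(z) */
    struct cpu_cache cpu[NCPU];
    struct bucket full[2];      /* stand-in for the uz_full_bucket list */
    int nfull;
    int uz_free;                /* free items sitting on the zone's slabs */
};

/* Count free items the way the patched sysctl handler does. */
static int
count_zone_free(struct zone *z)
{
    int cachefree = 0;

    /* Take all per-CPU locks first so no cache can change under us. */
    for (int cpu = 0; cpu < NCPU; cpu++)
        pthread_mutex_lock(&z->cpu[cpu].lock);
    pthread_mutex_lock(&z->lock);

    for (int cpu = 0; cpu < NCPU; cpu++) {
        struct cpu_cache *c = &z->cpu[cpu];

        if (c->allocbucket != NULL)
            cachefree += c->allocbucket->ub_ptr + 1;
        if (c->freebucket != NULL)
            cachefree += c->freebucket->ub_ptr + 1;
        pthread_mutex_unlock(&c->lock);
    }
    /* Full buckets hang off the zone and are covered by the zone lock. */
    for (int i = 0; i < z->nfull; i++)
        cachefree += z->full[i].ub_ptr + 1;

    int totalfree = z->uz_free + cachefree;

    pthread_mutex_unlock(&z->lock);
    return (totalfree);
}

int
main(void)
{
    struct zone z = { .nfull = 1, .uz_free = 10 };

    pthread_mutex_init(&z.lock, NULL);
    for (int cpu = 0; cpu < NCPU; cpu++)
        pthread_mutex_init(&z.cpu[cpu].lock, NULL);
    z.full[0].ub_ptr = 3;       /* one full bucket holding 4 items */

    printf("total free: %d\n", count_zone_free(&z)); /* 10 + 4 = 14 */
    return (0);
}

Because every per-CPU lock is held before any cache is read, the counts cannot change mid-walk; that consistency is what lets the commit drop the uz_cachefree field, which was only a periodic snapshot refreshed from zone_timeout().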
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index eb92b85..973af48 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -236,11 +236,9 @@ zone_timeout(uma_zone_t zone)
{
uma_cache_t cache;
u_int64_t alloc;
- int free;
int cpu;
alloc = 0;
- free = 0;
/*
* Aggregate per cpu cache statistics back to the zone.
@@ -259,10 +257,6 @@ zone_timeout(uma_zone_t zone)
/* Add them up, and reset */
alloc += cache->uc_allocs;
cache->uc_allocs = 0;
- if (cache->uc_allocbucket)
- free += cache->uc_allocbucket->ub_ptr + 1;
- if (cache->uc_freebucket)
- free += cache->uc_freebucket->ub_ptr + 1;
CPU_UNLOCK(cpu);
}
}
@@ -272,12 +266,6 @@ zone_timeout(uma_zone_t zone)
zone->uz_allocs += alloc;
/*
- * cachefree is an instantanious snapshot of what is in the per cpu
- * caches, not an accurate counter
- */
- zone->uz_cachefree = free;
-
- /*
* Expand the zone hash table.
*
* This is done if the number of slabs is larger than the hash size.
@@ -550,8 +538,6 @@ cache_drain(uma_zone_t zone)
continue;
CPU_UNLOCK(cpu);
}
-
- zone->uz_cachefree = 0;
}
/*
@@ -2093,6 +2079,10 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
char *tmpbuf, *offset;
uma_zone_t z;
char *p;
+ int cpu;
+ int cachefree;
+ uma_bucket_t bucket;
+ uma_cache_t cache;
cnt = 0;
mtx_lock(&uma_mtx);
@@ -2113,8 +2103,27 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
LIST_FOREACH(z, &uma_zones, uz_link) {
if (cnt == 0) /* list may have changed size */
break;
+ for (cpu = 0; cpu < maxcpu; cpu++) {
+ if (CPU_ABSENT(cpu))
+ continue;
+ CPU_LOCK(cpu);
+ }
ZONE_LOCK(z);
- totalfree = z->uz_free + z->uz_cachefree;
+ cachefree = 0;
+ for (cpu = 0; cpu < maxcpu; cpu++) {
+ if (CPU_ABSENT(cpu))
+ continue;
+ cache = &z->uz_cpu[cpu];
+ if (cache->uc_allocbucket != NULL)
+ cachefree += cache->uc_allocbucket->ub_ptr + 1;
+ if (cache->uc_freebucket != NULL)
+ cachefree += cache->uc_freebucket->ub_ptr + 1;
+ CPU_UNLOCK(cpu);
+ }
+ LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
+ cachefree += bucket->ub_ptr + 1;
+ }
+ totalfree = z->uz_free + cachefree;
len = snprintf(offset, linesize,
"%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
z->uz_name, z->uz_size,
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index 78493c7..6066440 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -240,7 +240,6 @@ struct uma_zone {
struct vm_object *uz_obj; /* Zone specific object */
vm_offset_t uz_kva; /* Base kva for zones with objs */
u_int32_t uz_maxpages; /* Maximum number of pages to alloc */
- u_int32_t uz_cachefree; /* Last count of items free in caches */
u_int64_t uz_oallocs; /* old allocs count */
u_int64_t uz_wssize; /* Working set size */
int uz_recurse; /* Allocation recursion count */