From 18ba9c64014ff7a1a09007df5c74425023227925 Mon Sep 17 00:00:00 2001
From: rwatson
Date: Sat, 16 Jul 2005 09:40:34 +0000
Subject: Move the unlocking of the zone mutex in sysctl_vm_zone_stats() so
 that it covers the following of the uc_alloc/freebucket cache pointers.

Originally, I felt that the race wasn't helped by holding the mutex, hence
a comment in the code and not holding it across the cache access.  However,
it does improve consistency: while it doesn't prevent bucket exchange, it
does prevent bucket pointer invalidation.  So a race in gathering cache
free space statistics can still occur, but not one that follows an invalid
bucket pointer, if the mutex is held.

Submitted by:	yongari
MFC after:	1 week
---
 sys/vm/uma_core.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index c38c030..6491295 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -2986,17 +2986,19 @@ restart:
 			uth.uth_allocs = z->uz_allocs;
 			uth.uth_frees = z->uz_frees;
 			uth.uth_fails = z->uz_fails;
-			ZONE_UNLOCK(z);
 			if (sbuf_bcat(&sbuf, &uth, sizeof(uth)) < 0) {
+				ZONE_UNLOCK(z);
 				mtx_unlock(&uma_mtx);
 				error = ENOMEM;
 				goto out;
 			}
 			/*
-			 * XXXRW: Should not access bucket fields from
-			 * non-local CPU.  Instead need to modify the caches
-			 * to directly maintain these statistics so we don't
-			 * have to.
+			 * While it is not normally safe to access the cache
+			 * bucket pointers while not on the CPU that owns the
+			 * cache, we only allow the pointers to be exchanged
+			 * without the zone lock held, not invalidated, so
+			 * accept the possible race associated with bucket
+			 * exchange during monitoring.
 			 */
 			for (i = 0; i < MAXCPU; i++) {
 				bzero(&ups, sizeof(ups));
@@ -3013,11 +3015,13 @@ restart:
 				ups.ups_frees = cache->uc_frees;
 skip:
 				if (sbuf_bcat(&sbuf, &ups, sizeof(ups)) < 0) {
+					ZONE_UNLOCK(z);
 					mtx_unlock(&uma_mtx);
 					error = ENOMEM;
 					goto out;
 				}
 			}
+			ZONE_UNLOCK(z);
 		}
 	}
 	mtx_unlock(&uma_mtx);
--
cgit v1.1
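
For context, below is a minimal, self-contained sketch (not the actual FreeBSD
UMA code) of the locking pattern the commit describes: a statistics reader
holds the per-zone mutex while following per-CPU bucket pointers, and the
writer only detaches and frees a bucket while holding that same mutex.  The
reader can therefore race with a bucket exchange and report slightly stale
counts, but it can never follow a freed (invalidated) pointer.  The structure
and function names here (struct zone, zone_cached_free(), zone_drain_cpu(),
NCPU) are hypothetical; only the uc_allocbucket/uc_freebucket field names
mirror the real ones, and the main() driver is single-threaded and only
exercises the two code paths.

/*
 * Hypothetical sketch of the zone-lock coverage pattern; not UMA code.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPU	4

struct bucket {
	int	b_cnt;				/* number of cached free items */
};

struct cpu_cache {
	struct bucket	*uc_allocbucket;	/* may be NULL */
	struct bucket	*uc_freebucket;		/* may be NULL */
};

struct zone {
	pthread_mutex_t	 z_lock;		/* plays the role of the zone mutex */
	struct cpu_cache z_cpu[NCPU];
};

/* Reader side: mirrors the sysctl handler's loop over per-CPU caches. */
static int
zone_cached_free(struct zone *z)
{
	int i, total = 0;

	pthread_mutex_lock(&z->z_lock);
	for (i = 0; i < NCPU; i++) {
		struct cpu_cache *cache = &z->z_cpu[i];

		/*
		 * The counts may be stale (the owning CPU can still exchange
		 * buckets), but the pointers cannot be invalidated while
		 * z_lock is held, so it is safe to dereference them here.
		 */
		if (cache->uc_allocbucket != NULL)
			total += cache->uc_allocbucket->b_cnt;
		if (cache->uc_freebucket != NULL)
			total += cache->uc_freebucket->b_cnt;
	}
	pthread_mutex_unlock(&z->z_lock);
	return (total);
}

/* Writer side: detach a bucket only under the zone lock, then free it. */
static void
zone_drain_cpu(struct zone *z, int cpu)
{
	struct bucket *b;

	pthread_mutex_lock(&z->z_lock);
	b = z->z_cpu[cpu].uc_freebucket;
	z->z_cpu[cpu].uc_freebucket = NULL;
	pthread_mutex_unlock(&z->z_lock);
	free(b);				/* safe: no reader can still hold it */
}

int
main(void)
{
	struct zone z = { .z_lock = PTHREAD_MUTEX_INITIALIZER };
	struct bucket *b = malloc(sizeof(*b));

	b->b_cnt = 32;
	z.z_cpu[0].uc_freebucket = b;
	printf("cached free items: %d\n", zone_cached_free(&z));
	zone_drain_cpu(&z, 0);
	printf("cached free items: %d\n", zone_cached_free(&z));
	return (0);
}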