summaryrefslogtreecommitdiffstats
path: root/sys/ia64
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2007-09-15 18:47:02 +0000
committeralc <alc@FreeBSD.org>2007-09-15 18:47:02 +0000
commit20b10da7063cd4f24e83139ffcb5c6c7eec6f5ed (patch)
tree3e80f44469adf9c118d276d6774b85e72d19ce96 /sys/ia64
parent0188378655831f69b6dfe72c699ed262ae4d42f3 (diff)
downloadFreeBSD-src-20b10da7063cd4f24e83139ffcb5c6c7eec6f5ed.zip
FreeBSD-src-20b10da7063cd4f24e83139ffcb5c6c7eec6f5ed.tar.gz
It has been observed on the mailing lists that the different categories
of pages don't sum to anywhere near the total number of pages on amd64. This is for the most part because uma_small_alloc() pages have never been counted as wired pages, like their kmem_malloc() brethren. They should be. This change fixes that. It is no longer necessary for the page queues lock to be held to free pages allocated by uma_small_alloc(). I removed the acquisition and release of the page queues lock from uma_small_free() on amd64 and ia64 weeks ago. This patch updates the other architectures that have uma_small_alloc() and uma_small_free(). Approved by: re (kensmith)
Diffstat (limited to 'sys/ia64')
-rw-r--r--sys/ia64/ia64/uma_machdep.c6
1 files changed, 4 insertions, 2 deletions
diff --git a/sys/ia64/ia64/uma_machdep.c b/sys/ia64/ia64/uma_machdep.c
index 873d1bb..c8084ba 100644
--- a/sys/ia64/ia64/uma_machdep.c
+++ b/sys/ia64/ia64/uma_machdep.c
@@ -48,9 +48,9 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
*flags = UMA_SLAB_PRIV;
if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
- pflags = VM_ALLOC_INTERRUPT;
+ pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
else
- pflags = VM_ALLOC_SYSTEM;
+ pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
if (wait & M_ZERO)
pflags |= VM_ALLOC_ZERO;
@@ -76,5 +76,7 @@ uma_small_free(void *mem, int size, u_int8_t flags)
vm_page_t m;
m = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem));
+ m->wire_count--;
vm_page_free(m);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
}
OpenPOWER on IntegriCloud