author     alc <alc@FreeBSD.org>    2007-09-15 18:47:02 +0000
committer  alc <alc@FreeBSD.org>    2007-09-15 18:47:02 +0000
commit     20b10da7063cd4f24e83139ffcb5c6c7eec6f5ed (patch)
tree       3e80f44469adf9c118d276d6774b85e72d19ce96    /sys/amd64/amd64/uma_machdep.c
parent     0188378655831f69b6dfe72c699ed262ae4d42f3 (diff)
It has been observed on the mailing lists that the different categories
of pages don't sum to anywhere near the total number of pages on amd64.
This is for the most part because uma_small_alloc() pages have never been
counted as wired pages, like their kmem_malloc() brethren.  They should
be.  This change fixes that.

It is no longer necessary for the page queues lock to be held to free
pages allocated by uma_small_alloc().  I removed the acquisition and
release of the page queues lock from uma_small_free() on amd64 and ia64
weeks ago.  This patch updates the other architectures that have
uma_small_alloc() and uma_small_free().

Approved by:	re (kensmith)
Diffstat (limited to 'sys/amd64/amd64/uma_machdep.c')
-rw-r--r--	sys/amd64/amd64/uma_machdep.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/sys/amd64/amd64/uma_machdep.c b/sys/amd64/amd64/uma_machdep.c
index 4a35c67..1329668 100644
--- a/sys/amd64/amd64/uma_machdep.c
+++ b/sys/amd64/amd64/uma_machdep.c
@@ -50,9 +50,9 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 
 	*flags = UMA_SLAB_PRIV;
 	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
-		pflags = VM_ALLOC_INTERRUPT;
+		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
 	else
-		pflags = VM_ALLOC_SYSTEM;
+		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
 	if (wait & M_ZERO)
 		pflags |= VM_ALLOC_ZERO;
 	for (;;) {
@@ -82,5 +82,7 @@ uma_small_free(void *mem, int size, u_int8_t flags)
 	pa = DMAP_TO_PHYS((vm_offset_t)mem);
 	dump_drop_page(pa);
 	m = PHYS_TO_VM_PAGE(pa);
+	m->wire_count--;
 	vm_page_free(m);
+	atomic_subtract_int(&cnt.v_wire_count, 1);
 }
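
For readers following the change, uma_small_free() after this patch reads roughly as below.  This is a sketch reconstructed from the hunk above, not copied from the tree; the local declarations (vm_page_t m, vm_paddr_t pa) are not visible in the diff context and are assumed here.

void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	vm_page_t m;		/* assumed declaration, not shown in the diff context */
	vm_paddr_t pa;		/* assumed declaration, not shown in the diff context */

	pa = DMAP_TO_PHYS((vm_offset_t)mem);	/* direct-map address back to physical */
	dump_drop_page(pa);			/* drop the page from the kernel minidump map */
	m = PHYS_TO_VM_PAGE(pa);
	m->wire_count--;			/* page was allocated VM_ALLOC_WIRED in uma_small_alloc() */
	vm_page_free(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);	/* keep the global wired-page count in sync */
}

The allocation side pairs with this: adding VM_ALLOC_WIRED to pflags makes vm_page_alloc() hand the page out already counted as wired, so the per-category page counts add up to the machine total, and the atomic update of cnt.v_wire_count lets the free path run without holding the page queues lock.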