diff options
author | alc <alc@FreeBSD.org> | 2007-09-15 18:47:02 +0000 |
---|---|---|
committer | alc <alc@FreeBSD.org> | 2007-09-15 18:47:02 +0000 |
commit | 20b10da7063cd4f24e83139ffcb5c6c7eec6f5ed (patch) | |
tree | 3e80f44469adf9c118d276d6774b85e72d19ce96 /sys | |
parent | 0188378655831f69b6dfe72c699ed262ae4d42f3 (diff) | |
download | FreeBSD-src-20b10da7063cd4f24e83139ffcb5c6c7eec6f5ed.zip FreeBSD-src-20b10da7063cd4f24e83139ffcb5c6c7eec6f5ed.tar.gz |
It has been observed on the mailing lists that the different categories
of pages don't sum to anywhere near the total number of pages on amd64.
This is for the most part because uma_small_alloc() pages have never been
counted as wired pages, like their kmem_malloc() brethren. They should
be. This change fixes that.
It is no longer necessary for the page queues lock to be held to free
pages allocated by uma_small_alloc(). I removed the acquisition and
release of the page queues lock from uma_small_free() on amd64 and ia64
weeks ago. This patch updates the other architectures that have
uma_small_alloc() and uma_small_free().
Approved by: re (kensmith)
Diffstat (limited to 'sys')
-rw-r--r-- | sys/amd64/amd64/uma_machdep.c | 6 | ||||
-rw-r--r-- | sys/arm/arm/vm_machdep.c | 8 | ||||
-rw-r--r-- | sys/ia64/ia64/uma_machdep.c | 6 | ||||
-rw-r--r-- | sys/powerpc/aim/uma_machdep.c | 8 | ||||
-rw-r--r-- | sys/powerpc/powerpc/uma_machdep.c | 8 | ||||
-rw-r--r-- | sys/sparc64/sparc64/vm_machdep.c | 8 | ||||
-rw-r--r-- | sys/sun4v/sun4v/vm_machdep.c | 8 |
7 files changed, 28 insertions, 24 deletions
diff --git a/sys/amd64/amd64/uma_machdep.c b/sys/amd64/amd64/uma_machdep.c index 4a35c67..1329668 100644 --- a/sys/amd64/amd64/uma_machdep.c +++ b/sys/amd64/amd64/uma_machdep.c @@ -50,9 +50,9 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) *flags = UMA_SLAB_PRIV; if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) - pflags = VM_ALLOC_INTERRUPT; + pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; else - pflags = VM_ALLOC_SYSTEM; + pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; if (wait & M_ZERO) pflags |= VM_ALLOC_ZERO; for (;;) { @@ -82,5 +82,7 @@ uma_small_free(void *mem, int size, u_int8_t flags) pa = DMAP_TO_PHYS((vm_offset_t)mem); dump_drop_page(pa); m = PHYS_TO_VM_PAGE(pa); + m->wire_count--; vm_page_free(m); + atomic_subtract_int(&cnt.v_wire_count, 1); } diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c index 2f6cb0c..2e87030 100644 --- a/sys/arm/arm/vm_machdep.c +++ b/sys/arm/arm/vm_machdep.c @@ -580,9 +580,9 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) return (ret); } if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) - pflags = VM_ALLOC_INTERRUPT; + pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; else - pflags = VM_ALLOC_SYSTEM; + pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; if (wait & M_ZERO) pflags |= VM_ALLOC_ZERO; for (;;) { @@ -640,9 +640,9 @@ uma_small_free(void *mem, int size, u_int8_t flags) vm_paddr_t pa = vtophys((vm_offset_t)mem); m = PHYS_TO_VM_PAGE(pa); - vm_page_lock_queues(); + m->wire_count--; vm_page_free(m); - vm_page_unlock_queues(); + atomic_subtract_int(&cnt.v_wire_count, 1); } } } diff --git a/sys/ia64/ia64/uma_machdep.c b/sys/ia64/ia64/uma_machdep.c index 873d1bb..c8084ba 100644 --- a/sys/ia64/ia64/uma_machdep.c +++ b/sys/ia64/ia64/uma_machdep.c @@ -48,9 +48,9 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) *flags = UMA_SLAB_PRIV; if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) - pflags = VM_ALLOC_INTERRUPT; + pflags = VM_ALLOC_INTERRUPT | 
VM_ALLOC_WIRED; else - pflags = VM_ALLOC_SYSTEM; + pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; if (wait & M_ZERO) pflags |= VM_ALLOC_ZERO; @@ -76,5 +76,7 @@ uma_small_free(void *mem, int size, u_int8_t flags) vm_page_t m; m = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem)); + m->wire_count--; vm_page_free(m); + atomic_subtract_int(&cnt.v_wire_count, 1); } diff --git a/sys/powerpc/aim/uma_machdep.c b/sys/powerpc/aim/uma_machdep.c index 9fff32e..89d092a 100644 --- a/sys/powerpc/aim/uma_machdep.c +++ b/sys/powerpc/aim/uma_machdep.c @@ -54,9 +54,9 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) *flags = UMA_SLAB_PRIV; if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) - pflags = VM_ALLOC_INTERRUPT; + pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; else - pflags = VM_ALLOC_SYSTEM; + pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; if (wait & M_ZERO) pflags |= VM_ALLOC_ZERO; @@ -84,8 +84,8 @@ uma_small_free(void *mem, int size, u_int8_t flags) vm_page_t m; m = PHYS_TO_VM_PAGE((u_int32_t)mem); - vm_page_lock_queues(); + m->wire_count--; vm_page_free(m); - vm_page_unlock_queues(); + atomic_subtract_int(&cnt.v_wire_count, 1); atomic_subtract_int(&hw_uma_mdpages, 1); } diff --git a/sys/powerpc/powerpc/uma_machdep.c b/sys/powerpc/powerpc/uma_machdep.c index 9fff32e..89d092a 100644 --- a/sys/powerpc/powerpc/uma_machdep.c +++ b/sys/powerpc/powerpc/uma_machdep.c @@ -54,9 +54,9 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) *flags = UMA_SLAB_PRIV; if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) - pflags = VM_ALLOC_INTERRUPT; + pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; else - pflags = VM_ALLOC_SYSTEM; + pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; if (wait & M_ZERO) pflags |= VM_ALLOC_ZERO; @@ -84,8 +84,8 @@ uma_small_free(void *mem, int size, u_int8_t flags) vm_page_t m; m = PHYS_TO_VM_PAGE((u_int32_t)mem); - vm_page_lock_queues(); + m->wire_count--; vm_page_free(m); - vm_page_unlock_queues(); + atomic_subtract_int(&cnt.v_wire_count, 
1); atomic_subtract_int(&hw_uma_mdpages, 1); } diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c index 10e39ea..fce7786 100644 --- a/sys/sparc64/sparc64/vm_machdep.c +++ b/sys/sparc64/sparc64/vm_machdep.c @@ -462,9 +462,9 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) *flags = UMA_SLAB_PRIV; if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) - pflags = VM_ALLOC_INTERRUPT; + pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; else - pflags = VM_ALLOC_SYSTEM; + pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; if (wait & M_ZERO) pflags |= VM_ALLOC_ZERO; @@ -501,7 +501,7 @@ uma_small_free(void *mem, int size, u_int8_t flags) PMAP_STATS_INC(uma_nsmall_free); m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem)); - vm_page_lock_queues(); + m->wire_count--; vm_page_free(m); - vm_page_unlock_queues(); + atomic_subtract_int(&cnt.v_wire_count, 1); } diff --git a/sys/sun4v/sun4v/vm_machdep.c b/sys/sun4v/sun4v/vm_machdep.c index d5d9a70..fbfa4a0 100644 --- a/sys/sun4v/sun4v/vm_machdep.c +++ b/sys/sun4v/sun4v/vm_machdep.c @@ -392,9 +392,9 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) *flags = UMA_SLAB_PRIV; if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) - pflags = VM_ALLOC_INTERRUPT; + pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; else - pflags = VM_ALLOC_SYSTEM; + pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; if (wait & M_ZERO) pflags |= VM_ALLOC_ZERO; @@ -423,7 +423,7 @@ uma_small_free(void *mem, int size, u_int8_t flags) vm_page_t m; m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem)); - vm_page_lock_queues(); + m->wire_count--; vm_page_free(m); - vm_page_unlock_queues(); + atomic_subtract_int(&cnt.v_wire_count, 1); } |