summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
author	scottl <scottl@FreeBSD.org>	2006-01-29 08:24:54 +0000
committer	scottl <scottl@FreeBSD.org>	2006-01-29 08:24:54 +0000
commita3330c2d56280efc18ba9d27f10adecead499d50 (patch)
tree946310afdd23d85e0d00033553135f53c0fc621c /sys/vm
parent89c22f2e1ab0fd5e0513521c6edaee0da543b5b8 (diff)
downloadFreeBSD-src-a3330c2d56280efc18ba9d27f10adecead499d50.zip
FreeBSD-src-a3330c2d56280efc18ba9d27f10adecead499d50.tar.gz
The change a few years ago of having contigmalloc start its scan at the top
of physical RAM instead of the bottom was a sound idea, but the implementation left a lot to be desired. Scans would spend considerable time looking at pages that are above the address range given by the caller, and multiple calls (like what happens in busdma) would spend more time on top of that rescanning the same pages over and over. Solve this, at least for now, with two simple optimizations. The first is to not bother scanning high ordered pages that are outside of the provided address range. Second is to cache the page index from the last successful operation so that subsequent scans don't have to restart from the top. This is conditional on the numpages argument being the same or greater between calls. MFC After: 2 weeks
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_contig.c21
1 file changed, 19 insertions, 2 deletions
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 6208774..4b37aa8 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -387,7 +387,9 @@ vm_page_alloc_contig(vm_pindex_t npages, vm_paddr_t low, vm_paddr_t high,
vm_offset_t size;
vm_paddr_t phys;
vm_page_t pga = vm_page_array;
- int i, pass, pqtype, start;
+ static vm_pindex_t np = 0;
+ static vm_pindex_t start = 0;
+ int i, pass, pqtype;
size = npages << PAGE_SHIFT;
if (size == 0)
@@ -397,8 +399,22 @@ vm_page_alloc_contig(vm_pindex_t npages, vm_paddr_t low, vm_paddr_t high,
if ((boundary & (boundary - 1)) != 0)
panic("vm_page_alloc_contig: boundary must be a power of 2");
+ /*
+ * Two simple optimizations. First, don't scan high ordered pages
+ * if they are outside of the requested address range. Second, cache
+ * the starting page index across calls and reuse it instead of
+ * restarting the scan from the top. This is conditional on the
+ * requested number of pages being the same or greater than the
+ * cached amount.
+ */
for (pass = 0; pass < 2; pass++) {
- start = vm_page_array_size - npages + 1;
+ if ((np == 0) || (np > npages)) {
+ if (atop(high) < vm_page_array_size)
+ start = atop(high) - npages + 1;
+ else
+ start = vm_page_array_size - npages + 1;
+ }
+ np = 0;
vm_page_lock_queues();
retry:
start--;
@@ -496,6 +512,7 @@ cleanup_freed:
/*
* We've found a contiguous chunk that meets are requirements.
*/
+ np = npages;
return (&pga[start]);
}
return (NULL);
OpenPOWER on IntegriCloud