path: root/sys/vm/vm_fault.c
author     alc <alc@FreeBSD.org>  2007-07-20 06:55:11 +0000
committer  alc <alc@FreeBSD.org>  2007-07-20 06:55:11 +0000
commit     8765bda351ab8fcc0e2fcece54e27515b2bd12a9 (patch)
tree       6dbe4d92e382c0626f69634767ff83d4ed84d81c /sys/vm/vm_fault.c
parent     91c4c4d8d5b70fd9f013e29ad79440cb377b8688 (diff)
Two changes to vm_fault_additional_pages():

1. Rewrite the backward scan. Specifically, reverse the order in which
   pages are allocated so that upon failure it is never necessary to
   free pages that were just allocated. Moreover, any allocated pages
   can be put to use. This makes the backward scan behave just like the
   forward scan.

2. Eliminate an explicit, unsynchronized check for low memory before
   calling vm_page_alloc(). It serves no useful purpose. It is, in
   effect, optimizing the uncommon case at the expense of the common
   case.

Approved by: re (hrs)
MFC after:   3 weeks
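Outside the kernel, the effect of change 1 can be illustrated with a minimal standalone sketch: slots are filled from the highest page index downward, and when an allocation fails the pages already obtained are shifted to the front of the array instead of being freed. This is a toy, not the FreeBSD code; page_t, try_alloc(), and backward_scan() below are hypothetical stand-ins for the kernel's vm_page machinery and vm_page_alloc().

/*
 * Toy model of the reversed backward scan: allocate from the highest
 * index downward; on failure, keep and compact what was obtained.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct page {
        size_t pindex;
} page_t;

/* Hypothetical allocator: may return NULL at any point. */
static page_t *
try_alloc(size_t pindex)
{
        page_t *p = malloc(sizeof(*p));

        if (p != NULL)
                p->pindex = pindex;
        return (p);
}

/*
 * Try to obtain pages for the indices [startpindex, pindex), scanning
 * backward from pindex - 1.  Returns the number of pages obtained;
 * they always occupy marray[0 .. count - 1] in ascending index order,
 * even when an allocation fails partway through.
 */
static size_t
backward_scan(size_t startpindex, size_t pindex, page_t **marray)
{
        size_t i, j, tpindex;
        page_t *p;

        /* tpindex is unsigned; beware of numeric underflow. */
        for (i = 0, tpindex = pindex - 1; tpindex >= startpindex &&
            tpindex < pindex; i++, tpindex--) {
                p = try_alloc(tpindex);
                if (p == NULL) {
                        /*
                         * Keep the i pages already allocated: shift
                         * them to the beginning of the array.
                         */
                        for (j = 0; j < i; j++)
                                marray[j] = marray[j + tpindex + 1 - startpindex];
                        return (i);
                }
                marray[tpindex - startpindex] = p;
        }
        return (i);
}

int
main(void)
{
        page_t *marray[4];
        size_t k, n;

        /* Hypothetical request: read-behind range [10, 14). */
        n = backward_scan(10, 14, marray);
        for (k = 0; k < n; k++)
                printf("marray[%zu] -> pindex %zu\n", k, marray[k]->pindex);
        for (k = 0; k < n; k++)
                free(marray[k]);
        return (0);
}

The property the commit message relies on is visible here: a failed allocation never forces freeing pages that were just allocated, and whatever was obtained remains usable, just as in the forward scan.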
Diffstat (limited to 'sys/vm/vm_fault.c')
-rw-r--r--    sys/vm/vm_fault.c    30
1 file changed, 11 insertions, 19 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index e09fbfb..8b843dc 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1255,17 +1255,6 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
         }
 
         /*
-         * try to do any readahead that we might have free pages for.
-         */
-        if ((rahead + rbehind) >
-                ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
-                pagedaemon_wakeup();
-                marray[0] = m;
-                *reqpage = 0;
-                return 1;
-        }
-
-        /*
          * scan backward for the read behind pages -- in memory
          */
         if (pindex > 0) {
@@ -1280,21 +1269,24 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
                     rtm->pindex >= startpindex)
                         startpindex = rtm->pindex + 1;
 
-                for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {
+                /* tpindex is unsigned; beware of numeric underflow. */
+                for (i = 0, tpindex = pindex - 1; tpindex >= startpindex &&
+                    tpindex < pindex; i++, tpindex--) {
 
                         rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
                         if (rtm == NULL) {
-                                vm_page_lock_queues();
+                                /*
+                                 * Shift the allocated pages to the
+                                 * beginning of the array.
+                                 */
                                 for (j = 0; j < i; j++) {
-                                        vm_page_free(marray[j]);
+                                        marray[j] = marray[j + tpindex + 1 -
+                                            startpindex];
                                 }
-                                vm_page_unlock_queues();
-                                marray[0] = m;
-                                *reqpage = 0;
-                                return 1;
+                                break;
                         }
 
-                        marray[i] = rtm;
+                        marray[tpindex - startpindex] = rtm;
                 }
         } else {
                 startpindex = 0;
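To check the shift arithmetic in the hunk above with hypothetical numbers: let startpindex = 10 and pindex = 14. The scan allocates pindex 13 into marray[3] and pindex 12 into marray[2], then fails at tpindex = 11 with i = 2. The copy marray[j] = marray[j + tpindex + 1 - startpindex] moves marray[2] and marray[3] into marray[0] and marray[1], so the two surviving pages end up at the front of the array in ascending index order, ready for use.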