author     kib <kib@FreeBSD.org>    2014-08-25 21:16:57 +0000
committer  kib <kib@FreeBSD.org>    2014-08-25 21:16:57 +0000
commit     982492098020b7969f4cbe23875a1ee6be782ebd (patch)
tree       5ae75a5528da903853401493a1f2d8ac814ff2d0 /sys/vm
parent     511f9b75258e6b2a2341104376e11174a75322b9 (diff)
MFC r261412 (by alc):
Make prefaulting more aggressive on hard faults.
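In outline, the change drops the fixed prefault_pageorder[] table and instead sizes the
prefault window from the pager read that satisfied the fault: on a hard fault,
vm_fault_prefault() now spans the faultcount pages the pager brought in (reqpage pages
backward, faultcount - reqpage - 1 pages forward), falling back to the old fixed
PFBAK/PFFOR window on a soft fault. The following standalone sketch of that selection
logic is illustrative only, not the kernel code; choose_window() and the values passed
to it in main() are hypothetical:

/*
 * Sketch of the prefault-window selection added by this commit.
 * "faultcount" is the number of pages read in by the pager and
 * "reqpage" is the index of the originally requested page within
 * that run; both are 0 on a soft fault.
 */
#include <stdio.h>

#define	PFBAK	4
#define	PFFOR	4

static void
choose_window(int faultcount, int reqpage, int *backward, int *forward)
{
	if (faultcount > 0) {
		/* Hard fault: cover the pages the pager just read. */
		*backward = reqpage;
		*forward = faultcount - reqpage - 1;
	} else {
		/* Soft fault: keep the old fixed window. */
		*backward = PFBAK;
		*forward = PFFOR;
	}
}

int
main(void)
{
	int b, f;

	choose_window(9, 3, &b, &f);	/* e.g. a 9-page read, page 3 requested */
	printf("hard fault: %d back, %d forward\n", b, f);	/* 3 back, 5 forward */
	choose_window(0, 0, &b, &f);
	printf("soft fault: %d back, %d forward\n", b, f);	/* 4 back, 4 forward */
	return (0);
}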
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_fault.c | 56
1 file changed, 32 insertions(+), 24 deletions(-)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 5f7bab0..b5dc373 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -104,17 +104,8 @@ __FBSDID("$FreeBSD$");
#define PFBAK 4
#define PFFOR 4
-#define PAGEORDER_SIZE (PFBAK+PFFOR)
-
-static int prefault_pageorder[] = {
- -1 * PAGE_SIZE, 1 * PAGE_SIZE,
- -2 * PAGE_SIZE, 2 * PAGE_SIZE,
- -3 * PAGE_SIZE, 3 * PAGE_SIZE,
- -4 * PAGE_SIZE, 4 * PAGE_SIZE
-};
static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
-static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
#define VM_FAULT_READ_BEHIND 8
#define VM_FAULT_READ_MAX (1 + VM_FAULT_READ_AHEAD_MAX)
@@ -136,6 +127,8 @@ struct faultstate {
};
static void vm_fault_cache_behind(const struct faultstate *fs, int distance);
+static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
+ int faultcount, int reqpage);
static inline void
release_page(struct faultstate *fs)
@@ -911,7 +904,7 @@ vnode_locked:
pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
- vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
+ vm_fault_prefault(&fs, vaddr, faultcount, reqpage);
VM_OBJECT_WLOCK(fs.object);
vm_page_lock(fs.m);
@@ -1006,31 +999,49 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
* of mmap time.
*/
static void
-vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
+vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
+ int faultcount, int reqpage)
{
- int i;
+ pmap_t pmap;
+ vm_map_entry_t entry;
+ vm_object_t backing_object, lobject;
vm_offset_t addr, starta;
vm_pindex_t pindex;
vm_page_t m;
- vm_object_t object;
+ int backward, forward, i;
+ pmap = fs->map->pmap;
if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
return;
- object = entry->object.vm_object;
+ if (faultcount > 0) {
+ backward = reqpage;
+ forward = faultcount - reqpage - 1;
+ } else {
+ backward = PFBAK;
+ forward = PFFOR;
+ }
+ entry = fs->entry;
- starta = addra - PFBAK * PAGE_SIZE;
+ starta = addra - backward * PAGE_SIZE;
if (starta < entry->start) {
starta = entry->start;
} else if (starta > addra) {
starta = 0;
}
- for (i = 0; i < PAGEORDER_SIZE; i++) {
- vm_object_t backing_object, lobject;
-
- addr = addra + prefault_pageorder[i];
- if (addr > addra + (PFFOR * PAGE_SIZE))
+ /*
+ * Generate the sequence of virtual addresses that are candidates for
+ * prefaulting in an outward spiral from the faulting virtual address,
+ * "addra". Specifically, the sequence is "addra - PAGE_SIZE", "addra
+ * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
+ * If the candidate address doesn't have a backing physical page, then
+ * the loop immediately terminates.
+ */
+ for (i = 0; i < 2 * imax(backward, forward); i++) {
+ addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
+ PAGE_SIZE);
+ if (addr > addra + forward * PAGE_SIZE)
addr = 0;
if (addr < starta || addr >= entry->end)
@@ -1040,7 +1051,7 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
continue;
pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
- lobject = object;
+ lobject = entry->object.vm_object;
VM_OBJECT_RLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
lobject->type == OBJT_DEFAULT &&
@@ -1052,9 +1063,6 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
VM_OBJECT_RUNLOCK(lobject);
lobject = backing_object;
}
- /*
- * give-up when a page is not in memory
- */
if (m == NULL) {
VM_OBJECT_RUNLOCK(lobject);
break;
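For reference, the outward-spiral candidate ordering documented in the rewritten loop
above ("addra - PAGE_SIZE", "addra + PAGE_SIZE", "addra - 2 * PAGE_SIZE", ...) can be
reproduced with this small userland program. PAGE_SIZE, addra, and the window sizes
here are illustrative values, not taken from the kernel:

/*
 * Print the prefault candidate offsets in the order the kernel loop
 * visits them: alternating one page before and one page after the
 * faulting address, spiraling outward.
 */
#include <stdio.h>

#define	PAGE_SIZE	4096

static int
imax(int a, int b)
{
	return (a > b ? a : b);
}

int
main(void)
{
	long addra = 0x100000;		/* hypothetical faulting address */
	int backward = 2, forward = 3;	/* hypothetical window */
	long addr;
	int i;

	for (i = 0; i < 2 * imax(backward, forward); i++) {
		/* -1, +1, -2, +2, ... pages from the faulting address. */
		addr = addra + ((i >> 1) + 1) *
		    ((i & 1) == 0 ? -PAGE_SIZE : PAGE_SIZE);
		printf("candidate %d: %+ld pages\n", i,
		    (addr - addra) / PAGE_SIZE);
	}
	return (0);
}

In the kernel, candidates that fall outside the asymmetric window are then discarded:
addresses past addra + forward * PAGE_SIZE are zeroed, and the subsequent bounds check
against starta and entry->end skips them, so only the backward/forward span chosen
above is actually prefaulted.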