commit 048ed92d7203d427b1aecb04c2bd81fdc6776530 (patch)
author:    kib <kib@FreeBSD.org>  2015-08-06 08:51:15 +0000
committer: kib <kib@FreeBSD.org>  2015-08-06 08:51:15 +0000
tree:      88509edd14232cc8e20fe9ff7947c8f614356986 /sys/vm
parent:    42c87af3f5887a39f64e5c00827506eef459c1c8 (diff)
download:  FreeBSD-src-048ed92d7203d427b1aecb04c2bd81fdc6776530.zip
           FreeBSD-src-048ed92d7203d427b1aecb04c2bd81fdc6776530.tar.gz
MFC r286086:
Do not pretend that vm_fault(9) supports unwiring the address.
Diffstat (limited to 'sys/vm'):
  sys/vm/vm_fault.c | 39 +++++++++++++++-----------------------
  sys/vm/vm_map.c   |  2 +-
  sys/vm/vm_map.h   |  6 +++---
  3 files changed, 21 insertions(+), 26 deletions(-)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index 0c111da..b5ac58f 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -189,7 +189,7 @@ vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot, VM_OBJECT_ASSERT_LOCKED(m->object); need_dirty = ((fault_type & VM_PROT_WRITE) != 0 && - (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) || + (fault_flags & VM_FAULT_WIRE) == 0) || (fault_flags & VM_FAULT_DIRTY) != 0; if (set_wd) @@ -240,15 +240,6 @@ vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot, } /* - * TRYPAGER - used by vm_fault to calculate whether the pager for the - * current object *might* contain the page. - * - * default objects are zero-fill, there is no real pager. - */ -#define TRYPAGER (fs.object->type != OBJT_DEFAULT && \ - ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired)) - -/* * vm_fault: * * Handle a page fault occurring at the given address, @@ -358,9 +349,12 @@ RetryFault:; if (wired) fault_type = prot | (fault_type & VM_PROT_COPY); + else + KASSERT((fault_flags & VM_FAULT_WIRE) == 0, + ("!wired && VM_FAULT_WIRE")); if (fs.vp == NULL /* avoid locked vnode leak */ && - (fault_flags & (VM_FAULT_CHANGE_WIRING | VM_FAULT_DIRTY)) == 0 && + (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0 && /* avoid calling vm_object_set_writeable_dirty() */ ((prot & VM_PROT_WRITE) == 0 || (fs.first_object->type != OBJT_VNODE && @@ -506,10 +500,12 @@ fast_failed: } /* - * Page is not resident, If this is the search termination + * Page is not resident. If this is the search termination * or the pager might contain the page, allocate a new page. + * Default objects are zero-fill, there is no real pager. 
*/ - if (TRYPAGER || fs.object == fs.first_object) { + if (fs.object->type != OBJT_DEFAULT || + fs.object == fs.first_object) { if (fs.pindex >= fs.object->size) { unlock_and_deallocate(&fs); return (KERN_PROTECTION_FAILURE); @@ -556,9 +552,10 @@ readrest: * * Attempt to fault-in the page if there is a chance that the * pager has it, and potentially fault in additional pages - * at the same time. + * at the same time. For default objects simply provide + * zero-filled pages. */ - if (TRYPAGER) { + if (fs.object->type != OBJT_DEFAULT) { int rv; u_char behavior = vm_map_entry_behavior(fs.entry); @@ -868,7 +865,7 @@ vnode_locked: pmap_copy_page(fs.m, fs.first_m); fs.first_m->valid = VM_PAGE_BITS_ALL; if (wired && (fault_flags & - VM_FAULT_CHANGE_WIRING) == 0) { + VM_FAULT_WIRE) == 0) { vm_page_lock(fs.first_m); vm_page_wire(fs.first_m); vm_page_unlock(fs.first_m); @@ -989,7 +986,7 @@ vnode_locked: */ pmap_enter(fs.map->pmap, vaddr, fs.m, prot, fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0); - if (faultcount != 1 && (fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && + if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 && wired == 0) vm_fault_prefault(&fs, vaddr, faultcount, reqpage); VM_OBJECT_WLOCK(fs.object); @@ -999,11 +996,9 @@ vnode_locked: * If the page is not wired down, then put it where the pageout daemon * can find it. */ - if (fault_flags & VM_FAULT_CHANGE_WIRING) { - if (wired) - vm_page_wire(fs.m); - else - vm_page_unwire(fs.m, 1); + if ((fault_flags & VM_FAULT_WIRE) != 0) { + KASSERT(wired, ("VM_FAULT_WIRE && !wired")); + vm_page_wire(fs.m); } else vm_page_activate(fs.m); if (m_hold != NULL) { diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 8493ee3..93db8d1 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -2603,7 +2603,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, * it into the physical map. 
*/ if ((rv = vm_fault(map, faddr, VM_PROT_NONE, - VM_FAULT_CHANGE_WIRING)) != KERN_SUCCESS) + VM_FAULT_WIRE)) != KERN_SUCCESS) break; } while ((faddr += PAGE_SIZE) < saved_end); vm_map_lock(map); diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h index 8cced05..a8378ef 100644 --- a/sys/vm/vm_map.h +++ b/sys/vm/vm_map.h @@ -327,9 +327,9 @@ long vmspace_resident_count(struct vmspace *vmspace); /* * vm_fault option flags */ -#define VM_FAULT_NORMAL 0 /* Nothing special */ -#define VM_FAULT_CHANGE_WIRING 1 /* Change the wiring as appropriate */ -#define VM_FAULT_DIRTY 2 /* Dirty the page; use w/VM_PROT_COPY */ +#define VM_FAULT_NORMAL 0 /* Nothing special */ +#define VM_FAULT_WIRE 1 /* Wire the mapped page */ +#define VM_FAULT_DIRTY 2 /* Dirty the page; use w/VM_PROT_COPY */ /* * Initially, mappings are slightly sequential. The maximum window size must |