author     alc <alc@FreeBSD.org>    2009-11-18 18:05:54 +0000
committer  alc <alc@FreeBSD.org>    2009-11-18 18:05:54 +0000
commit     ca67dc4da40d836cf1919f08c2fba266a28d19b2 (patch)
tree       3197a0817148614fefed82c9320e8a968b09917e /sys/vm
parent     5df0c181df0fabff74fce8340202811f15a55b4e (diff)
Simplify both the invocation and the implementation of vm_fault() for wiring pages.

(Note: Claims made in the comments about the handling of breakpoints in wired pages have been false for roughly a decade. This and another bug involving breakpoints will be fixed in coming changes.)

Reviewed by: kib
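For context, a minimal sketch (not part of this commit) of the simplified wiring loop that results from the change, assuming the post-change declarations shown in the diff below (vm_fault_wire() without the user_wire argument, and VM_FAULT_USER_WIRE removed from vm_map.h):

    /*
     * Illustrative only: with VM_FAULT_USER_WIRE gone, the wiring path
     * faults each page in [start, end) with VM_PROT_NONE and the single
     * VM_FAULT_CHANGE_WIRING flag; vm_map_lookup() supplies the entry's
     * actual protection and now fails with KERN_PROTECTION_FAILURE on
     * PROT_NONE entries itself.
     */
    for (va = start; va < end; va += PAGE_SIZE) {
            rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);
            if (rv != KERN_SUCCESS) {
                    if (va != start)
                            vm_fault_unwire(map, start, va, fictitious);
                    return (rv);
            }
    }
    return (KERN_SUCCESS);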
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_extern.h |  2
-rw-r--r--  sys/vm/vm_fault.c  | 45
-rw-r--r--  sys/vm/vm_map.c    |  4
-rw-r--r--  sys/vm/vm_map.h    |  2
4 files changed, 16 insertions, 37 deletions
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index ff48983..ad6087e 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -58,7 +58,7 @@ int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
vm_ooffset_t *);
void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
-int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
+int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index b10270c..202df3d 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -185,7 +185,7 @@ unlock_and_deallocate(struct faultstate *fs)
* default objects are zero-fill, there is no real pager.
*/
#define TRYPAGER (fs.object->type != OBJT_DEFAULT && \
- (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
+ ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired))
/*
* vm_fault:
@@ -238,31 +238,15 @@ RetryFault:;
result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry,
&fs.first_object, &fs.first_pindex, &prot, &wired);
if (result != KERN_SUCCESS) {
- if (result != KERN_PROTECTION_FAILURE ||
- (fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) {
- if (growstack && result == KERN_INVALID_ADDRESS &&
- map != kernel_map && curproc != NULL) {
- result = vm_map_growstack(curproc, vaddr);
- if (result != KERN_SUCCESS)
- return (KERN_FAILURE);
- growstack = FALSE;
- goto RetryFault;
- }
- return (result);
+ if (growstack && result == KERN_INVALID_ADDRESS &&
+ map != kernel_map) {
+ result = vm_map_growstack(curproc, vaddr);
+ if (result != KERN_SUCCESS)
+ return (KERN_FAILURE);
+ growstack = FALSE;
+ goto RetryFault;
}
-
- /*
- * If we are user-wiring a r/w segment, and it is COW, then
- * we need to do the COW operation. Note that we don't COW
- * currently RO sections now, because it is NOT desirable
- * to COW .text. We simply keep .text from ever being COW'ed
- * and take the heat that one cannot debug wired .text sections.
- */
- result = vm_map_lookup(&fs.map, vaddr,
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
- &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired);
- if (result != KERN_SUCCESS)
- return (result);
+ return (result);
}
map_generation = fs.map->timestamp;
@@ -919,9 +903,8 @@ vnode_locked:
* won't find it (yet).
*/
pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
- if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
+ if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
- }
VM_OBJECT_LOCK(fs.object);
vm_page_lock_queues();
vm_page_flag_set(fs.m, PG_REFERENCED);
@@ -930,7 +913,7 @@ vnode_locked:
* If the page is not wired down, then put it where the pageout daemon
* can find it.
*/
- if (fault_flags & VM_FAULT_WIRE_MASK) {
+ if (fault_flags & VM_FAULT_CHANGE_WIRING) {
if (wired)
vm_page_wire(fs.m);
else
@@ -1048,7 +1031,7 @@ vm_fault_quick(caddr_t v, int prot)
*/
int
vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
- boolean_t user_wire, boolean_t fictitious)
+ boolean_t fictitious)
{
vm_offset_t va;
int rv;
@@ -1059,9 +1042,7 @@ vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
* read-only sections.
*/
for (va = start; va < end; va += PAGE_SIZE) {
- rv = vm_fault(map, va,
- user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
- user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);
+ rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);
if (rv) {
if (va != start)
vm_fault_unwire(map, start, va, fictitious);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 414d4e6..a77347f 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2381,7 +2381,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
*/
vm_map_unlock(map);
rv = vm_fault_wire(map, saved_start, saved_end,
- user_wire, fictitious);
+ fictitious);
vm_map_lock(map);
if (last_timestamp + 1 != map->timestamp) {
/*
@@ -3563,7 +3563,7 @@ RetryLookup:;
else
prot = entry->protection;
fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
- if ((fault_type & prot) != fault_type) {
+ if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
vm_map_unlock_read(map);
return (KERN_PROTECTION_FAILURE);
}
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 0c586ab..dbf2c67 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -319,8 +319,6 @@ long vmspace_wired_count(struct vmspace *vmspace);
*/
#define VM_FAULT_NORMAL 0 /* Nothing special */
#define VM_FAULT_CHANGE_WIRING 1 /* Change the wiring as appropriate */
-#define VM_FAULT_USER_WIRE 2 /* Likewise, but for user purposes */
-#define VM_FAULT_WIRE_MASK (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_DIRTY 8 /* Dirty the page */
/*