-rw-r--r--  sys/kern/sys_process.c  21
-rw-r--r--  sys/vm/vm.h              2
-rw-r--r--  sys/vm/vm_fault.c        2
-rw-r--r--  sys/vm/vm_map.c         29
4 files changed, 22 insertions, 32 deletions
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 1aa6995..dfc36ba 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
#include <vm/vm_param.h>
#ifdef COMPAT_IA32
@@ -213,10 +214,10 @@ int
proc_rwmem(struct proc *p, struct uio *uio)
{
vm_map_t map;
- vm_object_t backing_object, object = NULL;
- vm_offset_t pageno = 0; /* page number */
+ vm_object_t backing_object, object;
+ vm_offset_t pageno; /* page number */
vm_prot_t reqprot;
- int error, fault_flags, writing;
+ int error, writing;
/*
* Assert that someone has locked this vmspace. (Should be
@@ -232,9 +233,7 @@ proc_rwmem(struct proc *p, struct uio *uio)
map = &p->p_vmspace->vm_map;
writing = uio->uio_rw == UIO_WRITE;
- reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
- VM_PROT_READ;
- fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;
+ reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
/*
* Only map in one page at a time. We don't have to, but it
@@ -269,7 +268,7 @@ proc_rwmem(struct proc *p, struct uio *uio)
/*
* Fault the page on behalf of the process
*/
- error = vm_fault(map, pageno, reqprot, fault_flags);
+ error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
if (error) {
if (error == KERN_RESOURCE_SHORTAGE)
error = ENOMEM;
@@ -279,8 +278,8 @@ proc_rwmem(struct proc *p, struct uio *uio)
}
/*
- * Now we need to get the page. out_entry, wired,
- * and single_use aren't used. One would think the vm code
+ * Now we need to get the page. out_entry and wired
+ * aren't used. One would think the vm code
* would be a *bit* nicer... We use tmap because
* vm_map_lookup() can change the map argument.
*/
@@ -303,6 +302,10 @@ proc_rwmem(struct proc *p, struct uio *uio)
VM_OBJECT_UNLOCK(object);
object = backing_object;
}
+ if (writing && m != NULL) {
+ vm_page_dirty(m);
+ vm_pager_page_unswapped(m);
+ }
VM_OBJECT_UNLOCK(object);
if (m == NULL) {
vm_map_lookup_done(tmap, out_entry);
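With this change a debugger write no longer demands write permission from the fault code at all: proc_rwmem() requests VM_PROT_COPY | VM_PROT_READ, so vm_fault() materializes a private copy of the page, and proc_rwmem() then marks the page dirty itself (vm_page_dirty() plus vm_pager_page_unswapped()) instead of passing VM_FAULT_DIRTY. A minimal userland sketch of the path that ends up in proc_rwmem(), patching one word of a stopped child's read-only text with ptrace(2); the helper name and arguments are illustrative only:

#include <sys/types.h>
#include <sys/ptrace.h>

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical helper: write one word into a traced child's text
 * segment.  PT_WRITE_I is serviced by proc_rwmem(), which after
 * this change faults the page with VM_PROT_COPY | VM_PROT_READ to
 * force a private copy even though the mapping itself is r-x.
 */
static int
patch_text(pid_t pid, caddr_t addr, int insn)
{

	errno = 0;
	if (ptrace(PT_WRITE_I, pid, addr, insn) == -1 && errno != 0) {
		perror("ptrace(PT_WRITE_I)");
		return (-1);
	}
	return (0);
}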
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 941300a..6dd3cbe 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -76,7 +76,7 @@ typedef u_char vm_prot_t; /* protection codes */
#define VM_PROT_READ ((vm_prot_t) 0x01)
#define VM_PROT_WRITE ((vm_prot_t) 0x02)
#define VM_PROT_EXECUTE ((vm_prot_t) 0x04)
-#define VM_PROT_OVERRIDE_WRITE ((vm_prot_t) 0x08) /* copy-on-write */
+#define VM_PROT_COPY ((vm_prot_t) 0x08) /* copy-on-read */
#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
#define VM_PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
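The rename also changes the flag's meaning: VM_PROT_OVERRIDE_WRITE told the map lookup to substitute the entry's max_protection, while VM_PROT_COPY simply requests a private copy on what is otherwise a read fault. Because the bit lies outside VM_PROT_ALL, the ordinary protection check still passes on a read-only mapping. A standalone sketch of the bit arithmetic, assuming only the definitions above (everything else is local to the example):

#include <stdio.h>

typedef unsigned char vm_prot_t;	/* as in sys/vm/vm.h */

#define	VM_PROT_READ	((vm_prot_t) 0x01)
#define	VM_PROT_WRITE	((vm_prot_t) 0x02)
#define	VM_PROT_EXECUTE	((vm_prot_t) 0x04)
#define	VM_PROT_COPY	((vm_prot_t) 0x08)
#define	VM_PROT_ALL	(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)

int
main(void)
{
	/* The debugger's request after this change... */
	vm_prot_t fault_typea = VM_PROT_COPY | VM_PROT_READ;
	/* ...masked the way the lookup code masks it... */
	vm_prot_t fault_type = fault_typea & VM_PROT_ALL;
	/* ...checked against a typical r-x text mapping. */
	vm_prot_t prot = VM_PROT_READ | VM_PROT_EXECUTE;

	/* Passes: only READ is actually demanded. */
	printf("access check: %s\n",
	    (fault_type & prot) == fault_type ? "pass" : "fail");
	/* The unmasked request still forces the private copy. */
	printf("forces copy:  %s\n",
	    (fault_typea & VM_PROT_COPY) != 0 ? "yes" : "no");
	return (0);
}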
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 202df3d..6a261c4 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -702,7 +702,7 @@ vnode_locked:
/*
* We only really need to copy if we want to write it.
*/
- if (fault_type & VM_PROT_WRITE) {
+ if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
/*
* This allows pages to be virtually copied from a
* backing_object into the first_object, where the
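The only adjustment vm_fault() needs is to treat an explicit copy request like a write when deciding whether to promote the page from a backing object into first_object; the read itself is serviced normally. Restated as a standalone predicate (a sketch, with types and values mirroring the vm.h hunk above):

typedef unsigned char vm_prot_t;

#define	VM_PROT_WRITE	((vm_prot_t) 0x02)
#define	VM_PROT_COPY	((vm_prot_t) 0x08)

/* Nonzero if the fault must copy the page up the shadow chain. */
static int
must_copy(vm_prot_t fault_type)
{

	return ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0);
}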
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index a77347f..d27c3a7 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -3554,14 +3554,8 @@ RetryLookup:;
/*
* Check whether this task is allowed to have this page.
- * Note the special case for MAP_ENTRY_COW
- * pages with an override. This is to implement a forced
- * COW for debuggers.
*/
- if (fault_type & VM_PROT_OVERRIDE_WRITE)
- prot = entry->max_protection;
- else
- prot = entry->protection;
+ prot = entry->protection;
fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
vm_map_unlock_read(map);
@@ -3569,8 +3563,7 @@ RetryLookup:;
}
if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
(entry->eflags & MAP_ENTRY_COW) &&
- (fault_type & VM_PROT_WRITE) &&
- (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
+ (fault_type & VM_PROT_WRITE)) {
vm_map_unlock_read(map);
return (KERN_PROTECTION_FAILURE);
}
@@ -3581,7 +3574,7 @@ RetryLookup:;
*/
*wired = (entry->wired_count != 0);
if (*wired)
- prot = fault_type = entry->protection;
+ fault_type = entry->protection;
size = entry->end - entry->start;
/*
* If the entry was copy-on-write, we either ...
@@ -3594,7 +3587,8 @@ RetryLookup:;
* If we don't need to write the page, we just demote the
* permissions allowed.
*/
- if (fault_type & VM_PROT_WRITE) {
+ if ((fault_type & VM_PROT_WRITE) != 0 ||
+ (fault_typea & VM_PROT_COPY) != 0) {
/*
* Make a new object, and place it in the object
* chain. Note that no new references have appeared
@@ -3717,21 +3711,14 @@ vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */
/*
* Check whether this task is allowed to have this page.
- * Note the special case for MAP_ENTRY_COW
- * pages with an override. This is to implement a forced
- * COW for debuggers.
*/
- if (fault_type & VM_PROT_OVERRIDE_WRITE)
- prot = entry->max_protection;
- else
- prot = entry->protection;
+ prot = entry->protection;
fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
if ((fault_type & prot) != fault_type)
return (KERN_PROTECTION_FAILURE);
if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
(entry->eflags & MAP_ENTRY_COW) &&
- (fault_type & VM_PROT_WRITE) &&
- (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0)
+ (fault_type & VM_PROT_WRITE))
return (KERN_PROTECTION_FAILURE);
/*
@@ -3740,7 +3727,7 @@ vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */
*/
*wired = (entry->wired_count != 0);
if (*wired)
- prot = fault_type = entry->protection;
+ fault_type = entry->protection;
if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
/*
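In both lookup paths the max_protection override is gone: the access check always uses entry->protection, and the forced copy is decided afterwards from the unmasked fault_typea. The wired-entry assignment shrinks correspondingly: prot already holds entry->protection, so only fault_type needs to be widened to the entry's full protection. A condensed, self-contained rendering of the reworked decision (not the committed code; the stub struct and function are local to this sketch):

typedef unsigned char vm_prot_t;

#define	VM_PROT_READ	((vm_prot_t) 0x01)
#define	VM_PROT_WRITE	((vm_prot_t) 0x02)
#define	VM_PROT_EXECUTE	((vm_prot_t) 0x04)
#define	VM_PROT_COPY	((vm_prot_t) 0x08)
#define	KERN_SUCCESS			0
#define	KERN_PROTECTION_FAILURE		2

/* Stub of the map-entry fields the sketch needs. */
struct map_entry_stub {
	vm_prot_t	protection;
	int		wired_count;
};

/*
 * Condensed rendering of the reworked vm_map_lookup() decision;
 * the real code also creates the shadow object and handles the
 * MAP_ENTRY_COW / MAP_ENTRY_USER_WIRED failure case.
 */
static int
lookup_sketch(const struct map_entry_stub *entry, vm_prot_t fault_typea,
    int *do_copy)
{
	vm_prot_t fault_type, prot;

	prot = entry->protection;	/* no max_protection override */
	fault_type = fault_typea &
	    (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
	if ((fault_type & prot) != fault_type)
		return (KERN_PROTECTION_FAILURE);
	if (entry->wired_count != 0)
		fault_type = entry->protection;	/* prot stays as-is */
	*do_copy = (fault_type & VM_PROT_WRITE) != 0 ||
	    (fault_typea & VM_PROT_COPY) != 0;
	return (KERN_SUCCESS);
}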