author		dyson <dyson@FreeBSD.org>	1996-12-14 17:54:17 +0000
committer	dyson <dyson@FreeBSD.org>	1996-12-14 17:54:17 +0000
commit		765e5fd282a4abb7b729faacccf7be3eacb1fed1 (patch)
tree		b1d0372c1df868572557b6875727b2db2a4648f4 /sys/vm/vm_fault.c
parent		7c80d56f5026a662a0d69806f8719ff816a95490 (diff)
Implement closer-to-POSIX mlock semantics. The major difference is
that we now allow mlock to span unallocated regions (without, of course,
mlocking them). We also allow mlocking of RO regions, which the old code
could not. The restriction there is that once an RO region is wired
(mlocked), it cannot be debugged (or EVER written to). Under normal
usage, the new mlock code will be a significant improvement over our
old stuff.
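
As a quick illustration of the new semantics (not part of this commit), a
userland program can now wire a read-only mapping. The snippet below is a
minimal sketch using ordinary POSIX/BSD interfaces, not a test from this
change; note that mlock may also require privilege (e.g. superuser or a
sufficient RLIMIT_MEMLOCK):

/*
 * Minimal sketch (assumes standard POSIX/BSD mmap and mlock): wire a
 * read-only anonymous mapping, which the pre-change mlock rejected.
 */
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t len = (size_t)getpagesize();
	void *p;

	/* A read-only private mapping. */
	p = mmap(NULL, len, PROT_READ, MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}

	/* With this change, wiring an RO region succeeds... */
	if (mlock(p, len) == -1) {
		perror("mlock");
		return (1);
	}

	/*
	 * ...but while wired, the region can never be written to
	 * (so wired .text cannot be debugged, per the message above).
	 */
	(void)munlock(p, len);
	(void)munmap(p, len);
	return (0);
}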
Diffstat (limited to 'sys/vm/vm_fault.c')
-rw-r--r--  sys/vm/vm_fault.c | 71
1 file changed, 69 insertions(+), 2 deletions(-)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index f395d97..ff64824 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_fault.c,v 1.57 1996/09/08 20:44:37 dyson Exp $
+ * $Id: vm_fault.c,v 1.58 1996/11/30 22:41:46 dyson Exp $
  */
 
 /*
@@ -202,6 +202,32 @@ RetryFault:;
 		    vaddr);
 	}
 
+	/*
+	 * If we are user-wiring a r/w segment, and it is COW, then
+	 * we need to do the COW operation.  Note that we don't
+	 * currently COW RO sections, because it is NOT desirable
+	 * to COW .text.  We simply keep .text from ever being COW'ed
+	 * and take the heat that one cannot debug wired .text sections.
+	 */
+	if ((change_wiring == VM_FAULT_USER_WIRE) && entry->needs_copy) {
+		if (entry->protection & VM_PROT_WRITE) {
+			int tresult;
+			vm_map_lookup_done(map, entry);
+
+			tresult = vm_map_lookup(&map, vaddr, VM_PROT_READ|VM_PROT_WRITE,
+				&entry, &first_object, &first_pindex, &prot, &wired, &su);
+			if (tresult != KERN_SUCCESS)
+				return tresult;
+		} else {
+			/*
+			 * If we don't COW now, on a user wire, the user will never
+			 * be able to write to the mapping.  If we don't make this
+			 * restriction, the bookkeeping would be nearly impossible.
+			 */
+			entry->max_protection &= ~VM_PROT_WRITE;
+		}
+	}
+
 	vp = vnode_pager_lock(first_object);
 	lookup_still_valid = TRUE;
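
To make the max_protection bookkeeping above concrete: once a read-only COW
mapping has been user-wired, it can never be upgraded to writable, so a
later mprotect() upgrade is expected to fail. A hypothetical userland
sketch follows (the file path is arbitrary and this is not a test from the
commit):

#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t len = (size_t)getpagesize();
	int fd = open("/bin/sh", O_RDONLY);	/* any readable file */
	void *p;

	if (fd == -1)
		return (1);
	/* Read-only, private (hence COW-capable) file mapping. */
	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	/* Wiring strips VM_PROT_WRITE from max_protection (see above). */
	if (mlock(p, len) == -1)
		return (1);
	/* Expected to fail now: the mapping can never become writable. */
	if (mprotect(p, len, PROT_READ | PROT_WRITE) == -1)
		perror("mprotect");	/* e.g. EACCES */
	(void)close(fd);
	return (0);
}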
@@ -839,7 +865,48 @@ vm_fault_wire(map, start, end)
 	 */
 
 	for (va = start; va < end; va += PAGE_SIZE) {
-		rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+		rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
+			VM_FAULT_CHANGE_WIRING);
+		if (rv) {
+			if (va != start)
+				vm_fault_unwire(map, start, va);
+			return (rv);
+		}
+	}
+	return (KERN_SUCCESS);
+}
+
+/*
+ * vm_fault_user_wire:
+ *
+ *	Wire down a range of virtual addresses in a map.  This
+ *	is for user mode though, so we only ask for read access
+ *	on currently read-only sections.
+ */
+int
+vm_fault_user_wire(map, start, end)
+	vm_map_t map;
+	vm_offset_t start, end;
+{
+
+	register vm_offset_t va;
+	register pmap_t pmap;
+	int rv;
+
+	pmap = vm_map_pmap(map);
+
+	/*
+	 * Inform the physical mapping system that the range of addresses may
+	 * not fault, so that page tables and such can be locked down as well.
+	 */
+	pmap_pageable(pmap, start, end, FALSE);
+
+	/*
+	 * We simulate a fault to get the page and enter it in the physical
+	 * map.
+	 */
+	for (va = start; va < end; va += PAGE_SIZE) {
+		rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_USER_WIRE);
 		if (rv) {
 			if (va != start)
 				vm_fault_unwire(map, start, va);
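
For orientation (not shown in this diff), the expected calling convention
for vm_fault_user_wire() can be sketched as below. example_wire_user_range()
is a hypothetical wrapper, not a function from this commit; the real call
site in the mlock() path is outside this file:

/*
 * Hypothetical wrapper, for illustration only: page-align the bounds
 * and wire them with read access, returning a KERN_* status.
 */
static int
example_wire_user_range(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_offset_t start = trunc_page(addr);
	vm_offset_t end = round_page(addr + size);

	/*
	 * Only VM_PROT_READ is faulted per page inside
	 * vm_fault_user_wire(), so read-only mappings stay wireable.
	 */
	return (vm_fault_user_wire(map, start, end));
}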