From dcb93e6c955b320316982f0c9532ae0b0f8663ea Mon Sep 17 00:00:00 2001
From: alc
Date: Fri, 27 Nov 2009 20:24:11 +0000
Subject: Simplify the invocation of vm_fault(). Specifically, eliminate the
 flag VM_FAULT_DIRTY. The information provided by this flag can be trivially
 inferred by vm_fault().

Discussed with: kib
---
 sys/amd64/amd64/trap.c         |  4 +---
 sys/arm/arm/trap.c             |  3 +--
 sys/dev/cxgb/ulp/tom/cxgb_vm.c |  3 +--
 sys/i386/i386/trap.c           |  4 +---
 sys/ia64/ia64/trap.c           |  3 +--
 sys/mips/mips/trap.c           | 12 +++---------
 sys/powerpc/aim/trap.c         |  4 +---
 sys/powerpc/booke/trap.c       |  3 +--
 sys/sparc64/sparc64/trap.c     |  9 +++------
 sys/sun4v/sun4v/trap.c         |  9 +++------
 sys/vm/vm_fault.c              | 19 +++++++++++--------
 sys/vm/vm_map.h                |  1 -
 12 files changed, 27 insertions(+), 47 deletions(-)

diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 3f7ea81..df301d1 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -750,9 +750,7 @@ trap_pfault(frame, usermode)
 		PROC_UNLOCK(p);
 
 		/* Fault in the user page: */
-		rv = vm_fault(map, va, ftype,
-		    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
-		    : VM_FAULT_NORMAL);
+		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 
 		PROC_LOCK(p);
 		--p->p_lock;
diff --git a/sys/arm/arm/trap.c b/sys/arm/arm/trap.c
index 348ec5a..284e797 100644
--- a/sys/arm/arm/trap.c
+++ b/sys/arm/arm/trap.c
@@ -425,8 +425,7 @@ data_abort_handler(trapframe_t *tf)
 		p->p_lock++;
 		PROC_UNLOCK(p);
 	}
-	error = vm_fault(map, va, ftype, (ftype & VM_PROT_WRITE) ?
-	    VM_FAULT_DIRTY : VM_FAULT_NORMAL);
+	error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 	pcb->pcb_onfault = onfault;
 
 	if (map != kernel_map) {
diff --git a/sys/dev/cxgb/ulp/tom/cxgb_vm.c b/sys/dev/cxgb/ulp/tom/cxgb_vm.c
index e7a3893..9f366e8 100644
--- a/sys/dev/cxgb/ulp/tom/cxgb_vm.c
+++ b/sys/dev/cxgb/ulp/tom/cxgb_vm.c
@@ -131,8 +131,7 @@ vm_fault_hold_user_pages(vm_map_t map, vm_offset_t addr, vm_page_t *mp,
 	 * taken away from us before it is held
 	 */
 	while (*pages == NULL) {
-		rv = vm_fault(map, va, prot,
-		    (prot & VM_PROT_WRITE) ? VM_FAULT_DIRTY : VM_FAULT_NORMAL);
+		rv = vm_fault(map, va, prot, VM_FAULT_NORMAL);
 		if (rv)
 			goto error;
 		*pages = pmap_extract_and_hold(pmap, va, prot);
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index f8d0ea0..1d3dc3b 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -825,9 +825,7 @@ trap_pfault(frame, usermode, eva)
 		PROC_UNLOCK(p);
 
 		/* Fault in the user page: */
-		rv = vm_fault(map, va, ftype,
-		    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
-		    : VM_FAULT_NORMAL);
+		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 
 		PROC_LOCK(p);
 		--p->p_lock;
diff --git a/sys/ia64/ia64/trap.c b/sys/ia64/ia64/trap.c
index c966f0a..4e38b5e 100644
--- a/sys/ia64/ia64/trap.c
+++ b/sys/ia64/ia64/trap.c
@@ -572,8 +572,7 @@ trap(int vector, struct trapframe *tf)
 			PROC_UNLOCK(p);
 
 			/* Fault in the user page: */
-			rv = vm_fault(map, va, ftype, (ftype & VM_PROT_WRITE)
-			    ? VM_FAULT_DIRTY : VM_FAULT_NORMAL);
+			rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 
 			PROC_LOCK(p);
 			--p->p_lock;
diff --git a/sys/mips/mips/trap.c b/sys/mips/mips/trap.c
index 1124b1c..3be4dcc 100644
--- a/sys/mips/mips/trap.c
+++ b/sys/mips/mips/trap.c
@@ -538,17 +538,11 @@ dofault:
 		struct vmspace *vm;
 		vm_map_t map;
 		int rv = 0;
-		int flag;
 
 		vm = p->p_vmspace;
 		map = &vm->vm_map;
 		va = trunc_page((vm_offset_t)trapframe->badvaddr);
-		if ((vm_offset_t)trapframe->badvaddr < VM_MIN_KERNEL_ADDRESS) {
-			if (ftype & VM_PROT_WRITE)
-				flag = VM_FAULT_DIRTY;
-			else
-				flag = VM_FAULT_NORMAL;
-		} else {
+		if ((vm_offset_t)trapframe->badvaddr >= VM_MIN_KERNEL_ADDRESS) {
 			/*
 			 * Don't allow user-mode faults in kernel
 			 * address space.
@@ -564,14 +558,14 @@ dofault:
 		++p->p_lock;
 		PROC_UNLOCK(p);
 
-		rv = vm_fault(map, va, ftype, flag);
+		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 
 		PROC_LOCK(p);
 		--p->p_lock;
 		PROC_UNLOCK(p);
 #ifdef VMFAULT_TRACE
 		printf("vm_fault(%x (pmap %x), %x (%x), %x, %d) -> %x at pc %x\n",
-		    map, &vm->vm_pmap, va, trapframe->badvaddr, ftype, flag,
+		    map, &vm->vm_pmap, va, trapframe->badvaddr, ftype, VM_FAULT_NORMAL,
 		    rv, trapframe->pc);
 #endif
 
diff --git a/sys/powerpc/aim/trap.c b/sys/powerpc/aim/trap.c
index ca122ac..943e3c6 100644
--- a/sys/powerpc/aim/trap.c
+++ b/sys/powerpc/aim/trap.c
@@ -491,9 +491,7 @@ trap_pfault(struct trapframe *frame, int user)
 		PROC_UNLOCK(p);
 
 		/* Fault in the user page: */
-		rv = vm_fault(map, va, ftype,
-		    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
-		    : VM_FAULT_NORMAL);
+		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 
 		PROC_LOCK(p);
 		--p->p_lock;
diff --git a/sys/powerpc/booke/trap.c b/sys/powerpc/booke/trap.c
index d98efb9..db9be64 100644
--- a/sys/powerpc/booke/trap.c
+++ b/sys/powerpc/booke/trap.c
@@ -497,8 +497,7 @@ trap_pfault(struct trapframe *frame, int user)
 		PROC_UNLOCK(p);
 
 		/* Fault in the user page: */
-		rv = vm_fault(map, va, ftype,
-		    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : VM_FAULT_NORMAL);
+		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 
 		PROC_LOCK(p);
 		--p->p_lock;
diff --git a/sys/sparc64/sparc64/trap.c b/sys/sparc64/sparc64/trap.c
index 0c07eaf..686fcba 100644
--- a/sys/sparc64/sparc64/trap.c
+++ b/sys/sparc64/sparc64/trap.c
@@ -409,7 +409,6 @@ trap_pfault(struct thread *td, struct trapframe *tf)
 	vm_prot_t prot;
 	vm_map_entry_t entry;
 	u_long ctx;
-	int flags;
 	int type;
 	int rv;
 
@@ -429,15 +428,13 @@ trap_pfault(struct thread *td, struct trapframe *tf)
 	CTR4(KTR_TRAP, "trap_pfault: td=%p pm_ctx=%#lx va=%#lx ctx=%#lx",
 	    td, p->p_vmspace->vm_pmap.pm_context[curcpu], va, ctx);
 
-	if (type == T_DATA_PROTECTION) {
+	if (type == T_DATA_PROTECTION)
 		prot = VM_PROT_WRITE;
-		flags = VM_FAULT_DIRTY;
-	} else {
+	else {
 		if (type == T_DATA_MISS)
 			prot = VM_PROT_READ;
 		else
 			prot = VM_PROT_READ | VM_PROT_EXECUTE;
-		flags = VM_FAULT_NORMAL;
 	}
 
 	if (ctx != TLB_CTX_KERNEL) {
@@ -463,7 +460,7 @@ trap_pfault(struct thread *td, struct trapframe *tf)
 		PROC_UNLOCK(p);
 
 		/* Fault in the user page. */
-		rv = vm_fault(&vm->vm_map, va, prot, flags);
+		rv = vm_fault(&vm->vm_map, va, prot, VM_FAULT_NORMAL);
 
 		/*
 		 * Now the process can be swapped again.
diff --git a/sys/sun4v/sun4v/trap.c b/sys/sun4v/sun4v/trap.c
index 353462d..702fd5c 100644
--- a/sys/sun4v/sun4v/trap.c
+++ b/sys/sun4v/sun4v/trap.c
@@ -460,7 +460,6 @@ trap_pfault(struct thread *td, struct trapframe *tf, int64_t type, uint64_t data
 	vm_offset_t va;
 	vm_prot_t prot;
 	u_long ctx;
-	int flags;
 	int rv;
 
 	if (td == NULL)
@@ -487,15 +486,13 @@ trap_pfault(struct thread *td, struct trapframe *tf, int64_t type, uint64_t data
 	KASSERT(td->td_proc->p_vmspace != NULL,
 	    ("trap_pfault: vmspace NULL"));
 
-	if (type == T_DATA_PROTECTION) {
+	if (type == T_DATA_PROTECTION)
 		prot = VM_PROT_WRITE;
-		flags = VM_FAULT_DIRTY;
-	} else {
+	else {
 		if (type == T_DATA_MISS)
 			prot = VM_PROT_READ;
 		else
 			prot = VM_PROT_READ | VM_PROT_EXECUTE;
-		flags = VM_FAULT_NORMAL;
 	}
 
 	if (ctx != TLB_CTX_KERNEL) {
@@ -521,7 +518,7 @@ trap_pfault(struct thread *td, struct trapframe *tf, int64_t type, uint64_t data
 		PROC_UNLOCK(p);
 
 		/* Fault in the user page. */
-		rv = vm_fault(&vm->vm_map, va, prot, flags);
+		rv = vm_fault(&vm->vm_map, va, prot, VM_FAULT_NORMAL);
 
 		/*
 		 * Now the process can be swapped again.
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 6a261c4..0896cba 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -857,19 +857,12 @@ vnode_locked:
 		vm_object_set_writeable_dirty(fs.object);
 
 		/*
-		 * If the fault is a write, we know that this page is being
-		 * written NOW so dirty it explicitly to save on
-		 * pmap_is_modified() calls later.
-		 *
 		 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
 		 * if the page is already dirty to prevent data written with
 		 * the expectation of being synced from not being synced.
 		 * Likewise if this entry does not request NOSYNC then make
 		 * sure the page isn't marked NOSYNC. Applications sharing
 		 * data should use the same flags to avoid ping ponging.
-		 *
-		 * Also tell the backing pager, if any, that it should remove
-		 * any swap backing since the page is now dirty.
 		 */
 		if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
 			if (fs.m->dirty == 0)
@@ -877,7 +870,17 @@ vnode_locked:
 		} else {
 			fs.m->oflags &= ~VPO_NOSYNC;
 		}
-		if (fault_flags & VM_FAULT_DIRTY) {
+
+		/*
+		 * If the fault is a write, we know that this page is being
+		 * written NOW so dirty it explicitly to save on
+		 * pmap_is_modified() calls later.
+		 *
+		 * Also tell the backing pager, if any, that it should remove
+		 * any swap backing since the page is now dirty.
+		 */
+		if ((fault_type & VM_PROT_WRITE) != 0 &&
+		    (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) {
 			vm_page_dirty(fs.m);
 			vm_pager_page_unswapped(fs.m);
 		}
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index dbf2c67..9630845 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -319,7 +319,6 @@ long vmspace_wired_count(struct vmspace *vmspace);
  */
 #define	VM_FAULT_NORMAL	0		/* Nothing special */
 #define	VM_FAULT_CHANGE_WIRING	1	/* Change the wiring as appropriate */
-#define	VM_FAULT_DIRTY	8		/* Dirty the page */
 
 /*
  * The following "find_space" options are supported by vm_map_find()
-- 
cgit v1.1
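
Editor's note (not part of the patch): the sketch below is a minimal standalone C program that models the inference the commit message describes. Before this change, every trap handler passed VM_FAULT_DIRTY whenever the fault type included VM_PROT_WRITE; after it, vm_fault() derives the same answer from its own fault_type and fault_flags arguments, excluding wiring changes. The VM_FAULT_* values mirror the sys/vm/vm_map.h lines touched by the patch; the VM_PROT_* values and the helper names old_should_dirty()/new_should_dirty() are assumptions introduced only for this illustration.

/*
 * Standalone illustration (not kernel code) of the inference this patch
 * moves into vm_fault(): a page must be dirtied exactly when the fault
 * type includes a write and the call is not a wiring change.
 */
#include <assert.h>
#include <stdio.h>

#define	VM_PROT_READ		0x01	/* assumed value, for illustration */
#define	VM_PROT_WRITE		0x02	/* assumed value, for illustration */

#define	VM_FAULT_NORMAL		0	/* Nothing special */
#define	VM_FAULT_CHANGE_WIRING	1	/* Change the wiring as appropriate */
#define	VM_FAULT_DIRTY		8	/* Old flag, removed by this patch */

/* Old scheme: the trap handler decided and passed VM_FAULT_DIRTY. */
static int
old_should_dirty(int fault_flags)
{

	return ((fault_flags & VM_FAULT_DIRTY) != 0);
}

/* New scheme: vm_fault() infers the same thing from its own arguments. */
static int
new_should_dirty(int fault_type, int fault_flags)
{

	return ((fault_type & VM_PROT_WRITE) != 0 &&
	    (fault_flags & VM_FAULT_CHANGE_WIRING) == 0);
}

int
main(void)
{

	/* A write fault: old callers passed VM_FAULT_DIRTY explicitly. */
	assert(old_should_dirty(VM_FAULT_DIRTY) ==
	    new_should_dirty(VM_PROT_WRITE, VM_FAULT_NORMAL));

	/* A read fault: neither scheme dirties the page. */
	assert(old_should_dirty(VM_FAULT_NORMAL) ==
	    new_should_dirty(VM_PROT_READ, VM_FAULT_NORMAL));

	/* Wiring changes never passed VM_FAULT_DIRTY and stay excluded. */
	assert(old_should_dirty(VM_FAULT_CHANGE_WIRING) ==
	    new_should_dirty(VM_PROT_WRITE, VM_FAULT_CHANGE_WIRING));

	printf("dirty inference matches the old explicit flag\n");
	return (0);
}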