From 899ab645146d3b9d10334951eb65770773ed3630 Mon Sep 17 00:00:00 2001
From: attilio
Date: Mon, 5 Aug 2013 08:55:35 +0000
Subject: Revert r253939: We cannot busy a page before doing pagefaults.

In fact, it can deadlock against the vnode lock, as it tries to vget().
Other functions currently have the opposite lock ordering; for example,
vm_object_sync() acquires the vnode lock first and then sleeps on the
busy mechanism.

Before this patch is reinserted we need to break this ordering.

Sponsored by:	EMC / Isilon storage division
Reported by:	kib
---
 sys/kern/imgact_elf.c  |  6 +++---
 sys/kern/kern_exec.c   |  4 ++--
 sys/kern/sys_process.c | 11 +++++------
 3 files changed, 10 insertions(+), 11 deletions(-)

(limited to 'sys/kern')

diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 9533635..61a2aef 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -378,7 +378,7 @@ __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 	off = offset - trunc_page(offset);
 	error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
 	    end - start);
-	vm_imgact_unmap_page(object, sf);
+	vm_imgact_unmap_page(sf);
 	if (error) {
 		return (KERN_FAILURE);
 	}
@@ -433,7 +433,7 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 			sz = PAGE_SIZE - off;
 			error = copyout((caddr_t)sf_buf_kva(sf) + off,
 			    (caddr_t)start, sz);
-			vm_imgact_unmap_page(object, sf);
+			vm_imgact_unmap_page(sf);
 			if (error) {
 				return (KERN_FAILURE);
 			}
@@ -553,7 +553,7 @@ __elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
 		    trunc_page(offset + filsz);
 		error = copyout((caddr_t)sf_buf_kva(sf) + off,
 		    (caddr_t)map_addr, copy_len);
-		vm_imgact_unmap_page(object, sf);
+		vm_imgact_unmap_page(sf);
 		if (error) {
 			return (error);
 		}
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 0c8b0c1..c0e1435 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -973,7 +973,7 @@ exec_map_first_page(imgp)
 		vm_page_wakeup(ma[0]);
 	}
 	vm_page_lock(ma[0]);
-	vm_page_wire(ma[0]);
+	vm_page_hold(ma[0]);
 	vm_page_unlock(ma[0]);
 	VM_OBJECT_WUNLOCK(object);
 
@@ -994,7 +994,7 @@ exec_unmap_first_page(imgp)
 		sf_buf_free(imgp->firstpage);
 		imgp->firstpage = NULL;
 		vm_page_lock(m);
-		vm_page_unwire(m, 0);
+		vm_page_unhold(m);
 		vm_page_unlock(m);
 	}
 }
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 33fac71..5508dcf 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -263,7 +263,6 @@ proc_rwmem(struct proc *p, struct uio *uio)
 	writing = uio->uio_rw == UIO_WRITE;
 	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
 	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;
-	fault_flags |= VM_FAULT_IOBUSY;
 
 	/*
 	 * Only map in one page at a time.  We don't have to, but it
@@ -288,9 +287,9 @@ proc_rwmem(struct proc *p, struct uio *uio)
 		len = min(PAGE_SIZE - page_offset, uio->uio_resid);
 
 		/*
-		 * Fault and busy the page on behalf of the process.
+		 * Fault and hold the page on behalf of the process.
 		 */
-		error = vm_fault_handle(map, pageno, reqprot, fault_flags, &m);
+		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
 		if (error != KERN_SUCCESS) {
 			if (error == KERN_RESOURCE_SHORTAGE)
 				error = ENOMEM;
@@ -316,9 +315,9 @@ proc_rwmem(struct proc *p, struct uio *uio)
 		/*
 		 * Release the page.
 		 */
-		VM_OBJECT_WLOCK(m->object);
-		vm_page_io_finish(m);
-		VM_OBJECT_WUNLOCK(m->object);
+		vm_page_lock(m);
+		vm_page_unhold(m);
+		vm_page_unlock(m);
 
 	} while (error == 0 && uio->uio_resid > 0);
-- 
cgit v1.1
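
The deadlock the commit message describes is a classic ABBA lock-order
inversion, and a minimal userland sketch may make it concrete.  This is an
analogy only: the two pthread mutexes merely stand in for the kernel's vnode
lock and page-busy mechanism, and the function names are illustrative, not
the real VM code.  Built with "cc -pthread abba.c", the program hangs, which
is the point: each thread ends up holding one lock while sleeping on the
other.

/* abba.c -- userland analogy of the lock-order inversion described above. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t vnode_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t page_busy = PTHREAD_MUTEX_INITIALIZER;

/*
 * The fault path under the reverted patch: busy the page first, then
 * sleep acquiring the vnode lock (as vget() would).
 */
static void *
fault_path(void *arg)
{

	(void)arg;
	pthread_mutex_lock(&page_busy);		/* busy the page */
	sleep(1);				/* widen the race window */
	printf("fault path: holding busy, waiting for vnode lock\n");
	pthread_mutex_lock(&vnode_lock);	/* blocks forever */
	return (NULL);
}

/*
 * The vm_object_sync() ordering: take the vnode lock first, then sleep
 * until the page is no longer busy.
 */
static void *
sync_path(void *arg)
{

	(void)arg;
	pthread_mutex_lock(&vnode_lock);	/* vnode lock first */
	sleep(1);
	printf("sync path: holding vnode lock, waiting for busy\n");
	pthread_mutex_lock(&page_busy);		/* blocks forever */
	return (NULL);
}

int
main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, fault_path, NULL);
	pthread_create(&b, NULL, sync_path, NULL);
	pthread_join(a, NULL);			/* deadlock: never returns */
	pthread_join(b, NULL);
	return (0);
}

The revert sidesteps the inversion rather than fixing it: proc_rwmem() goes
back to vm_fault_hold()/vm_page_unhold(), which does not keep the page busied
across the fault, so the ptrace path no longer holds the busy state while the
fault code sleeps on the vnode lock.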