author		attilio <attilio@FreeBSD.org>	2013-08-05 08:55:35 +0000
committer	attilio <attilio@FreeBSD.org>	2013-08-05 08:55:35 +0000
commit		899ab645146d3b9d10334951eb65770773ed3630 (patch)
tree		668bc05fd597966d485deb8ea05c427ee434fde6 /sys/vm
parent		05101f7501be1dd160855b1f31fcbe7c123817ec (diff)
Revert r253939:
We cannot busy a page before doing pagefaults. In fact, it can deadlock
against the vnode lock, as it tries to vget(). Other functions, right now,
have the opposite lock ordering, like vm_object_sync(), which acquires the
vnode lock first and then sleeps on the busy mechanism.

Before this patch is reinserted we need to break this ordering.

Sponsored by:	EMC / Isilon storage division
Reported by:	kib
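The ordering described above is a classic ABBA inversion: the reverted fault
path busied the page and then reached the vnode lock through vget(), while
vm_object_sync() takes the vnode lock first and then sleeps waiting for the
busy state to clear. The userspace C sketch below models that inversion with
two pthread mutexes; the names and thread bodies are illustrative stand-ins,
not kernel code, and the program deadlocks by design when both threads race
(build with -lpthread):

/*
 * Minimal userspace model of the lock-order inversion. "busy" stands in
 * for the page busy state and "vnode" for the vnode lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t busy  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vnode = PTHREAD_MUTEX_INITIALIZER;

/* Models the reverted fault path: busy the page, then vget() the vnode. */
static void *
fault_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&busy);	/* "vm_page_io_start()" */
	sleep(1);			/* widen the race window */
	pthread_mutex_lock(&vnode);	/* "vget()" in the fault handler */
	pthread_mutex_unlock(&vnode);
	pthread_mutex_unlock(&busy);
	return (NULL);
}

/* Models vm_object_sync(): take the vnode lock, then wait on busy. */
static void *
sync_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&vnode);	/* "vn_lock()" */
	sleep(1);
	pthread_mutex_lock(&busy);	/* sleep on the busy mechanism */
	pthread_mutex_unlock(&busy);
	pthread_mutex_unlock(&vnode);
	return (NULL);
}

int
main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, fault_path, NULL);
	pthread_create(&t2, NULL, sync_path, NULL);
	/* t1 holds busy and wants vnode; t2 holds vnode and wants busy. */
	pthread_join(t1, NULL);		/* never returns: ABBA deadlock */
	pthread_join(t2, NULL);
	printf("no deadlock this run\n");
	return (0);
}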
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/vm_extern.h	4
-rw-r--r--	sys/vm/vm_fault.c	13
-rw-r--r--	sys/vm/vm_glue.c	16
-rw-r--r--	sys/vm/vm_map.h		1
4 files changed, 16 insertions, 18 deletions
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 270426f..4a2dc04 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -63,7 +63,7 @@ void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
vm_ooffset_t *);
int vm_fault_disable_pagefaults(void);
void vm_fault_enable_pagefaults(int save);
-int vm_fault_handle(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
+int vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
int fault_flags, vm_page_t *m_hold);
int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
vm_prot_t prot, vm_page_t *ma, int max_count);
@@ -87,7 +87,7 @@ void vnode_pager_setsize(struct vnode *, vm_ooffset_t);
int vslock(void *, size_t);
void vsunlock(void *, size_t);
struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
-void vm_imgact_unmap_page(vm_object_t, struct sf_buf *sf);
+void vm_imgact_unmap_page(struct sf_buf *sf);
void vm_thread_dispose(struct thread *td);
int vm_thread_new(struct thread *td, int pages);
int vm_mlock(struct proc *, struct ucred *, const void *, size_t);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index b482709..f7f1889 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -221,8 +221,8 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
if (map != kernel_map && KTRPOINT(td, KTR_FAULT))
ktrfault(vaddr, fault_type);
#endif
- result = vm_fault_handle(map, trunc_page(vaddr), fault_type,
- fault_flags, NULL);
+ result = vm_fault_hold(map, trunc_page(vaddr), fault_type, fault_flags,
+ NULL);
#ifdef KTRACE
if (map != kernel_map && KTRPOINT(td, KTR_FAULTEND))
ktrfaultend(result);
@@ -231,7 +231,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
}
int
-vm_fault_handle(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
+vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
int fault_flags, vm_page_t *m_hold)
{
vm_prot_t prot;
@@ -943,10 +943,7 @@ vnode_locked:
vm_page_activate(fs.m);
if (m_hold != NULL) {
*m_hold = fs.m;
- if (fault_flags & VM_FAULT_IOBUSY)
- vm_page_io_start(fs.m);
- else
- vm_page_hold(fs.m);
+ vm_page_hold(fs.m);
}
vm_page_unlock(fs.m);
vm_page_wakeup(fs.m);
@@ -1148,7 +1145,7 @@ vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
* and hold these pages.
*/
for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
- if (*mp == NULL && vm_fault_handle(map, va, prot,
+ if (*mp == NULL && vm_fault_hold(map, va, prot,
VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
goto error;
}
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 445a24a..948e2b3 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -223,7 +223,7 @@ vsunlock(void *addr, size_t len)
* Return the pinned page if successful; otherwise, return NULL.
*/
static vm_page_t
-vm_imgact_page_iostart(vm_object_t object, vm_ooffset_t offset)
+vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
vm_page_t m, ma[1];
vm_pindex_t pindex;
@@ -249,7 +249,9 @@ vm_imgact_page_iostart(vm_object_t object, vm_ooffset_t offset)
}
vm_page_wakeup(m);
}
- vm_page_io_start(m);
+ vm_page_lock(m);
+ vm_page_hold(m);
+ vm_page_unlock(m);
out:
VM_OBJECT_WUNLOCK(object);
return (m);
@@ -264,7 +266,7 @@ vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
vm_page_t m;
- m = vm_imgact_page_iostart(object, offset);
+ m = vm_imgact_hold_page(object, offset);
if (m == NULL)
return (NULL);
sched_pin();
@@ -275,16 +277,16 @@ vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
* Destroy the given CPU private mapping and unpin the page that it mapped.
*/
void
-vm_imgact_unmap_page(vm_object_t object, struct sf_buf *sf)
+vm_imgact_unmap_page(struct sf_buf *sf)
{
vm_page_t m;
m = sf_buf_page(sf);
sf_buf_free(sf);
sched_unpin();
- VM_OBJECT_WLOCK(object);
- vm_page_io_finish(m);
- VM_OBJECT_WUNLOCK(object);
+ vm_page_lock(m);
+ vm_page_unhold(m);
+ vm_page_unlock(m);
}
void
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index cdd9b87..824a9a0 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -329,7 +329,6 @@ long vmspace_resident_count(struct vmspace *vmspace);
#define VM_FAULT_NORMAL 0 /* Nothing special */
#define VM_FAULT_CHANGE_WIRING 1 /* Change the wiring as appropriate */
#define VM_FAULT_DIRTY 2 /* Dirty the page; use w/VM_PROT_COPY */
-#define VM_FAULT_IOBUSY 4 /* Busy the faulted page */
/*
* Initially, mappings are slightly sequential. The maximum window size must