path: root/sys/vm/vm_glue.c
author     attilio <attilio@FreeBSD.org>  2013-08-04 21:07:24 +0000
committer  attilio <attilio@FreeBSD.org>  2013-08-04 21:07:24 +0000
commit     19b2ea9f815db5f4ef1071a79ee4f27a2a444a3f (patch)
tree       66d78aa520f99833b11e6eca180f8fa7216b21f8 /sys/vm/vm_glue.c
parent     a56cdf0d3470151f45842530ddf3cadef1d2819c (diff)
download   FreeBSD-src-19b2ea9f815db5f4ef1071a79ee4f27a2a444a3f.zip
           FreeBSD-src-19b2ea9f815db5f4ef1071a79ee4f27a2a444a3f.tar.gz
The page hold mechanism is fast, but it has a couple of fallouts:

- It does not let pages respect the LRU policy.
- It bloats the active/inactive queues with a few pages.

Try to avoid it as much as possible, with the long-term target of removing it completely.

Use the soft-busy mechanism to protect page content accesses during short-term operations (like uiomove_fromphys()).

After this change, only vm_fault_quick_hold_pages() still uses the hold mechanism for page content access. There is additional complexity there, as the quick path cannot immediately access the page object in order to busy the page, while the slow path cannot busy more than one page at a time (to avoid deadlocks). Fixing that primitive would allow the page hold mechanism to be removed completely.

Sponsored by:	EMC / Isilon storage division
Discussed with:	alc
Reviewed by:	jeff
Tested by:	pho
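
For illustration, a minimal sketch of the substitution the commit describes, built only from the primitives visible in the diff below; the surrounding short-term access is hypothetical:

	/* Old pattern: raise the hold count so the page cannot be freed,
	 * keeping it pinned outside normal LRU queue handling. */
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
	/* ... short-term access to the page contents,
	 * e.g. uiomove_fromphys() ... */
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);

	/* New pattern: soft-busy the page instead; the soft-busy count
	 * is manipulated with the owning object write-locked. */
	VM_OBJECT_WLOCK(object);
	vm_page_io_start(m);
	VM_OBJECT_WUNLOCK(object);
	/* ... short-term access to the page contents ... */
	VM_OBJECT_WLOCK(object);
	vm_page_io_finish(m);
	VM_OBJECT_WUNLOCK(object);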
Diffstat (limited to 'sys/vm/vm_glue.c')
-rw-r--r--  sys/vm/vm_glue.c  16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 948e2b3..445a24a 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -223,7 +223,7 @@ vsunlock(void *addr, size_t len)
  * Return the pinned page if successful; otherwise, return NULL.
  */
 static vm_page_t
-vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
+vm_imgact_page_iostart(vm_object_t object, vm_ooffset_t offset)
 {
 	vm_page_t m, ma[1];
 	vm_pindex_t pindex;
@@ -249,9 +249,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
 		}
 		vm_page_wakeup(m);
 	}
-	vm_page_lock(m);
-	vm_page_hold(m);
-	vm_page_unlock(m);
+	vm_page_io_start(m);
 out:
 	VM_OBJECT_WUNLOCK(object);
 	return (m);
@@ -266,7 +264,7 @@ vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
 {
 	vm_page_t m;
 
-	m = vm_imgact_hold_page(object, offset);
+	m = vm_imgact_page_iostart(object, offset);
 	if (m == NULL)
 		return (NULL);
 	sched_pin();
@@ -277,16 +275,16 @@ vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
  * Destroy the given CPU private mapping and unpin the page that it mapped.
  */
 void
-vm_imgact_unmap_page(struct sf_buf *sf)
+vm_imgact_unmap_page(vm_object_t object, struct sf_buf *sf)
 {
 	vm_page_t m;
 
 	m = sf_buf_page(sf);
 	sf_buf_free(sf);
 	sched_unpin();
-	vm_page_lock(m);
-	vm_page_unhold(m);
-	vm_page_unlock(m);
+	VM_OBJECT_WLOCK(object);
+	vm_page_io_finish(m);
+	VM_OBJECT_WUNLOCK(object);
 }
 
 void
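
A hypothetical caller-side view of the signature change (the callers, e.g. the ELF image activator, are outside this diff): the object must now be passed through to vm_imgact_unmap_page() so it can take the object lock for vm_page_io_finish():

	struct sf_buf *sf;

	sf = vm_imgact_map_page(object, offset);
	if (sf == NULL)
		return (EIO);
	/* ... access the mapping at sf_buf_kva(sf) ... */
	vm_imgact_unmap_page(object, sf);	/* previously: vm_imgact_unmap_page(sf) */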