summary refs log tree commit diff stats
path: root/sys/vm
diff options
context:
space:
mode:
author: markj <markj@FreeBSD.org> 2015-09-30 23:06:29 +0000
committer: markj <markj@FreeBSD.org> 2015-09-30 23:06:29 +0000
commit 6348241c12da94254c6b2797b8b3cedaf6642db1 (patch)
tree 1e2b61f35c22941061c31586e1d027222bdcbb8a /sys/vm
parent aca221db9d8ee973f5ca86a03adfddf468891409 (diff)
download FreeBSD-src-6348241c12da94254c6b2797b8b3cedaf6642db1.zip
download FreeBSD-src-6348241c12da94254c6b2797b8b3cedaf6642db1.tar.gz
As a step towards the elimination of PG_CACHED pages, rework the handling
of POSIX_FADV_DONTNEED so that it causes the backing pages to be moved to the head of the inactive queue instead of being cached. This affects the implementation of POSIX_FADV_NOREUSE as well, since it works by applying POSIX_FADV_DONTNEED to file ranges after they have been read or written. At that point the corresponding buffers may still be dirty, so the previous implementation would coalesce successive ranges and apply POSIX_FADV_DONTNEED to the result, ensuring that pages backing the dirty buffers would eventually be cached. To preserve this behaviour in an efficient manner, this change adds a new buf flag, B_NOREUSE, which causes the pages backing a VMIO buf to be placed at the head of the inactive queue when the buf is released. POSIX_FADV_NOREUSE then works by setting this flag in bufs that underlie the specified range.

Reviewed by: alc, kib
Sponsored by: EMC / Isilon Storage Division
Differential Revision: https://reviews.freebsd.org/D3726
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_object.c  22
-rw-r--r--  sys/vm/vm_object.h   4
-rw-r--r--  sys/vm/vm_page.c    16
-rw-r--r--  sys/vm/vm_page.h     1
4 files changed, 28 insertions, 15 deletions
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index a4aac95..0a3c2ef 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1963,15 +1963,15 @@ skipmemq:
}
/*
- * vm_object_page_cache:
+ * vm_object_page_noreuse:
*
- * For the given object, attempt to move the specified clean
- * pages to the cache queue. If a page is wired for any reason,
- * then it will not be changed. Pages are specified by the given
- * range ["start", "end"). As a special case, if "end" is zero,
- * then the range extends from "start" to the end of the object.
- * Any mappings to the specified pages are removed before the
- * pages are moved to the cache queue.
+ * For the given object, attempt to move the specified pages to
+ * the head of the inactive queue. This bypasses regular LRU
+ * operation and allows the pages to be reused quickly under memory
+ * pressure. If a page is wired for any reason, then it will not
+ * be queued. Pages are specified by the range ["start", "end").
+ * As a special case, if "end" is zero, then the range extends from
+ * "start" to the end of the object.
*
* This operation should only be performed on objects that
* contain non-fictitious, managed pages.
@@ -1979,14 +1979,14 @@ skipmemq:
* The object must be locked.
*/
void
-vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
+vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
struct mtx *mtx, *new_mtx;
vm_page_t p, next;
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
- ("vm_object_page_cache: illegal object %p", object));
+ ("vm_object_page_noreuse: illegal object %p", object));
if (object->resident_page_count == 0)
return;
p = vm_page_find_least(object, start);
@@ -2009,7 +2009,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
mtx = new_mtx;
mtx_lock(mtx);
}
- vm_page_try_to_cache(p);
+ vm_page_deactivate_noreuse(p);
}
if (mtx != NULL)
mtx_unlock(mtx);
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 7e433ae..894a8d5 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -304,10 +304,10 @@ void vm_object_terminate (vm_object_t);
void vm_object_set_writeable_dirty (vm_object_t);
void vm_object_init (void);
void vm_object_madvise(vm_object_t, vm_pindex_t, vm_pindex_t, int);
-void vm_object_page_cache(vm_object_t object, vm_pindex_t start,
- vm_pindex_t end);
boolean_t vm_object_page_clean(vm_object_t object, vm_ooffset_t start,
vm_ooffset_t end, int flags);
+void vm_object_page_noreuse(vm_object_t object, vm_pindex_t start,
+ vm_pindex_t end);
void vm_object_page_remove(vm_object_t object, vm_pindex_t start,
vm_pindex_t end, int options);
boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 2aaddfb..a3a9a10 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2589,6 +2589,19 @@ vm_page_deactivate(vm_page_t m)
}
/*
+ * Move the specified page to the inactive queue with the expectation
+ * that it is unlikely to be reused.
+ *
+ * The page must be locked.
+ */
+void
+vm_page_deactivate_noreuse(vm_page_t m)
+{
+
+ _vm_page_deactivate(m, 1);
+}
+
+/*
* vm_page_try_to_cache:
*
* Returns 0 on failure, 1 on success
@@ -2740,8 +2753,7 @@ vm_page_cache(vm_page_t m)
/*
* vm_page_advise
*
- * Deactivate or do nothing, as appropriate. This routine is used
- * by madvise() and vop_stdadvise().
+ * Deactivate or do nothing, as appropriate.
*
* The object and page must be locked.
*/
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index fd7d3f4..dedd6ac 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -451,6 +451,7 @@ void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_deactivate (vm_page_t);
+void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
OpenPOWER on IntegriCloud