author	alc <alc@FreeBSD.org>	2012-07-18 05:21:34 +0000
committer	alc <alc@FreeBSD.org>	2012-07-18 05:21:34 +0000
commit	e5949174d4c7e5390bbcdfed87017d0a92326427 (patch)
tree	ab1f9e2eab50123e75b91ebed76bc286186d79b8 /sys/vm/vm_pageout.c
parent	1a9d4d87e6ea943994c086e437518bbcb31d100a (diff)
Move what remains of vm/vm_contig.c into vm/vm_pageout.c, where similar
code resides.  Rename vm_contig_grow_cache() to vm_pageout_grow_cache().

Reviewed by:	kib
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--	sys/vm/vm_pageout.c	139
1 file changed, 137 insertions(+), 2 deletions(-)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 11d040d..05bbbab 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -209,11 +209,14 @@ int vm_page_max_wired; /* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
+static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
+static boolean_t vm_pageout_launder(int, int, vm_paddr_t, vm_paddr_t);
#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
+static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
static void vm_pageout_page_stats(void);
/*
@@ -247,7 +250,7 @@ vm_pageout_init_marker(vm_page_t marker, u_short queue)
* This function depends on both the lock portion of struct vm_object
* and normal struct vm_page being type stable.
*/
-boolean_t
+static boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
struct vm_page marker;
@@ -286,7 +289,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
*
* This function depends on normal struct vm_page being type stable.
*/
-boolean_t
+static boolean_t
vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
{
struct vm_page marker;
@@ -558,6 +561,138 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
return (numpagedout);
}
+static boolean_t
+vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
+{
+	struct mount *mp;
+	struct vnode *vp;
+	vm_object_t object;
+	vm_paddr_t pa;
+	vm_page_t m, m_tmp, next;
+	int vfslocked;
+
+	vm_page_lock_queues();
+	TAILQ_FOREACH_SAFE(m, &vm_page_queues[queue].pl, pageq, next) {
+		KASSERT(m->queue == queue,
+		    ("vm_pageout_launder: page %p's queue is not %d", m,
+		    queue));
+		if ((m->flags & PG_MARKER) != 0)
+			continue;
+		pa = VM_PAGE_TO_PHYS(m);
+		if (pa < low || pa + PAGE_SIZE > high)
+			continue;
+		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
+			vm_page_unlock(m);
+			continue;
+		}
+		object = m->object;
+		if (!VM_OBJECT_TRYLOCK(object) &&
+		    (!vm_pageout_fallback_object_lock(m, &next) ||
+		    m->hold_count != 0)) {
+			vm_page_unlock(m);
+			VM_OBJECT_UNLOCK(object);
+			continue;
+		}
+		if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
+			if (tries == 0) {
+				vm_page_unlock(m);
+				VM_OBJECT_UNLOCK(object);
+				continue;
+			}
+			vm_page_sleep(m, "vpctw0");
+			VM_OBJECT_UNLOCK(object);
+			return (FALSE);
+		}
+		vm_page_test_dirty(m);
+		if (m->dirty == 0)
+			pmap_remove_all(m);
+		if (m->dirty != 0) {
+			vm_page_unlock(m);
+			if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
+				VM_OBJECT_UNLOCK(object);
+				continue;
+			}
+			if (object->type == OBJT_VNODE) {
+				vm_page_unlock_queues();
+				vp = object->handle;
+				vm_object_reference_locked(object);
+				VM_OBJECT_UNLOCK(object);
+				(void)vn_start_write(vp, &mp, V_WAIT);
+				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+				VM_OBJECT_LOCK(object);
+				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
+				VM_OBJECT_UNLOCK(object);
+				VOP_UNLOCK(vp, 0);
+				VFS_UNLOCK_GIANT(vfslocked);
+				vm_object_deallocate(object);
+				vn_finished_write(mp);
+				return (TRUE);
+			} else if (object->type == OBJT_SWAP ||
+			    object->type == OBJT_DEFAULT) {
+				vm_page_unlock_queues();
+				m_tmp = m;
+				vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
+				    0, NULL, NULL);
+				VM_OBJECT_UNLOCK(object);
+				return (TRUE);
+			}
+		} else {
+			vm_page_cache(m);
+			vm_page_unlock(m);
+		}
+		VM_OBJECT_UNLOCK(object);
+	}
+	vm_page_unlock_queues();
+	return (FALSE);
+}
+
+/*
+ * Increase the number of cached pages.  The specified value, "tries",
+ * determines which categories of pages are cached:
+ *
+ *  0: All clean, inactive pages within the specified physical address range
+ *     are cached.  Will not sleep.
+ *  1: The vm_lowmem handlers are called.  All inactive pages within
+ *     the specified physical address range are cached.  May sleep.
+ *  2: The vm_lowmem handlers are called.  All inactive and active pages
+ *     within the specified physical address range are cached.  May sleep.
+ */
+void
+vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
+{
+	int actl, actmax, inactl, inactmax;
+
+	if (tries > 0) {
+		/*
+		 * Decrease registered cache sizes.  The vm_lowmem handlers
+		 * may acquire locks and/or sleep, so they can only be invoked
+		 * when "tries" is greater than zero.
+		 */
+		EVENTHANDLER_INVOKE(vm_lowmem, 0);
+
+		/*
+		 * We do this explicitly after the caches have been drained
+		 * above.
+		 */
+		uma_reclaim();
+	}
+	inactl = 0;
+	inactmax = cnt.v_inactive_count;
+	actl = 0;
+	actmax = tries < 2 ? 0 : cnt.v_active_count;
+again:
+	if (inactl < inactmax && vm_pageout_launder(PQ_INACTIVE, tries, low,
+	    high)) {
+		inactl++;
+		goto again;
+	}
+	if (actl < actmax && vm_pageout_launder(PQ_ACTIVE, tries, low, high)) {
+		actl++;
+		goto again;
+	}
+}
+
#if !defined(NO_SWAPPING)
/*
* vm_pageout_object_deactivate_pages
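
For context on how the renamed function is typically consumed (the callers are not part of this diff): vm_pageout_grow_cache() is the fallback that physical-address-constrained allocation paths invoke when the free and cache queues cannot satisfy a request.  The sketch below is a hypothetical caller written only to illustrate the "tries" escalation documented in the comment above; try_alloc_contig_range() is a placeholder, not a real kernel function, and the loop structure is an assumption, not code from this commit.

/*
 * Hypothetical caller (illustration only): retry a contiguous allocation,
 * asking the pageout code to cache progressively more pages between
 * attempts.  try_alloc_contig_range() stands in for the real physical-page
 * allocator; vm_pageout_grow_cache() is assumed to be declared in
 * <vm/vm_pageout.h>.
 */
static vm_page_t
alloc_contig_retry(vm_paddr_t low, vm_paddr_t high, int can_sleep)
{
	vm_page_t m;
	int tries;

	/* Only the non-sleeping pass (tries == 0) is safe if we cannot sleep. */
	for (tries = 0; tries < (can_sleep ? 3 : 1); tries++) {
		m = try_alloc_contig_range(low, high);	/* placeholder */
		if (m != NULL)
			return (m);
		/*
		 * tries == 0: cache clean, inactive pages only; never sleeps.
		 * tries == 1: run the vm_lowmem handlers and uma_reclaim(),
		 *             then launder inactive pages; may sleep.
		 * tries == 2: additionally launder active pages; may sleep.
		 */
		vm_pageout_grow_cache(tries, low, high);
	}
	return (NULL);
}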