Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--  sys/vm/vm_pageout.c | 166
1 file changed, 0 insertions(+), 166 deletions(-)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 4997c82..b7f6887 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -237,8 +237,6 @@ SYSCTL_INT(_vm, OID_AUTO, max_wired,
CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
-static boolean_t vm_pageout_launder(struct vm_pagequeue *pq, int, vm_paddr_t,
-    vm_paddr_t);
#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
@@ -595,170 +593,6 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
return (numpagedout);
}
-static boolean_t
-vm_pageout_launder(struct vm_pagequeue *pq, int tries, vm_paddr_t low,
-    vm_paddr_t high)
-{
-	struct mount *mp;
-	struct vnode *vp;
-	vm_object_t object;
-	vm_paddr_t pa;
-	vm_page_t m, m_tmp, next;
-	int lockmode;
-
-	vm_pagequeue_lock(pq);
-	TAILQ_FOREACH_SAFE(m, &pq->pq_pl, plinks.q, next) {
-		if ((m->flags & PG_MARKER) != 0)
-			continue;
-		pa = VM_PAGE_TO_PHYS(m);
-		if (pa < low || pa + PAGE_SIZE > high)
-			continue;
-		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
-			vm_page_unlock(m);
-			continue;
-		}
-		object = m->object;
-		if ((!VM_OBJECT_TRYWLOCK(object) &&
-		    (!vm_pageout_fallback_object_lock(m, &next) ||
-		    m->hold_count != 0)) || vm_page_busied(m)) {
-			vm_page_unlock(m);
-			VM_OBJECT_WUNLOCK(object);
-			continue;
-		}
-		vm_page_test_dirty(m);
-		if (m->dirty == 0 && object->ref_count != 0)
-			pmap_remove_all(m);
-		if (m->dirty != 0) {
-			vm_page_unlock(m);
-			if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
-				VM_OBJECT_WUNLOCK(object);
-				continue;
-			}
-			if (object->type == OBJT_VNODE) {
-				vm_pagequeue_unlock(pq);
-				vp = object->handle;
-				vm_object_reference_locked(object);
-				VM_OBJECT_WUNLOCK(object);
-				(void)vn_start_write(vp, &mp, V_WAIT);
-				lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
-				    LK_SHARED : LK_EXCLUSIVE;
-				vn_lock(vp, lockmode | LK_RETRY);
-				VM_OBJECT_WLOCK(object);
-				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
-				VM_OBJECT_WUNLOCK(object);
-				VOP_UNLOCK(vp, 0);
-				vm_object_deallocate(object);
-				vn_finished_write(mp);
-				return (TRUE);
-			} else if (object->type == OBJT_SWAP ||
-			    object->type == OBJT_DEFAULT) {
-				vm_pagequeue_unlock(pq);
-				m_tmp = m;
-				vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
-				    0, NULL, NULL);
-				VM_OBJECT_WUNLOCK(object);
-				return (TRUE);
-			}
-		} else {
-			/*
-			 * Dequeue here to prevent lock recursion in
-			 * vm_page_cache().
-			 */
-			vm_page_dequeue_locked(m);
-			vm_page_cache(m);
-			vm_page_unlock(m);
-		}
-		VM_OBJECT_WUNLOCK(object);
-	}
-	vm_pagequeue_unlock(pq);
-	return (FALSE);
-}
-
-/*
- * Increase the number of cached pages.  The specified value, "tries",
- * determines which categories of pages are cached:
- *
- *  0: All clean, inactive pages within the specified physical address range
- *     are cached.  Will not sleep.
- *  1: The vm_lowmem handlers are called.  All inactive pages within
- *     the specified physical address range are cached.  May sleep.
- *  2: The vm_lowmem handlers are called.  All inactive and active pages
- *     within the specified physical address range are cached.  May sleep.
- */
-void
-vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
-{
-	int actl, actmax, inactl, inactmax, dom, initial_dom;
-	static int start_dom = 0;
-
-	if (tries > 0) {
-		/*
-		 * Decrease registered cache sizes.  The vm_lowmem handlers
-		 * may acquire locks and/or sleep, so they can only be invoked
-		 * when "tries" is greater than zero.
-		 */
-		SDT_PROBE0(vm, , , vm__lowmem_cache);
-		EVENTHANDLER_INVOKE(vm_lowmem, 0);
-
-		/*
-		 * We do this explicitly after the caches have been drained
-		 * above.
-		 */
-		uma_reclaim();
-	}
-
-	/*
-	 * Make the next scan start on the next domain.
-	 */
-	initial_dom = atomic_fetchadd_int(&start_dom, 1) % vm_ndomains;
-
-	inactl = 0;
-	inactmax = vm_cnt.v_inactive_count;
-	actl = 0;
-	actmax = tries < 2 ? 0 : vm_cnt.v_active_count;
-	dom = initial_dom;
-
-	/*
-	 * Scan domains in round-robin order, first the inactive queues,
-	 * then the active ones.  Since a domain usually owns a large,
-	 * physically contiguous chunk of memory, it makes sense to
-	 * completely exhaust one domain before switching to the next,
-	 * while growing the pool of contiguous physical pages.
-	 *
-	 * Do not even start laundering a domain that cannot contain
-	 * the specified address range, as indicated by the segments
-	 * constituting the domain.
-	 */
-again_inact:
-	if (inactl < inactmax) {
-		if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
-		    low, high) &&
-		    vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_INACTIVE],
-		    tries, low, high)) {
-			inactl++;
-			goto again_inact;
-		}
-		if (++dom == vm_ndomains)
-			dom = 0;
-		if (dom != initial_dom)
-			goto again_inact;
-	}
-again_act:
-	if (actl < actmax) {
-		if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
-		    low, high) &&
-		    vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_ACTIVE],
-		    tries, low, high)) {
-			actl++;
-			goto again_act;
-		}
-		if (++dom == vm_ndomains)
-			dom = 0;
-		if (dom != initial_dom)
-			goto again_act;
-	}
-}
-
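
For context on how the removed interface was consumed: contiguous-allocation paths (for example, kmem_alloc_contig() in sys/vm/vm_kern.c) retried their allocation with an escalating "tries" value, laundering between attempts, per the levels documented in the comment above. A minimal sketch of that caller pattern, where try_alloc_contig() is a hypothetical placeholder for the actual allocation step, not a kernel API:

static void *
alloc_contig_with_retries(vm_size_t size, vm_paddr_t low, vm_paddr_t high)
{
	void *addr;
	int tries;

	for (tries = 0; ; tries++) {
		/* try_alloc_contig() is a placeholder, not a kernel API. */
		addr = try_alloc_contig(size, low, high);
		if (addr != NULL || tries > 2)
			return (addr);
		/* Free up pages in [low, high) and retry, more aggressively
		 * each time: tries 0 never sleeps, 1 and 2 may sleep. */
		vm_pageout_grow_cache(tries, low, high);
	}
}
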
#if !defined(NO_SWAPPING)
/*
* vm_pageout_object_deactivate_pages
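
The vm_lowmem handlers invoked by the removed vm_pageout_grow_cache() are ordinary EVENTHANDLER(9) consumers: any subsystem holding a discardable cache can register for the event and release memory when it fires. Each handler receives its registered argument plus the flags word passed to EVENTHANDLER_INVOKE(). A hedged sketch of such a registration, where mycache_lowmem() and mycache_trim() are hypothetical names:

#include <sys/param.h>
#include <sys/eventhandler.h>

static void
mycache_lowmem(void *arg __unused, int flags __unused)
{
	/* Hypothetical: drop whatever cached data can be rebuilt later. */
	mycache_trim();
}
EVENTHANDLER_DEFINE(vm_lowmem, mycache_lowmem, NULL, EVENTHANDLER_PRI_ANY);

Handlers may acquire locks and sleep, which is exactly why the removed code only fired the event when "tries" was greater than zero.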
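
The round-robin rotation described in the removed function's comment is separable from the laundering itself: a static counter, advanced atomically, picks a different starting domain on each call, while each call still visits every domain once. A standalone sketch of just that iteration pattern, with a caller-supplied visit callback standing in for the per-domain work:

#include <sys/param.h>
#include <machine/atomic.h>

static u_int start_dom = 0;

static void
scan_domains_round_robin(int ndomains, void (*visit)(int))
{
	int dom, initial_dom;

	/* Rotate the starting point so repeated scans spread the load. */
	initial_dom = atomic_fetchadd_int(&start_dom, 1) % ndomains;
	dom = initial_dom;
	do {
		visit(dom);	/* e.g., launder one domain's page queues */
		if (++dom == ndomains)
			dom = 0;
	} while (dom != initial_dom);
}

Unlike this simplified sketch, the removed code could revisit a domain for as long as laundering kept succeeding, and it skipped domains whose physical segments could not intersect the requested range.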