From 05e051bfe3217c74f0b53af172f01b0a6d902a04 Mon Sep 17 00:00:00 2001
From: markj
Date: Fri, 13 Nov 2015 02:16:08 +0000
Subject: MFC r287235:
 Remove weighted page handling from vm_page_advise().

---
 sys/vm/vm_page.c | 79 ++++++++++++++++++--------------------------------------
 1 file changed, 25 insertions(+), 54 deletions(-)

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 95bf6ca..243b11f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2430,19 +2430,26 @@ _vm_page_deactivate(vm_page_t m, int athead)
 	struct vm_pagequeue *pq;
 	int queue;
 
-	vm_page_lock_assert(m, MA_OWNED);
+	vm_page_assert_locked(m);
 
 	/*
-	 * Ignore if already inactive.
+	 * Ignore if the page is already inactive, unless it is unlikely to be
+	 * reactivated.
 	 */
-	if ((queue = m->queue) == PQ_INACTIVE)
+	if ((queue = m->queue) == PQ_INACTIVE && !athead)
 		return;
 	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
-		if (queue != PQ_NONE)
-			vm_page_dequeue(m);
-		m->flags &= ~PG_WINATCFLS;
 		pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
-		vm_pagequeue_lock(pq);
+		/* Avoid multiple acquisitions of the inactive queue lock. */
+		if (queue == PQ_INACTIVE) {
+			vm_pagequeue_lock(pq);
+			vm_page_dequeue_locked(m);
+		} else {
+			if (queue != PQ_NONE)
+				vm_page_dequeue(m);
+			m->flags &= ~PG_WINATCFLS;
+			vm_pagequeue_lock(pq);
+		}
 		m->queue = PQ_INACTIVE;
 		if (athead)
 			TAILQ_INSERT_HEAD(&pq->pq_pl, m, plinks.q);
@@ -2618,34 +2625,18 @@ vm_page_cache(vm_page_t m)
 /*
  *	vm_page_advise
  *
- *	Cache, deactivate, or do nothing as appropriate.  This routine
- *	is used by madvise().
- *
- *	Generally speaking we want to move the page into the cache so
- *	it gets reused quickly.  However, this can result in a silly syndrome
- *	due to the page recycling too quickly.  Small objects will not be
- *	fully cached.  On the other hand, if we move the page to the inactive
- *	queue we wind up with a problem whereby very large objects
- *	unnecessarily blow away our inactive and cache queues.
- *
- *	The solution is to move the pages based on a fixed weighting.  We
- *	either leave them alone, deactivate them, or move them to the cache,
- *	where moving them to the cache has the highest weighting.
- *	By forcing some pages into other queues we eventually force the
- *	system to balance the queues, potentially recovering other unrelated
- *	space from active.  The idea is to not force this to happen too
- *	often.
+ *	Deactivate or do nothing, as appropriate.  This routine is used
+ *	by madvise() and vop_stdadvise().
  *
  *	The object and page must be locked.
  */
 void
 vm_page_advise(vm_page_t m, int advice)
 {
-	int dnw, head;
 
 	vm_page_assert_locked(m);
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (advice == MADV_FREE) {
+	if (advice == MADV_FREE)
 		/*
 		 * Mark the page clean.  This will allow the page to be freed
 		 * up by the system.  However, such pages are often reused
@@ -2656,24 +2647,12 @@ vm_page_advise(vm_page_t m, int advice)
 		 * nor do we try to put it in the cache (which would cause a
 		 * page fault on reuse).
 		 *
-		 * But we do make the page is freeable as we can without
+		 * But we do make the page as freeable as we can without
 		 * actually taking the step of unmapping it.
 		 */
 		m->dirty = 0;
-		m->act_count = 0;
-	} else if (advice != MADV_DONTNEED)
+	else if (advice != MADV_DONTNEED)
 		return;
-	dnw = PCPU_GET(dnweight);
-	PCPU_INC(dnweight);
-
-	/*
-	 * Occasionally leave the page alone.
-	 */
-	if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
-		if (m->act_count >= ACT_INIT)
-			--m->act_count;
-		return;
-	}
 
 	/*
 	 * Clear any references to the page.  Otherwise, the page daemon will
@@ -2684,20 +2663,12 @@ vm_page_advise(vm_page_t m, int advice)
 	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
 		vm_page_dirty(m);
 
-	if (m->dirty || (dnw & 0x0070) == 0) {
-		/*
-		 * Deactivate the page 3 times out of 32.
-		 */
-		head = 0;
-	} else {
-		/*
-		 * Cache the page 28 times out of every 32.  Note that
-		 * the page is deactivated instead of cached, but placed
-		 * at the head of the queue instead of the tail.
-		 */
-		head = 1;
-	}
-	_vm_page_deactivate(m, head);
+	/*
+	 * Place clean pages at the head of the inactive queue rather than the
+	 * tail, thus defeating the queue's LRU operation and ensuring that the
+	 * page will be reused quickly.
+	 */
+	_vm_page_deactivate(m, m->dirty == 0);
 }
 
 /*
-- 
cgit v1.1
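
The new behavior hinges on inactive-queue placement: the page daemon scans the
inactive queue from its head, so a page inserted at the head (athead != 0) is
reclaimed ahead of pages that aged in normally, which is why the patch passes
(m->dirty == 0) as athead for clean advised pages. Below is a minimal
user-space sketch of that head-vs-tail distinction using the same sys/queue.h
TAILQ macros the kernel uses; the struct page, queue type, and scan loop here
are simplified stand-ins for the kernel's vm_page and vm_pagequeue, not the
real types.

#include <sys/queue.h>
#include <stdio.h>

struct page {
	int id;
	TAILQ_ENTRY(page) plinks;	/* queue linkage, as in vm_page */
};
TAILQ_HEAD(pagequeue, page);

int
main(void)
{
	struct pagequeue pq = TAILQ_HEAD_INITIALIZER(pq);
	struct page a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct page *p;

	/* Ordinary deactivation: pages age in at the tail (LRU order). */
	TAILQ_INSERT_TAIL(&pq, &a, plinks);
	TAILQ_INSERT_TAIL(&pq, &b, plinks);

	/* An advised clean page goes to the head instead (athead != 0). */
	TAILQ_INSERT_HEAD(&pq, &c, plinks);

	/* A head-to-tail scan, like the page daemon's, sees page 3 first. */
	TAILQ_FOREACH(p, &pq, plinks)
		printf("%d ", p->id);	/* prints: 3 1 2 */
	printf("\n");
	return (0);
}

Scanning from the head visits the head-inserted page first, defeating LRU for
exactly that page; this is the "reused quickly" effect the new comment in
vm_page_advise() describes, without the old 28-out-of-32 weighting.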