author     alc <alc@FreeBSD.org>    2010-05-26 18:00:44 +0000
committer  alc <alc@FreeBSD.org>    2010-05-26 18:00:44 +0000
commit     3f1d4b057cf7217e3fe56dcd1bd29db76406ae5f (patch)
tree       95cb060d8736de18caf3932cd92f06f852f69659 /sys/i386
parent     8dd88ee72437269f6edd3612fb57c46b8adfc020 (diff)
Push down page queues lock acquisition in pmap_enter_object() and
pmap_is_referenced().  Eliminate the corresponding page queues lock
acquisitions from vm_map_pmap_enter() and mincore(), respectively.  In
mincore(), this allows some additional cases to complete without ever
acquiring the page queues lock.

Assert that the page is managed in pmap_is_referenced().

On powerpc/aim, push down the page queues lock acquisition from
moea*_is_modified() and moea*_is_referenced() into moea*_query_bit().
Again, this will allow some additional cases to complete without ever
acquiring the page queues lock.

Reorder a few statements in vm_page_dontneed() so that a race can't lead
to an old reference persisting.  This scenario is described in detail by
a comment.

Correct a spelling error in vm_page_dontneed().

Assert that the object is locked in vm_page_clear_dirty(), and restrict
the page queues lock assertion to just those cases in which the page is
currently writeable.

Add object locking to vnode_pager_generic_putpages().  This was the one
and only place where vm_page_clear_dirty() was being called without the
object being locked.

Eliminate an unnecessary vm_page_lock() around vnode_pager_setsize()'s
call to vm_page_clear_dirty().

Change vnode_pager_generic_putpages() to the modern style of function
definition.  Also, change the name of one of the parameters to follow
virtual memory system naming conventions.

Reviewed by:	kib
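To make the caller-side effect concrete, here is a minimal sketch of how a
query looks once the lock is pushed down.  This is illustrative only: the
surrounding mincore() code is paraphrased, not taken from this diff, and the
exact statements in sys/vm/vm_mmap.c may differ.

	/*
	 * Illustrative sketch, assuming a caller such as mincore().
	 * Only the locking pattern matters here.
	 */

	/* Before: the caller wrapped the query in the page queues lock. */
	vm_page_lock_queues();
	if (pmap_is_referenced(m))
		mincoreinfo |= MINCORE_REFERENCED_OTHER;
	vm_page_unlock_queues();

	/*
	 * After: pmap_is_referenced() acquires and releases the page
	 * queues lock internally, so this caller no longer touches the
	 * lock at all, and code paths that never reach this query avoid
	 * it entirely.
	 */
	if (pmap_is_referenced(m))
		mincoreinfo |= MINCORE_REFERENCED_OTHER;

The same pattern applies to vm_map_pmap_enter(), which can now call
pmap_enter_object() without first acquiring the page queues lock.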
Diffstat (limited to 'sys/i386')
-rw-r--r--  sys/i386/i386/pmap.c  15
-rw-r--r--  sys/i386/xen/pmap.c   10
2 files changed, 16 insertions, 9 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d20c15b..2af5598 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3519,6 +3519,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
psize = atop(end - start);
mpte = NULL;
m = m_start;
+ vm_page_lock_queues();
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
va = start + ptoa(diff);
@@ -3532,6 +3533,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
mpte);
m = TAILQ_NEXT(m, listq);
}
+ vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@@ -4377,12 +4379,15 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
boolean_t
pmap_is_referenced(vm_page_t m)
{
+ boolean_t rv;
- if (m->flags & PG_FICTITIOUS)
- return (FALSE);
- if (pmap_is_referenced_pvh(&m->md))
- return (TRUE);
- return (pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_is_referenced: page %p is not managed", m));
+ vm_page_lock_queues();
+ rv = pmap_is_referenced_pvh(&m->md) ||
+ pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+ vm_page_unlock_queues();
+ return (rv);
}
/*
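For readability, the i386 pmap_is_referenced() after this hunk is applied can
be reconstructed as follows; the body is taken directly from the added lines
above, with indentation assumed in the usual kernel style.

	boolean_t
	pmap_is_referenced(vm_page_t m)
	{
		boolean_t rv;

		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
		    ("pmap_is_referenced: page %p is not managed", m));
		vm_page_lock_queues();
		rv = pmap_is_referenced_pvh(&m->md) ||
		    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
		vm_page_unlock_queues();
		return (rv);
	}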
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 5e04680..7be7b52 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2901,6 +2901,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
mpte = NULL;
m = m_start;
+ vm_page_lock_queues();
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m,
@@ -2917,7 +2918,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
error = HYPERVISOR_multicall(mcl, count);
KASSERT(error == 0, ("bad multicall %d", error));
}
-
+ vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@@ -3734,11 +3735,11 @@ pmap_is_referenced(vm_page_t m)
pmap_t pmap;
boolean_t rv;
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_is_referenced: page %p is not managed", m));
rv = FALSE;
- if (m->flags & PG_FICTITIOUS)
- return (rv);
+ vm_page_lock_queues();
sched_pin();
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
@@ -3751,6 +3752,7 @@ pmap_is_referenced(vm_page_t m)
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
sched_unpin();
+ vm_page_unlock_queues();
return (rv);
}