author     alc <alc@FreeBSD.org>  2010-06-10 16:56:35 +0000
committer  alc <alc@FreeBSD.org>  2010-06-10 16:56:35 +0000
commit     7c212e010d5269026628a1e2e686c89679c23af8 (patch)
tree       2e32fd87e6b7bde5898f2e0cb503e19a50ea5f7b /sys/i386
parent     c2cccc78fe9cc4c3e971806f8c5ce77d16a4bd39 (diff)
Reduce the scope of the page queues lock and the number of
PG_REFERENCED changes in vm_pageout_object_deactivate_pages().
Simplify this function's inner loop using TAILQ_FOREACH(), and shorten
some of its overly long lines.  Update a stale comment.

Assert that PG_REFERENCED may be cleared only if the object containing
the page is locked.  Add a comment documenting this.

Assert that a caller to vm_page_requeue() holds the page queues lock,
and assert that the page is on a page queue.

Push down the page queues lock into pmap_ts_referenced() and
pmap_page_exists_quick().  (As of now, there are no longer any pmap
functions that expect to be called with the page queues lock held.)

Neither pmap_ts_referenced() nor pmap_page_exists_quick() should ever
be passed an unmanaged page.  Assert this rather than returning "0"
and "FALSE" respectively.

ARM:
  Simplify pmap_page_exists_quick() by switching to TAILQ_FOREACH().

  Push down the page queues lock inside of pmap_clearbit(), simplifying
  pmap_clear_modify(), pmap_clear_reference(), and pmap_remove_write().
  Additionally, this allows for avoiding the acquisition of the page
  queues lock in some cases.

PowerPC/AIM:
  moea*_page_exists_quick() and moea*_page_wired_mappings() will never
  be called before pmap initialization is complete.  Therefore, the
  check for moea_initialized can be eliminated.

  Push down the page queues lock inside of moea*_clear_bit(),
  simplifying moea*_clear_modify() and moea*_clear_reference().

  The last parameter to moea*_clear_bit() is never used.  Eliminate it.

PowerPC/BookE:
  Simplify mmu_booke_page_exists_quick()'s control flow.

Reviewed by:	kib@
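The heart of the change is a locking-protocol shift.  A minimal sketch
of the caller-side effect, assuming a hypothetical caller (this snippet
is illustrative and is not a hunk from the commit):

	/* Before: a caller of pmap_ts_referenced() had to hold the
	 * page queues lock across the call. */
	vm_page_lock_queues();
	refs = pmap_ts_referenced(m);
	vm_page_unlock_queues();

	/* After: the lock is acquired and released inside the pmap
	 * layer, so the caller makes a bare call. */
	refs = pmap_ts_referenced(m);

Since no pmap function now expects to be entered with the page queues
lock held, each architecture can shrink the locked region to exactly
the pv-list walk.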
Diffstat (limited to 'sys/i386')
-rw-r--r--  sys/i386/i386/pmap.c  32
-rw-r--r--  sys/i386/xen/pmap.c   22
2 files changed, 32 insertions(+), 22 deletions(-)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 68c5edc..b740834 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -4061,30 +4061,35 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
struct md_page *pvh;
pv_entry_t pv;
int loops = 0;
+ boolean_t rv;
- if (m->flags & PG_FICTITIOUS)
- return (FALSE);
-
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_page_exists_quick: page %p is not managed", m));
+ rv = FALSE;
+ vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
if (PV_PMAP(pv) == pmap) {
- return (TRUE);
+ rv = TRUE;
+ break;
}
loops++;
if (loops >= 16)
break;
}
- if (loops < 16) {
+ if (!rv && loops < 16) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
- if (PV_PMAP(pv) == pmap)
- return (TRUE);
+ if (PV_PMAP(pv) == pmap) {
+ rv = TRUE;
+ break;
+ }
loops++;
if (loops >= 16)
break;
}
}
- return (FALSE);
+ vm_page_unlock_queues();
+ return (rv);
}
/*
@@ -4512,11 +4517,11 @@ pmap_ts_referenced(vm_page_t m)
vm_offset_t va;
int rtval = 0;
- if (m->flags & PG_FICTITIOUS)
- return (rtval);
- sched_pin();
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_ts_referenced: page %p is not managed", m));
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ vm_page_lock_queues();
+ sched_pin();
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
va = pv->pv_va;
pmap = PV_PMAP(pv);
@@ -4571,6 +4576,7 @@ pmap_ts_referenced(vm_page_t m)
}
out:
sched_unpin();
+ vm_page_unlock_queues();
return (rtval);
}
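Both rewritten i386 functions above follow the same single-exit
discipline: once vm_page_lock_queues() is taken inside the function,
every early return (TRUE) or return (rtval) must funnel through the one
unlock site, which is why the diff introduces the rv variable and turns
each return inside the loop into an assignment plus break.  A
self-contained userspace sketch of the pattern, with a pthread mutex
standing in for vm_page_queue_mtx (all names here are hypothetical):

	#include <sys/queue.h>
	#include <pthread.h>
	#include <stdbool.h>

	struct entry {
		int key;
		TAILQ_ENTRY(entry) link;
	};
	TAILQ_HEAD(entry_head, entry);

	static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;

	/* Single-exit lookup: a match sets rv and breaks rather than
	 * returning early, so the one unlock site always runs. */
	static bool
	find_quick(struct entry_head *head, int key)
	{
		struct entry *e;
		bool rv = false;

		pthread_mutex_lock(&list_mtx);
		TAILQ_FOREACH(e, head, link) {
			if (e->key == key) {
				rv = true;
				break;	/* no return while locked */
			}
		}
		pthread_mutex_unlock(&list_mtx);
		return (rv);
	}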
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 1a3a9d5..d4e02d9 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -3449,20 +3449,23 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
pv_entry_t pv;
int loops = 0;
+ boolean_t rv;
- if (m->flags & PG_FICTITIOUS)
- return (FALSE);
-
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_page_exists_quick: page %p is not managed", m));
+ rv = FALSE;
+ vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
if (PV_PMAP(pv) == pmap) {
- return TRUE;
+ rv = TRUE;
+ break;
}
loops++;
if (loops >= 16)
break;
}
- return (FALSE);
+ vm_page_unlock_queues();
+ return (rv);
}
/*
@@ -3839,10 +3842,10 @@ pmap_ts_referenced(vm_page_t m)
pt_entry_t *pte;
int rtval = 0;
- if (m->flags & PG_FICTITIOUS)
- return (rtval);
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_ts_referenced: page %p is not managed", m));
+ vm_page_lock_queues();
sched_pin();
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pvf = pv;
do {
@@ -3867,6 +3870,7 @@ pmap_ts_referenced(vm_page_t m)
PT_SET_MA(PADDR1, 0);
sched_unpin();
+ vm_page_unlock_queues();
return (rtval);
}
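Replacing the PG_FICTITIOUS early returns with KASSERTs also changes
the functions' contract: callers are now responsible for never passing
an unmanaged page.  A hedged caller-side sketch (the filtering site is
hypothetical, not taken from this commit):

	/* Hypothetical caller: skip unmanaged pages up front, since
	 * pmap_ts_referenced() now asserts instead of returning 0. */
	int refs;

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		refs = 0;		/* no pv entries to scan */
	else
		refs = pmap_ts_referenced(m);

On an INVARIANTS kernel, handing either function an unmanaged page now
trips the assertion rather than silently yielding "0" or "FALSE".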