author     alc <alc@FreeBSD.org>  2010-06-10 16:56:35 +0000
committer  alc <alc@FreeBSD.org>  2010-06-10 16:56:35 +0000
commit     7c212e010d5269026628a1e2e686c89679c23af8 (patch)
tree       2e32fd87e6b7bde5898f2e0cb503e19a50ea5f7b /sys/ia64
parent     c2cccc78fe9cc4c3e971806f8c5ce77d16a4bd39 (diff)
Reduce the scope of the page queues lock and the number of
PG_REFERENCED changes in vm_pageout_object_deactivate_pages().
Simplify this function's inner loop using TAILQ_FOREACH(), and shorten
some of its overly long lines.  Update a stale comment.

Assert that PG_REFERENCED may be cleared only if the object containing
the page is locked.  Add a comment documenting this.

Assert that a caller to vm_page_requeue() holds the page queues lock,
and assert that the page is on a page queue.

Push down the page queues lock into pmap_ts_referenced() and
pmap_page_exists_quick().  (As of now, there are no longer any pmap
functions that expect to be called with the page queues lock held.)

Neither pmap_ts_referenced() nor pmap_page_exists_quick() should ever
be passed an unmanaged page.  Assert this rather than returning "0"
and "FALSE" respectively.

ARM:

Simplify pmap_page_exists_quick() by switching to TAILQ_FOREACH().

Push down the page queues lock inside of pmap_clearbit(), simplifying
pmap_clear_modify(), pmap_clear_reference(), and pmap_remove_write().
Additionally, this allows for avoiding the acquisition of the page
queues lock in some cases.

PowerPC/AIM:

moea*_page_exists_quick() and moea*_page_wired_mappings() will never
be called before pmap initialization is complete.  Therefore, the
check for moea_initialized can be eliminated.

Push down the page queues lock inside of moea*_clear_bit(),
simplifying moea*_clear_modify() and moea*_clear_reference().

The last parameter to moea*_clear_bit() is never used.  Eliminate it.

PowerPC/BookE:

Simplify mmu_booke_page_exists_quick()'s control flow.

Reviewed by:	kib@
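A minimal caller-side sketch of the lock push-down described above; the
function example_count_refs() is hypothetical and not part of this commit,
and it assumes only the vm_page_lock_queues()/vm_page_unlock_queues() and
pmap_ts_referenced() interfaces that appear in the diff below:

/*
 * Before: pmap_ts_referenced() expected the caller to hold the page
 * queues lock, so a caller had to bracket the call itself.
 */
static int
example_count_refs_old(vm_page_t m)
{
	int refs;

	vm_page_lock_queues();
	refs = pmap_ts_referenced(m);
	vm_page_unlock_queues();
	return (refs);
}

/*
 * After: the lock is acquired and released inside pmap_ts_referenced(),
 * so the caller's only obligation is to pass a managed page (enforced by
 * the new KASSERT).
 */
static int
example_count_refs_new(vm_page_t m)
{

	return (pmap_ts_referenced(m));
}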
Diffstat (limited to 'sys/ia64')
-rw-r--r--  sys/ia64/ia64/pmap.c  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 373d019..56e8a42 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1837,23 +1837,23 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
 	int loops = 0;
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return FALSE;
-
-	/*
-	 * Not found, check current mappings returning immediately if found.
-	 */
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (pv->pv_pmap == pmap) {
-			return TRUE;
+			rv = TRUE;
+			break;
 		}
 		loops++;
 		if (loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1949,9 +1949,9 @@ pmap_ts_referenced(vm_page_t m)
 	pv_entry_t pv;
 	int count = 0;
 
-	if (m->flags & PG_FICTITIOUS)
-		return 0;
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		PMAP_LOCK(pv->pv_pmap);
 		oldpmap = pmap_switch(pv->pv_pmap);
@@ -1965,8 +1965,8 @@ pmap_ts_referenced(vm_page_t m)
 		pmap_switch(oldpmap);
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
-
-	return count;
+	vm_page_unlock_queues();
+	return (count);
 }
 
 /*
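As a usage note, replacing the early "return 0"/"return FALSE" paths with
KASSERTs moves the managed-page check to the caller.  A hedged sketch of
what that looks like; the wrapper below is invented for illustration and
is not part of this commit:

/*
 * Hypothetical wrapper: after this change, passing a fictitious or
 * unmanaged page to pmap_page_exists_quick() fires the KASSERT on
 * INVARIANTS kernels instead of quietly returning FALSE, so a caller
 * that may encounter such pages must filter them out first.
 */
static boolean_t
example_page_exists(pmap_t pmap, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);		/* no pv list to scan */
	return (pmap_page_exists_quick(pmap, m));
}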