author     alc <alc@FreeBSD.org>  2010-06-10 16:56:35 +0000
committer  alc <alc@FreeBSD.org>  2010-06-10 16:56:35 +0000
commit     7c212e010d5269026628a1e2e686c89679c23af8 (patch)
tree       2e32fd87e6b7bde5898f2e0cb503e19a50ea5f7b /sys/sparc64
parent     c2cccc78fe9cc4c3e971806f8c5ce77d16a4bd39 (diff)
Reduce the scope of the page queues lock and the number of
PG_REFERENCED changes in vm_pageout_object_deactivate_pages().
Simplify this function's inner loop using TAILQ_FOREACH(), and shorten
some of its overly long lines.  Update a stale comment.

Assert that PG_REFERENCED may be cleared only if the object containing
the page is locked.  Add a comment documenting this.

Assert that a caller to vm_page_requeue() holds the page queues lock,
and assert that the page is on a page queue.

Push down the page queues lock into pmap_ts_referenced() and
pmap_page_exists_quick().  (As of now, there are no longer any pmap
functions that expect to be called with the page queues lock held.)

Neither pmap_ts_referenced() nor pmap_page_exists_quick() should ever
be passed an unmanaged page.  Assert this rather than returning "0"
and "FALSE", respectively.

ARM:

Simplify pmap_page_exists_quick() by switching to TAILQ_FOREACH().

Push down the page queues lock inside of pmap_clearbit(), simplifying
pmap_clear_modify(), pmap_clear_reference(), and pmap_remove_write().
Additionally, this allows for avoiding the acquisition of the page
queues lock in some cases.

PowerPC/AIM:

moea*_page_exists_quick() and moea*_page_wired_mappings() will never
be called before pmap initialization is complete.  Therefore, the check
for moea_initialized can be eliminated.

Push down the page queues lock inside of moea*_clear_bit(), simplifying
moea*_clear_modify() and moea*_clear_reference().

The last parameter to moea*_clear_bit() is never used.  Eliminate it.

PowerPC/BookE:

Simplify mmu_booke_page_exists_quick()'s control flow.

Reviewed by:	kib@
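The practical effect of pushing the lock down is that machine-independent
callers no longer take the page queues lock around these pmap calls.  The
fragment below is a minimal sketch of the calling convention after this
commit; the example_* names are invented for illustration and are not part
of the change.  It assumes the usual kernel headers (<vm/vm.h>,
<vm/vm_page.h>, <vm/pmap.h>).

/*
 * Hypothetical MI caller, sketched to illustrate the convention change.
 */
static int
example_count_references(vm_page_t m)
{
	int refs;

	/*
	 * Before this commit the caller had to wrap the call:
	 *
	 *	vm_page_lock_queues();
	 *	refs = pmap_ts_referenced(m);
	 *	vm_page_unlock_queues();
	 *
	 * After it, pmap_ts_referenced() takes the page queues lock
	 * itself and asserts that it is handed a managed page.
	 */
	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("example_count_references: page %p is not managed", m));
	refs = pmap_ts_referenced(m);
	return (refs);
}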
Diffstat (limited to 'sys/sparc64')
-rw-r--r--  sys/sparc64/sparc64/pmap.c  24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 7769bdc..736602c 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1789,20 +1789,25 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 {
 	struct tte *tp;
 	int loops;
+	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (FALSE);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
 	loops = 0;
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
-		if (TTE_GET_PMAP(tp) == pm)
-			return (TRUE);
+		if (TTE_GET_PMAP(tp) == pm) {
+			rv = TRUE;
+			break;
+		}
 		if (++loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1878,10 +1883,10 @@ pmap_ts_referenced(vm_page_t m)
 	u_long data;
 	int count;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (0);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
 	count = 0;
+	vm_page_lock_queues();
 	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
 		tpf = tp;
 		do {
@@ -1895,6 +1900,7 @@ pmap_ts_referenced(vm_page_t m)
 				break;
 		} while ((tp = tpn) != NULL && tp != tpf);
 	}
+	vm_page_unlock_queues();
 	return (count);
 }
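A side effect of taking the lock inside pmap_page_exists_quick() is visible
in the first hunk: the early "return (TRUE)" becomes "rv = TRUE; break;" so
that the single vm_page_unlock_queues() call covers every exit path.  The
following is a distilled sketch of that pattern; example_scan() is a
hypothetical name, not code from this commit.

/*
 * Single-exit pattern used above: acquire the lock, record the result
 * in a local variable instead of returning from inside the loop, and
 * release the lock exactly once before returning.
 */
static boolean_t
example_scan(pmap_t pm, vm_page_t m)
{
	struct tte *tp;
	boolean_t rv;

	rv = FALSE;
	vm_page_lock_queues();
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		if (TTE_GET_PMAP(tp) == pm) {
			rv = TRUE;
			/* Break, do not return: the queue lock is held. */
			break;
		}
	}
	vm_page_unlock_queues();
	return (rv);
}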