author	alc <alc@FreeBSD.org>	2010-06-10 16:56:35 +0000
committer	alc <alc@FreeBSD.org>	2010-06-10 16:56:35 +0000
commit	7c212e010d5269026628a1e2e686c89679c23af8 (patch)
tree	2e32fd87e6b7bde5898f2e0cb503e19a50ea5f7b /sys/powerpc
parent	c2cccc78fe9cc4c3e971806f8c5ce77d16a4bd39 (diff)
Reduce the scope of the page queues lock and the number of
PG_REFERENCED changes in vm_pageout_object_deactivate_pages().
Simplify this function's inner loop using TAILQ_FOREACH(), and shorten
some of its overly long lines.  Update a stale comment.

Assert that PG_REFERENCED may be cleared only if the object containing
the page is locked.  Add a comment documenting this.

Assert that a caller to vm_page_requeue() holds the page queues lock,
and assert that the page is on a page queue.

Push down the page queues lock into pmap_ts_referenced() and
pmap_page_exists_quick().  (As of now, there are no longer any pmap
functions that expect to be called with the page queues lock held.)

Neither pmap_ts_referenced() nor pmap_page_exists_quick() should ever
be passed an unmanaged page.  Assert this rather than returning "0"
and "FALSE" respectively.

ARM:

Simplify pmap_page_exists_quick() by switching to TAILQ_FOREACH().

Push down the page queues lock inside of pmap_clearbit(), simplifying
pmap_clear_modify(), pmap_clear_reference(), and pmap_remove_write().
Additionally, this allows for avoiding the acquisition of the page
queues lock in some cases.

PowerPC/AIM:

moea*_page_exists_quick() and moea*_page_wired_mappings() will never
be called before pmap initialization is complete.  Therefore, the
check for moea_initialized can be eliminated.

Push down the page queues lock inside of moea*_clear_bit(),
simplifying moea*_clear_modify() and moea*_clear_reference().

The last parameter to moea*_clear_bit() is never used.  Eliminate it.

PowerPC/BookE:

Simplify mmu_booke_page_exists_quick()'s control flow.

Reviewed by:	kib@
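The same locking pattern recurs in every pmap this change touches: the
routine asserts that the page is managed instead of silently returning,
takes the page queues lock itself rather than expecting the caller to
hold it, records its result in a local so that a single unlock path is
always executed, and bounds the pv-list scan at 16 entries.  A minimal
sketch of that shape, using a simplified pv list and field names as
stand-ins for the per-port pvo/pv structures rather than the real MD
interfaces:

	/*
	 * Sketch of the pushed-down locking pattern; pv_list, pv_link,
	 * and pv_pmap below are simplified stand-ins, not the real
	 * machine-dependent structures.
	 */
	boolean_t
	pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
	{
		pv_entry_t pv;
		int loops;
		boolean_t rv;

		/* An unmanaged page is now a caller bug, not a FALSE result. */
		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
		    ("pmap_page_exists_quick: page %p is not managed", m));
		loops = 0;
		rv = FALSE;
		vm_page_lock_queues();	/* taken here, no longer by the caller */
		TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
			if (pv->pv_pmap == pmap) {
				rv = TRUE;	/* record the answer and break... */
				break;		/* ...so the unlock below always runs */
			}
			if (++loops >= 16)	/* "quick": inspect only a few entries */
				break;
		}
		vm_page_unlock_queues();
		return (rv);
	}

Replacing the early "return (TRUE)" with "rv = TRUE; break;" is what
makes the single vm_page_unlock_queues() at the bottom safe; the old
control flow would otherwise have returned with the lock still held.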
Diffstat (limited to 'sys/powerpc')
-rw-r--r--	sys/powerpc/aim/mmu_oea.c	52
-rw-r--r--	sys/powerpc/aim/mmu_oea64.c	54
-rw-r--r--	sys/powerpc/booke/pmap.c	34
3 files changed, 60 insertions(+), 80 deletions(-)
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 3d7845d..b2663be 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -286,7 +286,7 @@ static void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
 		    vm_prot_t, boolean_t);
 static void		moea_syncicache(vm_offset_t, vm_size_t);
 static boolean_t	moea_query_bit(vm_page_t, int);
-static u_int		moea_clear_bit(vm_page_t, int, int *);
+static u_int		moea_clear_bit(vm_page_t, int);
 static void		moea_kremove(mmu_t, vm_offset_t);
 
 int		moea_pte_spill(vm_offset_t);
@@ -1315,9 +1313,7 @@ moea_clear_reference(mmu_t mmu, vm_page_t m)
 
 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 	    ("moea_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
-	moea_clear_bit(m, PTE_REF, NULL);
-	vm_page_unlock_queues();
+	moea_clear_bit(m, PTE_REF);
 }
 
 void
@@ -1337,9 +1335,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
 	 */
 	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
-	moea_clear_bit(m, PTE_CHG, NULL);
-	vm_page_unlock_queues();
+	moea_clear_bit(m, PTE_CHG);
 }
 
 /*
@@ -1409,14 +1405,10 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
 boolean_t
 moea_ts_referenced(mmu_t mmu, vm_page_t m)
 {
-	int count;
-
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (0);
 
-	count = moea_clear_bit(m, PTE_REF, NULL);
-
-	return (count);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea_ts_referenced: page %p is not managed", m));
+	return (moea_clear_bit(m, PTE_REF));
 }
 
 /*
@@ -1531,19 +1523,23 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
 	int loops;
 	struct pvo_entry *pvo;
+	boolean_t rv;
 
-	if (!moea_initialized || (m->flags & PG_FICTITIOUS))
-		return FALSE;
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea_page_exists_quick: page %p is not managed", m));
 	loops = 0;
+	rv = FALSE;
+	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
-		if (pvo->pvo_pmap == pmap)
-			return (TRUE);
+		if (pvo->pvo_pmap == pmap) {
+			rv = TRUE;
+			break;
+		}
 		if (++loops >= 16)
 			break;
 	}
-
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1557,7 +1553,7 @@ moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
 	int count;
 
 	count = 0;
-	if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
+	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
 	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
@@ -2315,17 +2311,17 @@ moea_query_bit(vm_page_t m, int ptebit)
 }
 
 static u_int
-moea_clear_bit(vm_page_t m, int ptebit, int *origbit)
+moea_clear_bit(vm_page_t m, int ptebit)
 {
 	u_int	count;
 	struct	pvo_entry *pvo;
 	struct	pte *pt;
-	int	rv;
+
+	vm_page_lock_queues();
 
 	/*
 	 * Clear the cached value.
 	 */
-	rv = moea_attr_fetch(m);
 	moea_attr_clear(m, ptebit);
 
 	/*
@@ -2353,15 +2349,11 @@ moea_clear_bit(vm_page_t m, int ptebit, int *origbit)
 			}
 			mtx_unlock(&moea_table_mutex);
 		}
-		rv |= pvo->pvo_pte.pte.pte_lo;
 		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
 	}
 
-	if (origbit != NULL) {
-		*origbit = rv;
-	}
-
+	vm_page_unlock_queues();
 	return (count);
 }
 
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 8ff27d0..62d85ce 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -358,7 +358,7 @@ static void moea64_bridge_cpu_bootstrap(mmu_t, int ap);
 static void		moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
 		    vm_prot_t, boolean_t);
 static boolean_t	moea64_query_bit(vm_page_t, u_int64_t);
-static u_int		moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
+static u_int		moea64_clear_bit(vm_page_t, u_int64_t);
 static void		moea64_kremove(mmu_t, vm_offset_t);
 static void		moea64_syncicache(pmap_t pmap, vm_offset_t va,
 			    vm_offset_t pa, vm_size_t sz);
@@ -1510,9 +1510,7 @@ moea64_clear_reference(mmu_t mmu, vm_page_t m)
 
 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 	    ("moea64_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
-	moea64_clear_bit(m, LPTE_REF, NULL);
-	vm_page_unlock_queues();
+	moea64_clear_bit(m, LPTE_REF);
 }
 
 void
@@ -1532,9 +1530,7 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
 	 */
 	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
-	moea64_clear_bit(m, LPTE_CHG, NULL);
-	vm_page_unlock_queues();
+	moea64_clear_bit(m, LPTE_CHG);
 }
 
 /*
@@ -1605,14 +1601,10 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
 boolean_t
 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
 {
-	int count;
-
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (0);
 
-	count = moea64_clear_bit(m, LPTE_REF, NULL);
-
-	return (count);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea64_ts_referenced: page %p is not managed", m));
+	return (moea64_clear_bit(m, LPTE_REF));
 }
 
 /*
@@ -1721,21 +1713,23 @@ moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
 	int loops;
 	struct pvo_entry *pvo;
+	boolean_t rv;
 
-	if (!moea64_initialized || (m->flags & PG_FICTITIOUS))
-		return FALSE;
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea64_page_exists_quick: page %p is not managed", m));
 	loops = 0;
+	rv = FALSE;
+	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
-		if (pvo->pvo_pmap == pmap)
-			return (TRUE);
+		if (pvo->pvo_pmap == pmap) {
+			rv = TRUE;
+			break;
+		}
 		if (++loops >= 16)
 			break;
 	}
-
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1749,7 +1743,7 @@ moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
 	int count;
 
 	count = 0;
-	if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0)
+	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
 	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
@@ -2445,19 +2439,17 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
 }
 
 static u_int
-moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
+moea64_clear_bit(vm_page_t m, u_int64_t ptebit)
 {
 	u_int	count;
 	struct	pvo_entry *pvo;
 	struct	lpte *pt;
-	uint64_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 
 	/*
 	 * Clear the cached value.
 	 */
-	rv = moea64_attr_fetch(m);
 	moea64_attr_clear(m, ptebit);
 
 	/*
@@ -2486,16 +2478,12 @@ moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
 				moea64_pte_clear(pt, pvo->pvo_pmap, PVO_VADDR(pvo), ptebit);
 			}
 		}
-		rv |= pvo->pvo_pte.lpte.pte_lo;
 		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
 		UNLOCK_TABLE();
 	}
 
-	if (origbit != NULL) {
-		*origbit = rv;
-	}
-
+	vm_page_unlock_queues();
 	return (count);
 }
 
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 4bc5e93..2d368b4 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -2293,17 +2293,14 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 	pv_entry_t pv;
 	int count;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (0);
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("mmu_booke_ts_referenced: page %p is not managed", m));
 	count = 0;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
-		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
-			if (!PTE_ISVALID(pte))
-				goto make_sure_to_unlock;
-
+		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+		    PTE_ISVALID(pte)) {
 			if (PTE_ISREFERENCED(pte)) {
 				mtx_lock_spin(&tlbivax_mutex);
 				tlb_miss_lock();
@@ -2320,9 +2317,9 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 				}
 			}
 		}
-make_sure_to_unlock:
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
+	vm_page_unlock_queues();
 
 	return (count);
 }
@@ -2394,20 +2391,23 @@ mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
 	int loops;
+	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (FALSE);
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
 	loops = 0;
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
-		if (pv->pv_pmap == pmap)
-			return (TRUE);
-
+		if (pv->pv_pmap == pmap) {
+			rv = TRUE;
+			break;
+		}
 		if (++loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
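For the moea*_clear_bit() routines the same motion applies one level
down: the wrappers moea*_clear_modify() and moea*_clear_reference()
shrink to a single call, and the lock/unlock pair moves inside the
worker, which also loses its never-used origbit out-parameter and the
rv accumulator that fed it.  A condensed sketch of the resulting worker
shape; attr_clear() and the simplified mapping walk are hypothetical
stand-ins for the real cached-attribute and pvo-list code, which also
synchronizes and clears the bit in the hardware page table entry:

	/*
	 * Condensed shape of the clear-bit worker after this change;
	 * attr_clear() and the loop body are stand-ins, not the real
	 * MOEA code.
	 */
	static u_int
	clear_bit(vm_page_t m, int ptebit)
	{
		struct pvo_entry *pvo;
		u_int count;

		vm_page_lock_queues();	/* formerly taken by every caller */
		attr_clear(m, ptebit);	/* clear the cached attribute value */
		count = 0;
		LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
			/* Count and clear ptebit in each mapping of the page. */
			if ((pvo->pvo_pte.pte.pte_lo & ptebit) != 0)
				count++;
			pvo->pvo_pte.pte.pte_lo &= ~ptebit;
		}
		vm_page_unlock_queues();
		return (count);		/* ts_referenced() reports this directly */
	}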