author     nwhitehorn <nwhitehorn@FreeBSD.org>  2010-03-20 14:35:24 +0000
committer  nwhitehorn <nwhitehorn@FreeBSD.org>  2010-03-20 14:35:24 +0000
commit     313bbb9fc8304d8f772a47b51e5555a46730c007 (patch)
tree       4f5333150872919e4d77be0485359f8f3c63c9d2 /sys/powerpc/aim
parent     faacdebba3f1e504ad3dc174b6abba9f13704a94 (diff)
Revisit locking in the 64-bit AIM PMAP. The PVO head for a page is
generally protected by the VM page queue mutex. Instead of extending the
table lock to cover the PVO heads, add some asserts that the page queue
mutex is in fact held. This fixes several LORs and possible deadlocks.

This also adds an optimization to moea64_kextract() useful for
direct-mapped quantities, like UMA buffers. Being able to use this from
inside UMA removes an additional LOR.
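In effect, the patch inverts the old ordering: instead of wrapping an
entire PVO-list walk in the table lock, callers must already hold
vm_page_queue_mtx, and the table lock is taken innermost, per PVO, under
the pmap lock. A minimal sketch of the resulting pattern (the mutex
names and list macros are the real kernel primitives; the loop body is
abbreviated):

	/* The caller must hold the page queue mutex; assert it. */
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);	/* outer: per-pmap lock */
		LOCK_TABLE();		/* inner: page-table lock */
		/* ... inspect or update pvo->pvo_pte.lpte ... */
		UNLOCK_TABLE();
		PMAP_UNLOCK(pmap);
	}

With the table lock now leaf-most, it is never held while a pmap lock is
acquired, which is what removes the lock-order reversals the message
describes.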
Diffstat (limited to 'sys/powerpc/aim')
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c | 36
1 file changed, 20 insertions(+), 16 deletions(-)
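The moea64_kextract() hunk below is the other half of the change: the
early return makes physical-address extraction for direct-mapped
buffers (the UMA case) lock-free. A sketch of the whole function after
the patch, assuming a vm_paddr_t return type; the last three statements
are reconstructed context not shown in the diff, assuming the usual
RPGN-plus-page-offset computation, so treat them as illustrative rather
than verbatim:

	vm_paddr_t
	moea64_kextract(mmu_t mmu, vm_offset_t va)
	{
		struct pvo_entry *pvo;
		vm_paddr_t pa;

		/*
		 * Fast path: nothing but 1:1 mappings lives below
		 * VM_MIN_KERNEL_ADDRESS, so the VA is the PA and no
		 * PVO lookup (and no PMAP_LOCK) is needed.
		 */
		if (va < VM_MIN_KERNEL_ADDRESS)
			return (va);

		/* Slow path: look the VA up in the kernel pmap. */
		PMAP_LOCK(kernel_pmap);
		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
		KASSERT(pvo != NULL, ("moea64_kextract: no addr found"));
		/* Reconstructed: physical page number plus page offset. */
		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
		    (va & ADDR_POFF);
		PMAP_UNLOCK(kernel_pmap);
		return (pa);
	}

Because the fast path never touches the kernel pmap lock, callers that
already hold allocator-internal locks, such as UMA, can use it without
introducing a new lock order.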
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index bc7e5cf..97fff63 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1507,10 +1507,10 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
return;
lo = moea64_attr_fetch(m);
SYNC();
- LOCK_TABLE();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
+ LOCK_TABLE();
if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
pt = moea64_pvo_to_pte(pvo, -1);
pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
@@ -1523,9 +1523,9 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
pvo->pvo_pmap, PVO_VADDR(pvo));
}
}
+ UNLOCK_TABLE();
PMAP_UNLOCK(pmap);
}
- UNLOCK_TABLE();
if ((lo & LPTE_CHG) != 0) {
moea64_attr_clear(m, LPTE_CHG);
vm_page_dirty(m);
@@ -1604,6 +1604,13 @@ moea64_kextract(mmu_t mmu, vm_offset_t va)
struct pvo_entry *pvo;
vm_paddr_t pa;
+ /*
+ * Shortcut the direct-mapped case when applicable. We never put
+ * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
+ */
+ if (va < VM_MIN_KERNEL_ADDRESS)
+ return (va);
+
PMAP_LOCK(kernel_pmap);
pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
KASSERT(pvo != NULL, ("moea64_kextract: no addr found"));
@@ -1661,17 +1668,15 @@ moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
if (!moea64_initialized || (m->flags & PG_FICTITIOUS))
return FALSE;
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+
loops = 0;
- LOCK_TABLE();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
- if (pvo->pvo_pmap == pmap) {
- UNLOCK_TABLE();
+ if (pvo->pvo_pmap == pmap)
return (TRUE);
- }
if (++loops >= 16)
break;
}
- UNLOCK_TABLE();
return (FALSE);
}
@@ -1690,11 +1695,9 @@ moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0)
return (count);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- LOCK_TABLE();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
count++;
- UNLOCK_TABLE();
return (count);
}
@@ -1913,7 +1916,6 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
pvo_head = vm_page_to_pvoh(m);
- LOCK_TABLE();
for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
next_pvo = LIST_NEXT(pvo, pvo_vlink);
@@ -1923,7 +1925,6 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
moea64_pvo_remove(pvo, -1);
PMAP_UNLOCK(pmap);
}
- UNLOCK_TABLE();
if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
moea64_attr_clear(m, LPTE_CHG);
vm_page_dirty(m);
@@ -2334,7 +2335,8 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
if (moea64_attr_fetch(m) & ptebit)
return (TRUE);
- LOCK_TABLE();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
MOEA_PVO_CHECK(pvo); /* sanity check */
@@ -2344,7 +2346,6 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
*/
if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
moea64_attr_save(m, ptebit);
- UNLOCK_TABLE();
MOEA_PVO_CHECK(pvo); /* sanity check */
return (TRUE);
}
@@ -2364,6 +2365,7 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
* REF/CHG bits from the valid PTE. If the appropriate
* ptebit is set, cache it and return success.
*/
+ LOCK_TABLE();
pt = moea64_pvo_to_pte(pvo, -1);
if (pt != NULL) {
moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
@@ -2375,8 +2377,8 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
return (TRUE);
}
}
+ UNLOCK_TABLE();
}
- UNLOCK_TABLE();
return (FALSE);
}
@@ -2389,6 +2391,8 @@ moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
struct lpte *pt;
uint64_t rv;
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+
/*
* Clear the cached value.
*/
@@ -2409,10 +2413,10 @@ moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
* valid pte clear the ptebit from the valid pte.
*/
count = 0;
- LOCK_TABLE();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
MOEA_PVO_CHECK(pvo); /* sanity check */
+ LOCK_TABLE();
pt = moea64_pvo_to_pte(pvo, -1);
if (pt != NULL) {
moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
@@ -2424,8 +2428,8 @@ moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
rv |= pvo->pvo_pte.lpte.pte_lo;
pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
MOEA_PVO_CHECK(pvo); /* sanity check */
+ UNLOCK_TABLE();
}
- UNLOCK_TABLE();
if (origbit != NULL) {
*origbit = rv;