From c99a93ed4c887d5d5e598ee50101ab2e7850e931 Mon Sep 17 00:00:00 2001
From: alc
Date: Sat, 3 Nov 2012 23:22:49 +0000
Subject: Replace all uses of the page queues lock by a R/W lock that is
 private to this pmap.

Eliminate two redundant #include's.

Tested by:	marcel
---
 sys/powerpc/booke/pmap.c | 74 ++++++++++++++++++++++++++----------------------
 1 file changed, 40 insertions(+), 34 deletions(-)

(limited to 'sys/powerpc/booke/pmap.c')

diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 3931904..f6e5f9c 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -51,7 +51,6 @@
 #include 
 __FBSDID("$FreeBSD$");
 
-#include 
 #include 
 #include 
 #include 
@@ -64,6 +63,7 @@ __FBSDID("$FreeBSD$");
 #include 
 #include 
 #include 
+#include <sys/rwlock.h>
 #include 
 #include 
 #include 
@@ -85,7 +85,6 @@ __FBSDID("$FreeBSD$");
 #include 
 #include 
 
-#include 
 #include 
 #include 
 #include 
@@ -214,6 +213,8 @@ static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
 /* Page table management */
 /**************************************************************************/
 
+static struct rwlock_padalign pvh_global_lock;
+
 /* Data for the pv entry allocation mechanism */
 static uma_zone_t pvzone;
 static struct vm_object pvzone_obj;
@@ -551,9 +552,9 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 
 			PMAP_UNLOCK(pmap);
-			vm_page_unlock_queues();
+			rw_wunlock(&pvh_global_lock);
 			VM_WAIT;
-			vm_page_lock_queues();
+			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
 		mtbl[i] = m;
@@ -743,7 +744,7 @@ pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
 
 	/* add to pv_list */
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 
 	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
 
@@ -760,7 +761,7 @@ pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
 	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 
 	/* find pv entry */
 	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
@@ -1239,6 +1240,11 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	/* Mark kernel_pmap active on all CPUs */
 	CPU_FILL(&kernel_pmap->pm_active);
 
+	/*
+	 * Initialize the global pv list lock.
+	 */
+	rw_init(&pvh_global_lock, "pmap pv global");
+
 	/*******************************************************/
 	/* Final setup */
 	/*******************************************************/
@@ -1522,10 +1528,10 @@ mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, boolean_t wired)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
@@ -1711,14 +1717,14 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
 
 	psize = atop(end - start);
 	m = m_start;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
 		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
@@ -1727,11 +1733,11 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	mmu_booke_enter_locked(mmu, pmap, va, m,
 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
@@ -1768,7 +1774,7 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
 	hold_flag = PTBL_HOLD_FLAG(pmap);
 	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	for (; va < endva; va += PAGE_SIZE) {
 		pte = pte_find(mmu, pmap, va);
@@ -1776,7 +1782,7 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
 			pte_remove(mmu, pmap, va, hold_flag);
 	}
 	PMAP_UNLOCK(pmap);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 
 	//debugf("mmu_booke_remove: e\n");
 }
@@ -1790,7 +1796,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
 	pv_entry_t pv, pvn;
 	uint8_t hold_flag;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
 		pvn = TAILQ_NEXT(pv, pv_link);
@@ -1800,7 +1806,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
@@ -1958,7 +1964,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
@@ -1982,7 +1988,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 static void
@@ -1998,7 +2004,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 	va = trunc_page(va);
 	sz = round_page(sz);
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	pmap = PCPU_GET(curpmap);
 	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
 	while (sz > 0) {
@@ -2025,7 +2031,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 		va += PAGE_SIZE;
 		sz -= PAGE_SIZE;
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -2173,7 +2179,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2185,7 +2191,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
 		if (rv)
 			break;
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
@@ -2214,7 +2220,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_is_referenced: page %p is not managed", m));
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2226,7 +2232,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
 		if (rv)
 			break;
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
@@ -2252,7 +2258,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2271,7 +2277,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -2294,7 +2300,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_ts_referenced: page %p is not managed", m));
 	count = 0;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2317,7 +2323,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
 
@@ -2332,7 +2338,7 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_clear_reference: page %p is not managed", m));
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2350,7 +2356,7 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -2395,7 +2401,7 @@ mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		if (pv->pv_pmap == pmap) {
 			rv = TRUE;
@@ -2404,7 +2410,7 @@ mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 		if (++loops >= 16)
 			break;
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
@@ -2421,7 +2427,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
@@ -2429,7 +2435,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
 				count++;
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
-- 
cgit v1.1
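
The patch applies one uniform transformation: every vm_page_lock_queues()/vm_page_unlock_queues() pair becomes an exclusive acquire/release of the pmap-private pvh_global_lock, and helpers that used to mtx_assert() the page queues mutex now rw_assert() write ownership of the rwlock. The sketch below distills that pattern; the lock declaration, init string, and rw_*() calls are taken from the patch itself, while pv_lock_init(), pv_list_op(), and pv_list_helper() are hypothetical stand-ins written for illustration, not code from the FreeBSD tree.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/*
 * Private, cache-line-padded R/W lock guarding the pv lists,
 * declared exactly as in the patch above.
 */
static struct rwlock_padalign pvh_global_lock;

/* One-time setup; the patch does this in mmu_booke_bootstrap(). */
static void
pv_lock_init(void)
{

	rw_init(&pvh_global_lock, "pmap pv global");
}

/*
 * Post-patch call pattern: take the pv lock exclusively around any
 * pv-list manipulation, where vm_page_lock_queues() was taken before.
 */
static void
pv_list_op(void)
{

	rw_wlock(&pvh_global_lock);
	/* ... walk, insert into, or remove from a page's pv list ... */
	rw_wunlock(&pvh_global_lock);
}

/*
 * Internal helpers such as pv_insert() and pv_remove() do not take
 * the lock themselves; they assert write ownership of the rwlock in
 * place of the old mtx_assert() on the page queues mutex.
 */
static void
pv_list_helper(void)
{

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	/* ... TAILQ manipulation of m->md.pv_list ... */
}

One subtlety the patch preserves from the old locking is visible in the ptbl_alloc() hunk: VM_WAIT can sleep, so both the pmap lock and pvh_global_lock are dropped before sleeping and reacquired afterwards. The new rwlock inherits the same drop/reacquire discipline the page queues mutex required.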