From 25782a7fab8e1a1c60517dab118fec0a98648dd6 Mon Sep 17 00:00:00 2001
From: kib
Date: Sun, 24 Aug 2014 07:53:15 +0000
Subject: Merge the changes to pmap_enter(9) for sleep-less operation
 (requested by flag).  The ia64 pmap.c changes are direct commit, since
 ia64 is removed on head.

MFC r269368 (by alc):
Retire PVO_EXECUTABLE.

MFC r269728:
Change pmap_enter(9) interface to take flags parameter and superpage
mapping size (currently unused).

MFC r269759 (by alc):
Update the text of a KASSERT() to reflect the changes in r269728.

MFC r269822 (by alc):
Change {_,}pmap_allocpte() so that they look for the flag
PMAP_ENTER_NOSLEEP instead of M_NOWAIT/M_WAITOK when deciding whether
to sleep on page table page allocation.

MFC r270151 (by alc):
Replace KASSERT that no PV list locks are held with a conditional
unlock.

Reviewed by:	alc
Approved by:	re (gjb)
Sponsored by:	The FreeBSD Foundation
---
 sys/powerpc/aim/mmu_oea.c | 49 +++++++++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 22 deletions(-)

diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index e9b3a44..c7811ef 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -258,8 +258,8 @@ static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);
 /*
  * Utility routines.
  */
-static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
-			    vm_prot_t, boolean_t);
+static int		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+			    vm_prot_t, u_int, int8_t);
 static void		moea_syncicache(vm_offset_t, vm_size_t);
 static boolean_t	moea_query_bit(vm_page_t, int);
 static u_int		moea_clear_bit(vm_page_t, int);
@@ -274,7 +274,8 @@ void moea_clear_modify(mmu_t, vm_page_t);
 void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
+    int8_t);
 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
 void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
@@ -1104,16 +1105,25 @@ moea_zero_page_idle(mmu_t mmu, vm_page_t m)
  * target pmap with the protection requested.  If specified the page
  * will be wired down.
  */
-void
+int
 moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+    u_int flags, int8_t psind)
 {
+	int error;
 
-	rw_wlock(&pvh_global_lock);
-	PMAP_LOCK(pmap);
-	moea_enter_locked(pmap, va, m, prot, wired);
-	rw_wunlock(&pvh_global_lock);
-	PMAP_UNLOCK(pmap);
+	for (;;) {
+		rw_wlock(&pvh_global_lock);
+		PMAP_LOCK(pmap);
+		error = moea_enter_locked(pmap, va, m, prot, flags, psind);
+		rw_wunlock(&pvh_global_lock);
+		PMAP_UNLOCK(pmap);
+		if (error != ENOMEM)
+			return (KERN_SUCCESS);
+		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+			return (KERN_RESOURCE_SHORTAGE);
+		VM_OBJECT_ASSERT_UNLOCKED(m->object);
+		VM_WAIT;
+	}
 }
 
 /*
@@ -1123,9 +1133,9 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
  *
  * The page queues and pmap must be locked.
  */
-static void
+static int
 moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+    u_int flags, int8_t psind __unused)
 {
 	struct pvo_head *pvo_head;
 	uma_zone_t zone;
@@ -1167,10 +1177,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	} else
 		pte_lo |= PTE_BR;
 
-	if (prot & VM_PROT_EXECUTE)
-		pvo_flags |= PVO_EXECUTABLE;
-
-	if (wired)
+	if ((flags & PMAP_ENTER_WIRED) != 0)
 		pvo_flags |= PVO_WIRED;
 
 	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
@@ -1185,6 +1192,8 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if (pmap != kernel_pmap && error == ENOENT &&
 	    (pte_lo & (PTE_I | PTE_G)) == 0)
 		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
+
+	return (error);
 }
 
 /*
@@ -1214,7 +1223,7 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		moea_enter_locked(pm, start + ptoa(diff), m, prot &
-		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+		    (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
 		m = TAILQ_NEXT(m, listq);
 	}
 	rw_wunlock(&pvh_global_lock);
@@ -1229,7 +1238,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-	    FALSE);
+	    0, 0);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
@@ -1725,8 +1734,6 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); pvo != NULL &&
 	    PVO_VADDR(pvo) < eva; pvo = tpvo) {
 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
-		if ((prot & VM_PROT_EXECUTE) == 0)
-			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
 
 		/*
 		 * Grab the PTE pointer before we diddle with the cached PTE
@@ -1968,8 +1975,6 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
 	pvo->pvo_pmap = pm;
 	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
 	pvo->pvo_vaddr &= ~ADDR_POFF;
-	if (flags & VM_PROT_EXECUTE)
-		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
 		pvo->pvo_vaddr |= PVO_WIRED;
 	if (pvo_head != &moea_pvo_kunmanaged)
--
cgit v1.1
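
For readers coming to this merge cold, the key KPI change is that pmap_enter(9)
now returns a Mach error code and takes a flags word in place of the old
boolean_t wired argument.  The following is a minimal caller-side sketch and
is not part of this commit: the helper map_page_nosleep() is hypothetical,
and it only illustrates how machine-independent code is expected to pass
PMAP_ENTER_WIRED/PMAP_ENTER_NOSLEEP and handle KERN_RESOURCE_SHORTAGE under
the post-r269728 interface.

	/*
	 * Hypothetical MI caller of the new pmap_enter(9) interface;
	 * for illustration only, not part of this commit.
	 */
	#include <sys/param.h>
	#include <sys/errno.h>
	#include <vm/vm.h>
	#include <vm/vm_param.h>
	#include <vm/pmap.h>

	static int
	map_page_nosleep(pmap_t pmap, vm_offset_t va, vm_page_t m,
	    vm_prot_t prot, boolean_t wired)
	{
		u_int flags;
		int rv;

		/* The access type travels in the low bits of the flags word. */
		flags = prot & VM_PROT_ALL;
		if (wired)
			flags |= PMAP_ENTER_WIRED;	/* replaces the old boolean */
		flags |= PMAP_ENTER_NOSLEEP;		/* fail rather than sleep */

		/* psind == 0 asks for a base-page mapping (superpages unused). */
		rv = pmap_enter(pmap, va, m, prot, flags, 0);
		if (rv == KERN_RESOURCE_SHORTAGE)
			return (ENOMEM);	/* let the caller back off and retry */
		return (0);
	}

The retry loop added to moea_enter() in this patch is the same contract seen
from the other side: without PMAP_ENTER_NOSLEEP the pmap layer itself waits
for memory (VM_WAIT) with the PV and pmap locks dropped, while a NOSLEEP
caller gets KERN_RESOURCE_SHORTAGE back and decides how to recover, so the
MD code never sleeps while holding those locks.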