diff options
author | kib <kib@FreeBSD.org> | 2014-08-24 07:53:15 +0000 |
---|---|---|
committer | kib <kib@FreeBSD.org> | 2014-08-24 07:53:15 +0000 |
commit | 25782a7fab8e1a1c60517dab118fec0a98648dd6 (patch) | |
tree | 007faf87a4ebb0e25b4a426bc07ab2babbead757 /sys/powerpc/aim/mmu_oea64.c | |
parent | bee605bad28d58f69b83b3197efb0bd49b38de99 (diff) | |
download | FreeBSD-src-25782a7fab8e1a1c60517dab118fec0a98648dd6.zip FreeBSD-src-25782a7fab8e1a1c60517dab118fec0a98648dd6.tar.gz |
Merge the changes to pmap_enter(9) for sleep-less operation (requested
by flag). The ia64 pmap.c changes are direct commit, since ia64 is
removed on head.
MFC r269368 (by alc):
Retire PVO_EXECUTABLE.
MFC r269728:
Change pmap_enter(9) interface to take flags parameter and superpage
mapping size (currently unused).
MFC r269759 (by alc):
Update the text of a KASSERT() to reflect the changes in r269728.
MFC r269822 (by alc):
Change {_,}pmap_allocpte() so that they look for the flag
PMAP_ENTER_NOSLEEP instead of M_NOWAIT/M_WAITOK when deciding whether
to sleep on page table page allocation.
MFC r270151 (by alc):
Replace KASSERT that no PV list locks are held with a conditional
unlock.
Reviewed by: alc
Approved by: re (gjb)
Sponsored by: The FreeBSD Foundation
Diffstat (limited to 'sys/powerpc/aim/mmu_oea64.c')
-rw-r--r-- | sys/powerpc/aim/mmu_oea64.c | 47 |
1 file changed, 29 insertions, 18 deletions
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c index b47b94d..ceca204 100644 --- a/sys/powerpc/aim/mmu_oea64.c +++ b/sys/powerpc/aim/mmu_oea64.c @@ -267,7 +267,7 @@ int moea64_large_page_shift = 0; * PVO calls. */ static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *, - vm_offset_t, vm_offset_t, uint64_t, int); + vm_offset_t, vm_offset_t, uint64_t, int, int8_t); static void moea64_pvo_remove(mmu_t, struct pvo_entry *); static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); @@ -288,7 +288,8 @@ void moea64_clear_modify(mmu_t, vm_page_t); void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, vm_page_t *mb, vm_offset_t b_offset, int xfersize); -void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); +int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, + u_int flags, int8_t psind); void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, vm_prot_t); void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); @@ -627,7 +628,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart, moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone, NULL, pa, pa, pte_lo, - PVO_WIRED | PVO_LARGE); + PVO_WIRED | PVO_LARGE, 0); } } PMAP_UNLOCK(kernel_pmap); @@ -1245,9 +1246,9 @@ moea64_zero_page_idle(mmu_t mmu, vm_page_t m) * will be wired down. 
*/ -void +int moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, - vm_prot_t prot, boolean_t wired) + vm_prot_t prot, u_int flags, int8_t psind) { struct pvo_head *pvo_head; uma_zone_t zone; @@ -1291,15 +1292,23 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, if ((prot & VM_PROT_EXECUTE) == 0) pte_lo |= LPTE_NOEXEC; - if (wired) + if ((flags & PMAP_ENTER_WIRED) != 0) pvo_flags |= PVO_WIRED; - LOCK_TABLE_WR(); - PMAP_LOCK(pmap); - error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va, - VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags); - PMAP_UNLOCK(pmap); - UNLOCK_TABLE_WR(); + for (;;) { + LOCK_TABLE_WR(); + PMAP_LOCK(pmap); + error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va, + VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags, psind); + PMAP_UNLOCK(pmap); + UNLOCK_TABLE_WR(); + if (error != ENOMEM) + break; + if ((flags & PMAP_ENTER_NOSLEEP) != 0) + return (KERN_RESOURCE_SHORTAGE); + VM_OBJECT_ASSERT_UNLOCKED(m->object); + VM_WAIT; + } /* * Flush the page from the instruction cache if this page is @@ -1310,6 +1319,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_aflag_set(m, PGA_EXECUTABLE); moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); } + return (KERN_SUCCESS); } static void @@ -1374,7 +1384,7 @@ moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, m = m_start; while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { moea64_enter(mmu, pm, start + ptoa(diff), m, prot & - (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); + (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0); m = TAILQ_NEXT(m, listq); } } @@ -1384,8 +1394,8 @@ moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot) { - moea64_enter(mmu, pm, va, m, - prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); + moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), + PMAP_ENTER_NOSLEEP, 0); } vm_paddr_t @@ -1473,7 +1483,8 @@ moea64_uma_page_alloc(uma_zone_t zone, 
int bytes, u_int8_t *flags, int wait) PMAP_LOCK(kernel_pmap); moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone, - NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP); + NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP, + 0); if (needed_lock) PMAP_UNLOCK(kernel_pmap); @@ -1695,7 +1706,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) LOCK_TABLE_WR(); PMAP_LOCK(kernel_pmap); error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone, - NULL, va, pa, pte_lo, PVO_WIRED); + NULL, va, pa, pte_lo, PVO_WIRED, 0); PMAP_UNLOCK(kernel_pmap); UNLOCK_TABLE_WR(); @@ -2193,7 +2204,7 @@ moea64_bootstrap_alloc(vm_size_t size, u_int align) static int moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa, - uint64_t pte_lo, int flags) + uint64_t pte_lo, int flags, int8_t psind __unused) { struct pvo_entry *pvo; uint64_t vsid; |