| author | kib <kib@FreeBSD.org> | 2014-08-24 07:53:15 +0000 |
|---|---|---|
| committer | kib <kib@FreeBSD.org> | 2014-08-24 07:53:15 +0000 |
| commit | 25782a7fab8e1a1c60517dab118fec0a98648dd6 (patch) | |
| tree | 007faf87a4ebb0e25b4a426bc07ab2babbead757 | |
| parent | bee605bad28d58f69b83b3197efb0bd49b38de99 (diff) | |
Merge the changes to pmap_enter(9) for sleep-less operation (requested
by the PMAP_ENTER_NOSLEEP flag). The ia64 pmap.c changes are a direct
commit, since ia64 has been removed from head.
MFC r269368 (by alc):
Retire PVO_EXECUTABLE.
MFC r269728:
Change pmap_enter(9) interface to take flags parameter and superpage
mapping size (currently unused).
MFC r269759 (by alc):
Update the text of a KASSERT() to reflect the changes in r269728.
MFC r269822 (by alc):
Change {_,}pmap_allocpte() so that they look for the flag
PMAP_ENTER_NOSLEEP instead of M_NOWAIT/M_WAITOK when deciding whether
to sleep on page table page allocation.
MFC r270151 (by alc):
Replace KASSERT that no PV list locks are held with a conditional
unlock.
Reviewed by: alc
Approved by: re (gjb)
Sponsored by: The FreeBSD Foundation
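
The heart of the interface change described above (MFC r269728) is the new pmap_enter(9) prototype and flag bits added to sys/vm/pmap.h, visible in the diff below. As a reading aid, here is a minimal restatement of the new contract, with the definitions taken directly from that diff:

```c
/*
 * The pmap_enter(9) contract after this merge (from sys/vm/pmap.h in
 * the diff below).  The low-order byte of "flags" is reserved for the
 * vm_prot_t fault type; the remaining bits select pmap behavior.
 */
#define	PMAP_ENTER_NOSLEEP	0x0100	/* fail rather than sleep */
#define	PMAP_ENTER_WIRED	0x0200	/* create a wired mapping */

int	pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
	    vm_prot_t prot, u_int flags, int8_t psind);
```

With PMAP_ENTER_NOSLEEP set, an implementation returns KERN_RESOURCE_SHORTAGE instead of sleeping for a page-table page; otherwise it sleeps as before and eventually returns KERN_SUCCESS.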
| -rw-r--r-- | sys/amd64/amd64/pmap.c | 27 |
| -rw-r--r-- | sys/arm/arm/pmap-v6.c | 73 |
| -rw-r--r-- | sys/arm/arm/pmap.c | 41 |
| -rw-r--r-- | sys/i386/i386/pmap.c | 52 |
| -rw-r--r-- | sys/i386/xen/pmap.c | 51 |
| -rw-r--r-- | sys/ia64/ia64/pmap.c | 16 |
| -rw-r--r-- | sys/mips/mips/pmap.c | 53 |
| -rw-r--r-- | sys/powerpc/aim/mmu_oea.c | 49 |
| -rw-r--r-- | sys/powerpc/aim/mmu_oea64.c | 47 |
| -rw-r--r-- | sys/powerpc/booke/pmap.c | 78 |
| -rw-r--r-- | sys/powerpc/include/pmap.h | 1 |
| -rw-r--r-- | sys/powerpc/powerpc/mmu_if.m | 8 |
| -rw-r--r-- | sys/powerpc/powerpc/pmap_dispatch.c | 12 |
| -rw-r--r-- | sys/sparc64/sparc64/pmap.c | 26 |
| -rw-r--r-- | sys/vm/pmap.h | 11 |
| -rw-r--r-- | sys/vm/vm_fault.c | 6 |
| -rw-r--r-- | sys/vm/vm_kern.c | 11 |
17 files changed, 320 insertions, 242 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 7aab65b..1eb7768 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -4116,9 +4116,9 @@ setpte:
  * or lose information. That is, this routine must actually
  * insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
     struct rwlock *lock;
     pd_entry_t *pde;
@@ -4127,6 +4127,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     pv_entry_t pv;
     vm_paddr_t opa, pa;
     vm_page_t mpte, om;
+    boolean_t nosleep;
 
     PG_A = pmap_accessed_bit(pmap);
     PG_G = pmap_global_bit(pmap);
@@ -4143,18 +4144,18 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         va >= kmi.clean_eva,
         ("pmap_enter: managed mapping within the clean submap"));
     if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-        VM_OBJECT_ASSERT_WLOCKED(m->object);
+        VM_OBJECT_ASSERT_LOCKED(m->object);
     pa = VM_PAGE_TO_PHYS(m);
     newpte = (pt_entry_t)(pa | PG_A | PG_V);
-    if ((access & VM_PROT_WRITE) != 0)
+    if ((flags & VM_PROT_WRITE) != 0)
         newpte |= PG_M;
     if ((prot & VM_PROT_WRITE) != 0)
         newpte |= PG_RW;
     KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
-        ("pmap_enter: access includes VM_PROT_WRITE but prot doesn't"));
+        ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
     if ((prot & VM_PROT_EXECUTE) == 0)
         newpte |= pg_nx;
-    if (wired)
+    if ((flags & PMAP_ENTER_WIRED) != 0)
         newpte |= PG_W;
     if (va < VM_MAXUSER_ADDRESS)
         newpte |= PG_U;
@@ -4196,7 +4197,16 @@ retry:
         /*
          * Here if the pte page isn't mapped, or if it has been
          * deallocated.
          */
-        mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), &lock);
+        nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
+        mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va),
+            nosleep ? NULL : &lock);
+        if (mpte == NULL && nosleep) {
+            if (lock != NULL)
+                rw_wunlock(lock);
+            rw_runlock(&pvh_global_lock);
+            PMAP_UNLOCK(pmap);
+            return (KERN_RESOURCE_SHORTAGE);
+        }
         goto retry;
     } else
         panic("pmap_enter: invalid page directory va=%#lx", va);
@@ -4328,6 +4338,7 @@ unchanged:
         rw_wunlock(lock);
     rw_runlock(&pvh_global_lock);
     PMAP_UNLOCK(pmap);
+    return (KERN_SUCCESS);
 }
 
 /*
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 2690092..197a2eb 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -231,8 +231,8 @@ static boolean_t pmap_pv_insert_section(pmap_t, vm_offset_t,
 static struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vm_offset_t);
 static int pmap_pvh_wired_mappings(struct md_page *, int);
 
-static void pmap_enter_locked(pmap_t, vm_offset_t, vm_prot_t,
-    vm_page_t, vm_prot_t, boolean_t, int);
+static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, u_int);
 static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va);
 static void pmap_alloc_l1(pmap_t);
 static void pmap_free_l1(pmap_t);
@@ -2944,35 +2944,38 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  * insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
     struct l2_bucket *l2b;
+    int rv;
 
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
-    pmap_enter_locked(pmap, va, access, m, prot, wired, M_WAITOK);
-    /*
-     * If both the l2b_occupancy and the reservation are fully
-     * populated, then attempt promotion.
-     */
-    l2b = pmap_get_l2_bucket(pmap, va);
-    if ((l2b != NULL) && (l2b->l2b_occupancy == L2_PTE_NUM_TOTAL) &&
-        sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
-        vm_reserv_level_iffullpop(m) == 0)
-        pmap_promote_section(pmap, va);
-
+    rv = pmap_enter_locked(pmap, va, m, prot, flags);
+    if (rv == KERN_SUCCESS) {
+        /*
+         * If both the l2b_occupancy and the reservation are fully
+         * populated, then attempt promotion.
+         */
+        l2b = pmap_get_l2_bucket(pmap, va);
+        if (l2b != NULL && l2b->l2b_occupancy == L2_PTE_NUM_TOTAL &&
+            sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+            vm_reserv_level_iffullpop(m) == 0)
+            pmap_promote_section(pmap, va);
+    }
     PMAP_UNLOCK(pmap);
     rw_wunlock(&pvh_global_lock);
+    return (rv);
 }
 
 /*
  * The pvh global and pmap locks must be held.
  */
-static void
-pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired, int flags)
+static int
+pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags)
 {
     struct l2_bucket *l2b = NULL;
     struct vm_page *om;
@@ -2990,9 +2993,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         pa = systempage.pv_pa;
         m = NULL;
     } else {
-        KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
-            vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
-            ("pmap_enter_locked: page %p is not busy", m));
+        if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+            VM_OBJECT_ASSERT_LOCKED(m->object);
         pa = VM_PAGE_TO_PHYS(m);
     }
 
@@ -3013,12 +3015,12 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     if (prot & VM_PROT_WRITE)
         nflags |= PVF_WRITE;
-    if (wired)
+    if ((flags & PMAP_ENTER_WIRED) != 0)
         nflags |= PVF_WIRED;
 
     PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, "
-        "prot = %x, wired = %x\n", (uint32_t) pmap, va, (uint32_t) m,
-        prot, wired));
+        "prot = %x, flags = %x\n", (uint32_t) pmap, va, (uint32_t) m,
+        prot, flags));
 
     if (pmap == pmap_kernel()) {
         l2b = pmap_get_l2_bucket(pmap, va);
@@ -3028,7 +3030,7 @@ do_l2b_alloc:
         l2b = pmap_alloc_l2_bucket(pmap, va);
         if (l2b == NULL) {
-            if (flags & M_WAITOK) {
+            if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
                 PMAP_UNLOCK(pmap);
                 rw_wunlock(&pvh_global_lock);
                 VM_WAIT;
@@ -3036,7 +3038,7 @@ do_l2b_alloc:
                 PMAP_LOCK(pmap);
                 goto do_l2b_alloc;
             }
-            return;
+            return (KERN_RESOURCE_SHORTAGE);
         }
     }
 
@@ -3195,6 +3197,7 @@ validate:
     if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
         cpu_icache_sync_range(va, PAGE_SIZE);
+    return (KERN_SUCCESS);
 }
 
 /*
@@ -3216,13 +3219,12 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
     vm_offset_t va;
     vm_page_t m;
     vm_pindex_t diff, psize;
-    vm_prot_t access;
 
     VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
     psize = atop(end - start);
     m = m_start;
-    access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
+    prot &= VM_PROT_READ | VM_PROT_EXECUTE;
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
     while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
@@ -3232,8 +3234,8 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
             pmap_enter_section(pmap, va, m, prot))
             m = &m[L1_S_SIZE / PAGE_SIZE - 1];
         else
-            pmap_enter_locked(pmap, va, access, m, prot,
-                FALSE, M_NOWAIT);
+            pmap_enter_locked(pmap, va, m, prot,
+                PMAP_ENTER_NOSLEEP);
         m = TAILQ_NEXT(m, listq);
     }
     PMAP_UNLOCK(pmap);
@@ -3252,12 +3254,11 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
-    vm_prot_t access;
 
-    access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
+    prot &= VM_PROT_READ | VM_PROT_EXECUTE;
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
-    pmap_enter_locked(pmap, va, access, m, prot, FALSE, M_NOWAIT);
+    pmap_enter_locked(pmap, va, m, prot, PMAP_ENTER_NOSLEEP);
     PMAP_UNLOCK(pmap);
     rw_wunlock(&pvh_global_lock);
 }
@@ -3488,8 +3489,8 @@ pmap_pinit(pmap_t pmap)
     pmap->pm_stats.resident_count = 1;
     if (vector_page < KERNBASE) {
         pmap_enter(pmap, vector_page,
-            VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
-            VM_PROT_READ, 1);
+            PHYS_TO_VM_PAGE(systempage.pv_pa), VM_PROT_READ,
+            PMAP_ENTER_WIRED, 0);
     }
     return (1);
 }
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index d19306d..802d5ee 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -199,8 +199,8 @@ extern int last_fault_code;
 static void pmap_free_pv_entry (pv_entry_t);
 static pv_entry_t pmap_get_pv_entry(void);
 
-static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
-    vm_prot_t, boolean_t, int);
+static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, u_int);
 static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va);
 static void pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
 static void pmap_alloc_l1(pmap_t);
@@ -3204,24 +3204,26 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  * insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
+    int rv;
 
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
-    pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
+    rv = pmap_enter_locked(pmap, va, m, prot, flags);
     rw_wunlock(&pvh_global_lock);
     PMAP_UNLOCK(pmap);
+    return (rv);
 }
 
 /*
  * The pvh global and pmap locks must be held.
  */
-static void
+static int
 pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired, int flags)
+    u_int flags)
 {
     struct l2_bucket *l2b = NULL;
     struct vm_page *opg;
@@ -3237,9 +3239,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         pa = systempage.pv_pa;
         m = NULL;
     } else {
-        KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
-            vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
-            ("pmap_enter_locked: page %p is not busy", m));
+        if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+            VM_OBJECT_ASSERT_LOCKED(m->object);
         pa = VM_PAGE_TO_PHYS(m);
     }
     nflags = 0;
@@ -3247,10 +3248,10 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         nflags |= PVF_WRITE;
     if (prot & VM_PROT_EXECUTE)
         nflags |= PVF_EXEC;
-    if (wired)
+    if ((flags & PMAP_ENTER_WIRED) != 0)
         nflags |= PVF_WIRED;
     PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
-        "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
+        "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags));
 
     if (pmap == pmap_kernel()) {
         l2b = pmap_get_l2_bucket(pmap, va);
@@ -3260,7 +3261,7 @@ do_l2b_alloc:
         l2b = pmap_alloc_l2_bucket(pmap, va);
         if (l2b == NULL) {
-            if (flags & M_WAITOK) {
+            if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
                 PMAP_UNLOCK(pmap);
                 rw_wunlock(&pvh_global_lock);
                 VM_WAIT;
@@ -3268,7 +3269,7 @@ do_l2b_alloc:
                 PMAP_LOCK(pmap);
                 goto do_l2b_alloc;
             }
-            return;
+            return (KERN_RESOURCE_SHORTAGE);
         }
     }
 
@@ -3482,6 +3483,7 @@ do_l2b_alloc:
         if (m)
             pmap_fix_cache(m, pmap, va);
     }
+    return (KERN_SUCCESS);
 }
 
 /*
@@ -3511,7 +3513,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
     PMAP_LOCK(pmap);
     while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
         pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
-            (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
+            (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP);
         m = TAILQ_NEXT(m, listq);
     }
     rw_wunlock(&pvh_global_lock);
@@ -3534,7 +3536,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
     pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-        FALSE, M_NOWAIT);
+        PMAP_ENTER_NOSLEEP);
     rw_wunlock(&pvh_global_lock);
     PMAP_UNLOCK(pmap);
 }
@@ -3746,9 +3748,8 @@ pmap_pinit(pmap_t pmap)
     bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
     pmap->pm_stats.resident_count = 1;
     if (vector_page < KERNBASE) {
-        pmap_enter(pmap, vector_page,
-            VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
-            VM_PROT_READ, 1);
+        pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa),
+            VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0);
     }
     return (1);
 }
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index e5f564a..e191d3a 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -331,9 +331,9 @@ static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
             pd_entry_t newpde);
 static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
 
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
 
-static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
 static void pmap_pte_release(pt_entry_t *pte);
@@ -1818,21 +1818,17 @@ pmap_pinit(pmap_t pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
 {
     vm_paddr_t ptepa;
     vm_page_t m;
 
-    KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-        (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-        ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
     /*
      * Allocate a page table page.
      */
     if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
         VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
-        if (flags & M_WAITOK) {
+        if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
             PMAP_UNLOCK(pmap);
             rw_wunlock(&pvh_global_lock);
             VM_WAIT;
@@ -1864,16 +1860,12 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
 {
     u_int ptepindex;
     pd_entry_t ptepa;
     vm_page_t m;
 
-    KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-        (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-        ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
     /*
      * Calculate pagetable page index
      */
@@ -1906,7 +1898,7 @@ retry:
          * been deallocated.
          */
         m = _pmap_allocpte(pmap, ptepindex, flags);
-        if (m == NULL && (flags & M_WAITOK))
+        if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
             goto retry;
     }
     return (m);
@@ -3458,9 +3450,9 @@ setpte:
  * or lose information. That is, this routine must actually
  * insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind)
 {
     pd_entry_t *pde;
     pt_entry_t *pte;
@@ -3468,17 +3460,18 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     pv_entry_t pv;
     vm_paddr_t opa, pa;
     vm_page_t mpte, om;
-    boolean_t invlva;
+    boolean_t invlva, wired;
 
     va = trunc_page(va);
+    mpte = NULL;
+    wired = (flags & PMAP_ENTER_WIRED) != 0;
+
     KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
     KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
         ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
         va));
     if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-        VM_OBJECT_ASSERT_WLOCKED(m->object);
-
-    mpte = NULL;
+        VM_OBJECT_ASSERT_LOCKED(m->object);
 
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
@@ -3489,7 +3482,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
      * resident, we are creating it here.
      */
     if (va < VM_MAXUSER_ADDRESS) {
-        mpte = pmap_allocpte(pmap, va, M_WAITOK);
+        mpte = pmap_allocpte(pmap, va, flags);
+        if (mpte == NULL) {
+            KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+                ("pmap_allocpte failed with sleep allowed"));
+            sched_unpin();
+            rw_wunlock(&pvh_global_lock);
+            PMAP_UNLOCK(pmap);
+            return (KERN_RESOURCE_SHORTAGE);
+        }
     }
 
     pde = pmap_pde(pmap, va);
@@ -3607,7 +3608,7 @@ validate:
      */
     if ((origpte & ~(PG_M|PG_A)) != newpte) {
         newpte |= PG_A;
-        if ((access & VM_PROT_WRITE) != 0)
+        if ((flags & VM_PROT_WRITE) != 0)
             newpte |= PG_M;
         if (origpte & PG_V) {
             invlva = FALSE;
@@ -3652,6 +3653,7 @@ validate:
     sched_unpin();
     rw_wunlock(&pvh_global_lock);
     PMAP_UNLOCK(pmap);
+    return (KERN_SUCCESS);
 }
 
 /*
@@ -3817,7 +3819,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
             mpte->wire_count++;
         } else {
             mpte = _pmap_allocpte(pmap, ptepindex,
-                M_NOWAIT);
+                PMAP_ENTER_NOSLEEP);
             if (mpte == NULL)
                 return (mpte);
         }
@@ -4102,7 +4104,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
              */
             if ((ptetemp & PG_MANAGED) != 0) {
                 dstmpte = pmap_allocpte(dst_pmap, addr,
-                    M_NOWAIT);
+                    PMAP_ENTER_NOSLEEP);
                 if (dstmpte == NULL)
                     goto out;
                 dst_pte = pmap_pte_quick(dst_pmap, addr);
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index fa9bda8..fb7781d 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -298,9 +298,9 @@ static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
             vm_page_t m);
 
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
 
-static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
 static void pmap_pte_release(pt_entry_t *pte);
@@ -1546,21 +1546,17 @@ pmap_pinit(pmap_t pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
 {
     vm_paddr_t ptema;
     vm_page_t m;
 
-    KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-        (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-        ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
     /*
      * Allocate a page table page.
      */
     if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
         VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
-        if (flags & M_WAITOK) {
+        if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
             PMAP_UNLOCK(pmap);
             rw_wunlock(&pvh_global_lock);
             VM_WAIT;
@@ -1595,16 +1591,12 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
 {
     u_int ptepindex;
     pd_entry_t ptema;
     vm_page_t m;
 
-    KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-        (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-        ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
     /*
      * Calculate pagetable page index
      */
@@ -1644,7 +1636,7 @@ retry:
     CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x",
         pmap, va, flags);
         m = _pmap_allocpte(pmap, ptepindex, flags);
-        if (m == NULL && (flags & M_WAITOK))
+        if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
             goto retry;
     KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex));
@@ -2643,9 +2635,9 @@ retry:
  * or lose information. That is, this routine must actually
  * insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
     pd_entry_t *pde;
     pt_entry_t *pte;
@@ -2653,19 +2645,21 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     pv_entry_t pv;
     vm_paddr_t opa, pa;
     vm_page_t mpte, om;
-    boolean_t invlva;
+    boolean_t invlva, wired;
 
-    CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
-        pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired);
+    CTR5(KTR_PMAP,
+        "pmap_enter: pmap=%08p va=0x%08x ma=0x%08x prot=0x%x flags=0x%x",
+        pmap, va, VM_PAGE_TO_MACH(m), prot, flags);
     va = trunc_page(va);
     KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
     KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
         ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
         va));
     if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-        VM_OBJECT_ASSERT_WLOCKED(m->object);
+        VM_OBJECT_ASSERT_LOCKED(m->object);
 
     mpte = NULL;
+    wired = (flags & PMAP_ENTER_WIRED) != 0;
 
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
@@ -2676,7 +2670,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
      * resident, we are creating it here.
      */
     if (va < VM_MAXUSER_ADDRESS) {
-        mpte = pmap_allocpte(pmap, va, M_WAITOK);
+        mpte = pmap_allocpte(pmap, va, flags);
+        if (mpte == NULL) {
+            KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+                ("pmap_allocpte failed with sleep allowed"));
+            sched_unpin();
+            rw_wunlock(&pvh_global_lock);
+            PMAP_UNLOCK(pmap);
+            return (KERN_RESOURCE_SHORTAGE);
+        }
     }
 
     pde = pmap_pde(pmap, va);
@@ -2842,6 +2844,7 @@ validate:
     sched_unpin();
     rw_wunlock(&pvh_global_lock);
     PMAP_UNLOCK(pmap);
+    return (KERN_SUCCESS);
 }
 
 /*
@@ -2996,7 +2999,7 @@ pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_o
             mpte->wire_count++;
         } else {
             mpte = _pmap_allocpte(pmap, ptepindex,
-                M_NOWAIT);
+                PMAP_ENTER_NOSLEEP);
             if (mpte == NULL)
                 return (mpte);
         }
@@ -3287,7 +3290,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
              */
             if ((ptetemp & PG_MANAGED) != 0) {
                 dstmpte = pmap_allocpte(dst_pmap, addr,
-                    M_NOWAIT);
+                    PMAP_ENTER_NOSLEEP);
                 if (dstmpte == NULL)
                     goto out;
                 dst_pte = pmap_pte_quick(dst_pmap, addr);
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 3fb437c..e3f368c 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1692,20 +1692,21 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  * or lose information. That is, this routine must actually
  * insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
     pmap_t oldpmap;
     vm_offset_t pa;
     vm_offset_t opa;
     struct ia64_lpte origpte;
     struct ia64_lpte *pte;
-    boolean_t icache_inval, managed;
+    boolean_t icache_inval, managed, wired;
 
-    CTR6(KTR_PMAP, "pmap_enter(pm=%p, va=%#lx, acc=%#x, m=%p, prot=%#x, "
-        "wired=%u)", pmap, va, access, m, prot, wired);
+    CTR5(KTR_PMAP, "pmap_enter(pm=%p, va=%#lx, m=%p, prot=%#x, "
+        "flags=%u)", pmap, va, m, prot, flags);
 
+    wired = (flags & PMAP_ENTER_WIRED) != 0;
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
     oldpmap = pmap_switch(pmap);
@@ -1722,6 +1723,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         pmap_switch(oldpmap);
         PMAP_UNLOCK(pmap);
         rw_wunlock(&pvh_global_lock);
+        if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+            return (KERN_RESOURCE_SHORTAGE);
         VM_WAIT;
         rw_wlock(&pvh_global_lock);
         PMAP_LOCK(pmap);
@@ -1815,6 +1818,7 @@ validate:
     rw_wunlock(&pvh_global_lock);
     pmap_switch(oldpmap);
     PMAP_UNLOCK(pmap);
+    return (KERN_SUCCESS);
 }
 
 /*
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 6bc5ba9..973791b 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -177,8 +177,8 @@ static void pmap_invalidate_all(pmap_t pmap);
 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);
 
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
-static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
 static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);
@@ -1094,20 +1094,16 @@ pmap_pinit(pmap_t pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
 {
     vm_offset_t pageva;
     vm_page_t m;
 
-    KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-        (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-        ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
     /*
      * Find or fabricate a new pagetable page
      */
     if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
-        if (flags & M_WAITOK) {
+        if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
             PMAP_UNLOCK(pmap);
             rw_wunlock(&pvh_global_lock);
             pmap_grow_direct_page_cache();
@@ -1164,16 +1160,12 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
 {
     unsigned ptepindex;
     pd_entry_t *pde;
     vm_page_t m;
 
-    KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-        (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-        ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
     /*
      * Calculate pagetable page index
      */
@@ -1197,7 +1189,7 @@ retry:
          * deallocated.
          */
         m = _pmap_allocpte(pmap, ptepindex, flags);
-        if (m == NULL && (flags & M_WAITOK))
+        if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
             goto retry;
     }
     return (m);
@@ -1994,9 +1986,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  * or lose information. That is, this routine must actually
  * insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
     vm_paddr_t pa, opa;
     pt_entry_t *pte;
@@ -2009,11 +2001,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
         va >= kmi.clean_eva,
         ("pmap_enter: managed mapping within the clean submap"));
-    KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_xbusied(m),
-        ("pmap_enter: page %p is not busy", m));
+    if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+        VM_OBJECT_ASSERT_LOCKED(m->object);
     pa = VM_PAGE_TO_PHYS(m);
-    newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, access, prot);
-    if (wired)
+    newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, flags, prot);
+    if ((flags & PMAP_ENTER_WIRED) != 0)
         newpte |= PTE_W;
     if (is_kernel_pmap(pmap))
         newpte |= PTE_G;
@@ -2032,7 +2024,14 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
      * creating it here.
      */
     if (va < VM_MAXUSER_ADDRESS) {
-        mpte = pmap_allocpte(pmap, va, M_WAITOK);
+        mpte = pmap_allocpte(pmap, va, flags);
+        if (mpte == NULL) {
+            KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+                ("pmap_allocpte failed with sleep allowed"));
+            rw_wunlock(&pvh_global_lock);
+            PMAP_UNLOCK(pmap);
+            return (KERN_RESOURCE_SHORTAGE);
+        }
     }
 
     pte = pmap_pte(pmap, va);
@@ -2057,9 +2056,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
      * are valid mappings in them. Hence, if a user page is
      * wired, the PT page will be also.
      */
-    if (wired && !pte_test(&origpte, PTE_W))
+    if (pte_test(&newpte, PTE_W) && !pte_test(&origpte, PTE_W))
         pmap->pm_stats.wired_count++;
-    else if (!wired && pte_test(&origpte, PTE_W))
+    else if (!pte_test(&newpte, PTE_W) && pte_test(&origpte,
+        PTE_W))
         pmap->pm_stats.wired_count--;
 
     KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
@@ -2123,7 +2123,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     /*
     * Increment counters
     */
-    if (wired)
+    if (pte_test(&newpte, PTE_W))
         pmap->pm_stats.wired_count++;
 
 validate:
@@ -2170,6 +2170,7 @@ validate:
     }
     rw_wunlock(&pvh_global_lock);
     PMAP_UNLOCK(pmap);
+    return (KERN_SUCCESS);
 }
 
 /*
@@ -2235,7 +2236,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
             mpte->wire_count++;
         } else {
             mpte = _pmap_allocpte(pmap, ptepindex,
-                M_NOWAIT);
+                PMAP_ENTER_NOSLEEP);
             if (mpte == NULL)
                 return (mpte);
         }
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index e9b3a44..c7811ef 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -258,8 +258,8 @@ static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);
 /*
  * Utility routines.
  */
-static void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
-            vm_prot_t, boolean_t);
+static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+            vm_prot_t, u_int, int8_t);
 static void moea_syncicache(vm_offset_t, vm_size_t);
 static boolean_t moea_query_bit(vm_page_t, int);
 static u_int moea_clear_bit(vm_page_t, int);
@@ -274,7 +274,8 @@ void moea_clear_modify(mmu_t, vm_page_t);
 void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
+    int8_t);
 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
 void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
@@ -1104,16 +1105,25 @@ moea_zero_page_idle(mmu_t mmu, vm_page_t m)
  * target pmap with the protection requested. If specified the page
  * will be wired down.
  */
-void
+int
 moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+    u_int flags, int8_t psind)
 {
+    int error;
 
-    rw_wlock(&pvh_global_lock);
-    PMAP_LOCK(pmap);
-    moea_enter_locked(pmap, va, m, prot, wired);
-    rw_wunlock(&pvh_global_lock);
-    PMAP_UNLOCK(pmap);
+    for (;;) {
+        rw_wlock(&pvh_global_lock);
+        PMAP_LOCK(pmap);
+        error = moea_enter_locked(pmap, va, m, prot, flags, psind);
+        rw_wunlock(&pvh_global_lock);
+        PMAP_UNLOCK(pmap);
+        if (error != ENOMEM)
+            return (KERN_SUCCESS);
+        if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+            return (KERN_RESOURCE_SHORTAGE);
+        VM_OBJECT_ASSERT_UNLOCKED(m->object);
+        VM_WAIT;
+    }
 }
 
 /*
@@ -1123,9 +1133,9 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
  *
  * The page queues and pmap must be locked.
  */
-static void
+static int
 moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+    u_int flags, int8_t psind __unused)
 {
     struct pvo_head *pvo_head;
     uma_zone_t zone;
@@ -1167,10 +1177,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     } else
         pte_lo |= PTE_BR;
 
-    if (prot & VM_PROT_EXECUTE)
-        pvo_flags |= PVO_EXECUTABLE;
-
-    if (wired)
+    if ((flags & PMAP_ENTER_WIRED) != 0)
         pvo_flags |= PVO_WIRED;
 
     error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
@@ -1185,6 +1192,8 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     if (pmap != kernel_pmap && error == ENOENT &&
         (pte_lo & (PTE_I | PTE_G)) == 0)
         moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
+
+    return (error);
 }
 
 /*
@@ -1214,7 +1223,7 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
     PMAP_LOCK(pm);
     while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
         moea_enter_locked(pm, start + ptoa(diff), m, prot &
-            (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+            (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
         m = TAILQ_NEXT(m, listq);
     }
     rw_wunlock(&pvh_global_lock);
@@ -1229,7 +1238,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pm);
     moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-        FALSE);
+        0, 0);
     rw_wunlock(&pvh_global_lock);
     PMAP_UNLOCK(pm);
 }
@@ -1725,8 +1734,6 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
     for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
         pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
         tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
-        if ((prot & VM_PROT_EXECUTE) == 0)
-            pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
 
         /*
          * Grab the PTE pointer before we diddle with the cached PTE
@@ -1968,8 +1975,6 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
     pvo->pvo_pmap = pm;
     LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
     pvo->pvo_vaddr &= ~ADDR_POFF;
-    if (flags & VM_PROT_EXECUTE)
-        pvo->pvo_vaddr |= PVO_EXECUTABLE;
     if (flags & PVO_WIRED)
         pvo->pvo_vaddr |= PVO_WIRED;
     if (pvo_head != &moea_pvo_kunmanaged)
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index b47b94d..ceca204 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -267,7 +267,7 @@ int moea64_large_page_shift = 0;
  * PVO calls.
  */
 static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
-            vm_offset_t, vm_offset_t, uint64_t, int);
+            vm_offset_t, vm_offset_t, uint64_t, int, int8_t);
 static void moea64_pvo_remove(mmu_t, struct pvo_entry *);
 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
@@ -288,7 +288,8 @@ void moea64_clear_modify(mmu_t, vm_page_t);
 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
+    u_int flags, int8_t psind);
 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
@@ -627,7 +628,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
             moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
                 NULL, pa, pa, pte_lo,
-                PVO_WIRED | PVO_LARGE);
+                PVO_WIRED | PVO_LARGE, 0);
         }
     }
     PMAP_UNLOCK(kernel_pmap);
@@ -1245,9 +1246,9 @@ moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
  * will be wired down.
  */
-void
+int
 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+    vm_prot_t prot, u_int flags, int8_t psind)
 {
     struct pvo_head *pvo_head;
     uma_zone_t zone;
@@ -1291,15 +1292,23 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     if ((prot & VM_PROT_EXECUTE) == 0)
         pte_lo |= LPTE_NOEXEC;
 
-    if (wired)
+    if ((flags & PMAP_ENTER_WIRED) != 0)
         pvo_flags |= PVO_WIRED;
 
-    LOCK_TABLE_WR();
-    PMAP_LOCK(pmap);
-    error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
-        VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
-    PMAP_UNLOCK(pmap);
-    UNLOCK_TABLE_WR();
+    for (;;) {
+        LOCK_TABLE_WR();
+        PMAP_LOCK(pmap);
+        error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
+            VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags, psind);
+        PMAP_UNLOCK(pmap);
+        UNLOCK_TABLE_WR();
+        if (error != ENOMEM)
+            break;
+        if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+            return (KERN_RESOURCE_SHORTAGE);
+        VM_OBJECT_ASSERT_UNLOCKED(m->object);
+        VM_WAIT;
+    }
 
     /*
      * Flush the page from the instruction cache if this page is
@@ -1310,6 +1319,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
         vm_page_aflag_set(m, PGA_EXECUTABLE);
         moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
     }
+    return (KERN_SUCCESS);
 }
 
 static void
@@ -1374,7 +1384,7 @@ moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
     m = m_start;
     while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
         moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
-            (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+            (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
         m = TAILQ_NEXT(m, listq);
     }
 }
@@ -1384,8 +1394,8 @@ moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 
-    moea64_enter(mmu, pm, va, m,
-        prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+    moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
+        PMAP_ENTER_NOSLEEP, 0);
 }
 
 vm_paddr_t
@@ -1473,7 +1483,8 @@ moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
         PMAP_LOCK(kernel_pmap);
 
     moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
-        NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);
+        NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP,
+        0);
 
     if (needed_lock)
         PMAP_UNLOCK(kernel_pmap);
@@ -1695,7 +1706,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
     LOCK_TABLE_WR();
     PMAP_LOCK(kernel_pmap);
     error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
-        NULL, va, pa, pte_lo, PVO_WIRED);
+        NULL, va, pa, pte_lo, PVO_WIRED, 0);
     PMAP_UNLOCK(kernel_pmap);
     UNLOCK_TABLE_WR();
@@ -2193,7 +2204,7 @@ moea64_bootstrap_alloc(vm_size_t size, u_int align)
 static int
 moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
     struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
-    uint64_t pte_lo, int flags)
+    uint64_t pte_lo, int flags, int8_t psind __unused)
 {
     struct pvo_entry *pvo;
     uint64_t vsid;
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 2f60b72..0862b99 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -146,8 +146,8 @@ static struct mtx copy_page_mutex;
 /* PMAP */
 /**************************************************************************/
 
-static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
-    vm_prot_t, boolean_t);
+static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, u_int flags, int8_t psind);
 
 unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
 unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
@@ -228,14 +228,14 @@ static struct ptbl_buf *ptbl_buf_alloc(void);
 static void ptbl_buf_free(struct ptbl_buf *);
 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
 
-static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
+static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
 static void ptbl_free(mmu_t, pmap_t, unsigned int);
 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
 
 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
-static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
+static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
 
 static pv_entry_t pv_alloc(void);
@@ -273,8 +273,8 @@ static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
 static void mmu_booke_copy_pages(mmu_t, vm_page_t *, vm_offset_t, vm_page_t *,
     vm_offset_t, int);
-static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
-    vm_prot_t, boolean_t);
+static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, u_int flags, int8_t psind);
 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
     vm_page_t, vm_prot_t);
 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
@@ -558,14 +558,14 @@ ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
 
 /* Allocate page table. */
 static pte_t *
-ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
+ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
 {
     vm_page_t mtbl[PTBL_PAGES];
     vm_page_t m;
     struct ptbl_buf *pbuf;
     unsigned int pidx;
     pte_t *ptbl;
-    int i;
+    int i, j;
 
     CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
         (pmap == kernel_pmap), pdir_idx);
@@ -588,9 +588,15 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
         pidx = (PTBL_PAGES * pdir_idx) + i;
         while ((m = vm_page_alloc(NULL, pidx,
             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
-            PMAP_UNLOCK(pmap);
             rw_wunlock(&pvh_global_lock);
+            if (nosleep) {
+                ptbl_free_pmap_ptbl(pmap, ptbl);
+                for (j = 0; j < i; j++)
+                    vm_page_free(mtbl[j]);
+                atomic_subtract_int(&cnt.v_wire_count, i);
+                return (NULL);
+            }
             VM_WAIT;
             rw_wlock(&pvh_global_lock);
             PMAP_LOCK(pmap);
@@ -885,8 +891,9 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
 /*
  * Insert PTE for a given page and virtual address.
  */
-static void
-pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
+static int
+pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
+    boolean_t nosleep)
 {
     unsigned int pdir_idx = PDIR_IDX(va);
     unsigned int ptbl_idx = PTBL_IDX(va);
@@ -900,7 +907,11 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
 
     if (ptbl == NULL) {
         /* Allocate page table pages. */
-        ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
+        ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
+        if (ptbl == NULL) {
+            KASSERT(nosleep, ("nosleep and NULL ptbl"));
+            return (ENOMEM);
+        }
     } else {
         /*
          * Check if there is valid mapping for requested
@@ -949,6 +960,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
 
     tlb_miss_unlock();
     mtx_unlock_spin(&tlbivax_mutex);
+    return (0);
 }
 
 /* Return the pa for the given pmap/va. */
@@ -1576,35 +1588,37 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap)
  * target physical map with the protection requested. If specified the page
  * will be wired down.
  */
-static void
+static int
 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+    vm_prot_t prot, u_int flags, int8_t psind)
 {
+    int error;
 
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
-    mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
+    error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
    rw_wunlock(&pvh_global_lock);
     PMAP_UNLOCK(pmap);
+    return (error);
 }
 
-static void
+static int
 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+    vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
 {
     pte_t *pte;
     vm_paddr_t pa;
     uint32_t flags;
-    int su, sync;
+    int error, su, sync;
 
     pa = VM_PAGE_TO_PHYS(m);
     su = (pmap == kernel_pmap);
     sync = 0;
 
     //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
-    //		"pa=0x%08x prot=0x%08x wired=%d)\n",
+    //		"pa=0x%08x prot=0x%08x flags=%#x)\n",
     //		(u_int32_t)pmap, su, pmap->pm_tid,
-    //		(u_int32_t)m, va, pa, prot, wired);
+    //		(u_int32_t)m, va, pa, prot, flags);
 
     if (su) {
         KASSERT(((va >= virtual_avail) &&
@@ -1634,7 +1648,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
         flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
 
         /* Wiring change, just update stats. */
-        if (wired) {
+        if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
             if (!PTE_ISWIRED(pte)) {
                 flags |= PTE_WIRED;
                 pmap->pm_stats.wired_count++;
@@ -1730,12 +1744,16 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
         }
 
         /* If its wired update stats. */
-        if (wired) {
-            pmap->pm_stats.wired_count++;
+        if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
             flags |= PTE_WIRED;
-        }
 
-        pte_enter(mmu, pmap, m, va, flags);
+        error = pte_enter(mmu, pmap, m, va, flags,
+            (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
+        if (error != 0)
+            return (KERN_RESOURCE_SHORTAGE);
+
+        if ((flags & PMAP_ENTER_WIRED) != 0)
+            pmap->pm_stats.wired_count++;
 
     /* Flush the real memory from the instruction cache. */
     if (prot & VM_PROT_EXECUTE)
@@ -1746,6 +1764,8 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
         __syncicache((void *)va, PAGE_SIZE);
         sync = 0;
     }
+
+    return (KERN_SUCCESS);
 }
 
 /*
@@ -1775,7 +1795,8 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
     PMAP_LOCK(pmap);
     while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
         mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
-            prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+            prot & (VM_PROT_READ | VM_PROT_EXECUTE),
+            PMAP_ENTER_NOSLEEP, 0);
         m = TAILQ_NEXT(m, listq);
     }
     rw_wunlock(&pvh_global_lock);
@@ -1790,7 +1811,8 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     rw_wlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
     mmu_booke_enter_locked(mmu, pmap, va, m,
-        prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+        prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
+        0);
     rw_wunlock(&pvh_global_lock);
     PMAP_UNLOCK(pmap);
 }
@@ -2074,7 +2096,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
             m = PHYS_TO_VM_PAGE(pa);
             PMAP_LOCK(pmap);
             pte_enter(mmu, pmap, m, addr,
-                PTE_SR | PTE_VALID | PTE_UR);
+                PTE_SR | PTE_VALID | PTE_UR, FALSE);
             __syncicache((void *)addr, PAGE_SIZE);
             pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
             PMAP_UNLOCK(pmap);
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index 2c9c786..663cd1a 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -112,7 +112,6 @@ RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
 #define PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
 #define PVO_WIRED		0x010UL		/* PVO entry is wired */
 #define PVO_MANAGED		0x020UL		/* PVO entry is managed */
-#define PVO_EXECUTABLE		0x040UL		/* PVO entry is executable */
 #define PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during bootstrap */
 #define PVO_LARGE		0x200UL		/* large page */
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 67222a5..65a4046 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -240,15 +240,17 @@ METHOD void copy_pages {
  * @param _va		mapping virtual address
  * @param _p		mapping physical page
  * @param _prot		mapping page protection
- * @param _wired	TRUE if page will be wired
+ * @param _flags	pmap_enter flags
+ * @param _psind	superpage size index
  */
-METHOD void enter {
+METHOD int enter {
     mmu_t		_mmu;
     pmap_t		_pmap;
     vm_offset_t	_va;
     vm_page_t	_p;
     vm_prot_t	_prot;
-    boolean_t	_wired;
+    u_int		_flags;
+    int8_t		_psind;
 };
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 773ede1..1957692 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -143,14 +143,14 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
     MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
 }
 
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
+    u_int flags, int8_t psind)
 {
 
-    CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %#x, %p, %#x, %u)", pmap, va,
-        access, p, prot, wired);
-    MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
+    CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
+        p, prot, flags, psind);
+    return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
 }
 
 void
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 180e5fd..c11e741 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -149,8 +149,8 @@ static int pmap_protect_tte(struct pmap *pm1, struct pmap *pm2,
  *
  * The page queues and pmap must be locked.
  */
-static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
-    vm_prot_t prot, boolean_t wired);
+static int pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
+    vm_prot_t prot, u_int flags, int8_t psind);
 
 extern int tl1_dmmu_miss_direct_patch_tsb_phys_1[];
 extern int tl1_dmmu_miss_direct_patch_tsb_phys_end_1[];
@@ -1459,16 +1459,18 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  * target pmap with the protection requested. If specified the page
  * will be wired down.
  */
-void
-pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind)
 {
+    int rv;
 
     rw_wlock(&tte_list_global_lock);
     PMAP_LOCK(pm);
-    pmap_enter_locked(pm, va, m, prot, wired);
+    rv = pmap_enter_locked(pm, va, m, prot, flags, psind);
     rw_wunlock(&tte_list_global_lock);
     PMAP_UNLOCK(pm);
+    return (rv);
 }
 
 /*
@@ -1478,14 +1480,15 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
  *
  * The page queues and pmap must be locked.
  */
-static void
+static int
 pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+    u_int flags, int8_t psind __unused)
 {
     struct tte *tp;
     vm_paddr_t pa;
     vm_page_t real;
     u_long data;
+    boolean_t wired;
 
     rw_assert(&tte_list_global_lock, RA_WLOCKED);
     PMAP_LOCK_ASSERT(pm, MA_OWNED);
@@ -1493,6 +1496,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         VM_OBJECT_ASSERT_LOCKED(m->object);
     PMAP_STATS_INC(pmap_nenter);
     pa = VM_PAGE_TO_PHYS(m);
+    wired = (flags & PMAP_ENTER_WIRED) != 0;
 
     /*
     * If this is a fake page from the device_pager, but it covers actual
@@ -1606,6 +1610,8 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         tsb_tte_enter(pm, m, va, TS_8K, data);
     }
+
+    return (KERN_SUCCESS);
 }
 
 /*
@@ -1635,7 +1641,7 @@ pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
     PMAP_LOCK(pm);
     while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
         pmap_enter_locked(pm, start + ptoa(diff), m, prot &
-            (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+            (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
         m = TAILQ_NEXT(m, listq);
     }
     rw_wunlock(&tte_list_global_lock);
@@ -1649,7 +1655,7 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
     rw_wlock(&tte_list_global_lock);
     PMAP_LOCK(pm);
     pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-        FALSE);
+        0, 0);
     rw_wunlock(&tte_list_global_lock);
     PMAP_UNLOCK(pm);
 }
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 0c45e33..2e7b19d 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -97,6 +97,13 @@ struct thread;
  */
 extern vm_offset_t kernel_vm_end;
 
+/*
+ * Flags for pmap_enter().  The bits in the low-order byte are reserved
+ * for the protection code (vm_prot_t) that describes the fault type.
+ */
+#define	PMAP_ENTER_NOSLEEP	0x0100
+#define	PMAP_ENTER_WIRED	0x0200
+
 void	pmap_activate(struct thread *td);
 void	pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
            int advice);
@@ -108,8 +115,8 @@ void	pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
 void	pmap_copy_page(vm_page_t, vm_page_t);
 void	pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset,
            vm_page_t mb[], vm_offset_t b_offset, int xfersize);
-void	pmap_enter(pmap_t, vm_offset_t, vm_prot_t, vm_page_t,
-           vm_prot_t, boolean_t);
+int	pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
+           vm_prot_t prot, u_int flags, int8_t psind);
 void	pmap_enter_object(pmap_t pmap, vm_offset_t start,
           vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
 void	pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 7ac8a86..70e1d73 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -907,7 +907,8 @@ vnode_locked:
     * back on the active queue until later so that the pageout daemon
     * won't find it (yet).
     */
-    pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
+    pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
+        fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
     if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
         vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
     VM_OBJECT_WLOCK(fs.object);
@@ -1369,7 +1370,8 @@ again:
         * mapping is being replaced by a write-enabled
         * mapping, then wire that new mapping.
         */
-        pmap_enter(dst_map->pmap, vaddr, access, dst_m, prot, upgrade);
+        pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
+            access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
 
         /*
         * Mark it no longer busy, and put it on the active list.
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index dd075c1..4dfb7f5 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -202,8 +202,8 @@ retry:
         if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
             pmap_zero_page(m);
         m->valid = VM_PAGE_BITS_ALL;
-        pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
-            TRUE);
+        pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
+            VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
     }
     VM_OBJECT_WUNLOCK(object);
     return (addr);
@@ -255,7 +255,8 @@ retry:
         if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
             pmap_zero_page(m);
         m->valid = VM_PAGE_BITS_ALL;
-        pmap_enter(kernel_pmap, tmp, VM_PROT_ALL, m, VM_PROT_ALL, true);
+        pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
+            VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
         tmp += PAGE_SIZE;
     }
     VM_OBJECT_WUNLOCK(object);
@@ -378,8 +379,8 @@ retry:
         KASSERT((m->oflags & VPO_UNMANAGED) != 0,
             ("kmem_malloc: page %p is managed", m));
         m->valid = VM_PAGE_BITS_ALL;
-        pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
-            TRUE);
+        pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
+            VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
     }
     VM_OBJECT_WUNLOCK(object);
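
To round out the picture, here is a hedged sketch of a caller driving the new sleep-less mode. The helper name enter_nosleep_retry is hypothetical, invented for illustration and not part of this commit; the real callers changed here, vm_fault() and the kmem routines, pass the fault type in the low byte of the flags and do not set PMAP_ENTER_NOSLEEP themselves. The wait-and-retry loop mirrors the one moea_enter() and moea64_enter() gained in the diff above.

```c
/*
 * Hypothetical caller sketch (not in this commit): attempt a
 * sleep-less pmap_enter() and perform the wait-and-retry loop
 * ourselves, mirroring the moea_enter()/moea64_enter() loops above.
 */
static int
enter_nosleep_retry(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
	int rv;

	for (;;) {
		/* The low byte carries the fault type; ask for no sleep. */
		rv = pmap_enter(pmap, va, m, prot,
		    prot | PMAP_ENTER_NOSLEEP, 0);
		if (rv != KERN_RESOURCE_SHORTAGE)
			return (rv);	/* normally KERN_SUCCESS */
		/* Out of page-table pages: wait for free memory, retry. */
		VM_WAIT;
	}
}
```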