author		kib <kib@FreeBSD.org>	2014-08-24 07:53:15 +0000
committer	kib <kib@FreeBSD.org>	2014-08-24 07:53:15 +0000
commit		25782a7fab8e1a1c60517dab118fec0a98648dd6 (patch)
tree		007faf87a4ebb0e25b4a426bc07ab2babbead757 /sys/powerpc
parent		bee605bad28d58f69b83b3197efb0bd49b38de99 (diff)
Merge the changes to pmap_enter(9) for sleep-less operation (requested
by flag).  The ia64 pmap.c changes are direct commit, since ia64 is
removed on head.

MFC r269368 (by alc):
Retire PVO_EXECUTABLE.

MFC r269728:
Change pmap_enter(9) interface to take flags parameter and superpage
mapping size (currently unused).

MFC r269759 (by alc):
Update the text of a KASSERT() to reflect the changes in r269728.

MFC r269822 (by alc):
Change {_,}pmap_allocpte() so that they look for the flag
PMAP_ENTER_NOSLEEP instead of M_NOWAIT/M_WAITOK when deciding whether
to sleep on page table page allocation.

MFC r270151 (by alc):
Replace KASSERT that no PV list locks are held with a conditional
unlock.

Reviewed by:	alc
Approved by:	re (gjb)
Sponsored by:	The FreeBSD Foundation
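
As a caller-side illustration of the reworked interface described above, a
minimal hypothetical sketch follows; enter_page_example() and its locals are
invented for this note, while pmap_enter(), PMAP_ENTER_WIRED,
PMAP_ENTER_NOSLEEP, KERN_SUCCESS, and KERN_RESOURCE_SHORTAGE come from the
change itself.

/*
 * Hypothetical caller sketch (not part of this commit): map one base
 * page through the new pmap_enter(9) signature without sleeping.
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

static int
enter_page_example(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	u_int flags;
	int rv;

	/* The old boolean_t "wired" argument is now a flag bit. */
	flags = PMAP_ENTER_NOSLEEP;
	if (wired)
		flags |= PMAP_ENTER_WIRED;

	/* psind == 0 asks for a base-page mapping; superpages are unused. */
	rv = pmap_enter(pmap, va, m, prot, flags, 0);
	if (rv == KERN_RESOURCE_SHORTAGE) {
		/*
		 * Possible only with PMAP_ENTER_NOSLEEP: a page table
		 * page could not be allocated without sleeping.  The
		 * caller may drop its locks, wait for memory, and retry.
		 */
		return (rv);
	}
	return (KERN_SUCCESS);
}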
Diffstat (limited to 'sys/powerpc')
-rw-r--r--	sys/powerpc/aim/mmu_oea.c		49
-rw-r--r--	sys/powerpc/aim/mmu_oea64.c		47
-rw-r--r--	sys/powerpc/booke/pmap.c		78
-rw-r--r--	sys/powerpc/include/pmap.h		1
-rw-r--r--	sys/powerpc/powerpc/mmu_if.m		8
-rw-r--r--	sys/powerpc/powerpc/pmap_dispatch.c	12
6 files changed, 117 insertions, 78 deletions
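
Before the per-file hunks, a distilled sketch of the retry loop that both
moea_enter() and moea64_enter() adopt below may help in reading them.
enter_retry_sketch() and enter_locked() are placeholder names and the lock
calls are reduced to comments; the ENOMEM / PMAP_ENTER_NOSLEEP / VM_WAIT
logic mirrors the diff.

/*
 * Hypothetical reading aid, not actual source: the shape of the loop
 * added to moea_enter() and moea64_enter().  enter_locked() stands in
 * for the pmap-specific locked helper.
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

int enter_locked(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);

static int
enter_retry_sketch(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{
	int error;

	for (;;) {
		/* acquire the pvh global lock and the pmap lock */
		error = enter_locked(pmap, va, m, prot, flags, psind);
		/* release both locks */
		if (error != ENOMEM)
			return (KERN_SUCCESS);
		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
			return (KERN_RESOURCE_SHORTAGE);
		/* Sleeping is allowed, so the object must be unlocked. */
		VM_OBJECT_ASSERT_UNLOCKED(m->object);
		VM_WAIT;	/* wait for free pages, then retry */
	}
}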
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index e9b3a44..c7811ef 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -258,8 +258,8 @@ static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);
/*
* Utility routines.
*/
-static void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
- vm_prot_t, boolean_t);
+static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+ vm_prot_t, u_int, int8_t);
static void moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t moea_query_bit(vm_page_t, int);
static u_int moea_clear_bit(vm_page_t, int);
@@ -274,7 +274,8 @@ void moea_clear_modify(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
+ int8_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
@@ -1104,16 +1105,25 @@ moea_zero_page_idle(mmu_t mmu, vm_page_t m)
* target pmap with the protection requested. If specified the page
* will be wired down.
*/
-void
+int
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired)
+ u_int flags, int8_t psind)
{
+ int error;
- rw_wlock(&pvh_global_lock);
- PMAP_LOCK(pmap);
- moea_enter_locked(pmap, va, m, prot, wired);
- rw_wunlock(&pvh_global_lock);
- PMAP_UNLOCK(pmap);
+ for (;;) {
+ rw_wlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ error = moea_enter_locked(pmap, va, m, prot, flags, psind);
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ if (error != ENOMEM)
+ return (KERN_SUCCESS);
+ if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+ return (KERN_RESOURCE_SHORTAGE);
+ VM_OBJECT_ASSERT_UNLOCKED(m->object);
+ VM_WAIT;
+ }
}
/*
@@ -1123,9 +1133,9 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*
* The page queues and pmap must be locked.
*/
-static void
+static int
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired)
+ u_int flags, int8_t psind __unused)
{
struct pvo_head *pvo_head;
uma_zone_t zone;
@@ -1167,10 +1177,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
} else
pte_lo |= PTE_BR;
- if (prot & VM_PROT_EXECUTE)
- pvo_flags |= PVO_EXECUTABLE;
-
- if (wired)
+ if ((flags & PMAP_ENTER_WIRED) != 0)
pvo_flags |= PVO_WIRED;
error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
@@ -1185,6 +1192,8 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (pmap != kernel_pmap && error == ENOENT &&
(pte_lo & (PTE_I | PTE_G)) == 0)
moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
+
+ return (error);
}
/*
@@ -1214,7 +1223,7 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
PMAP_LOCK(pm);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
moea_enter_locked(pm, start + ptoa(diff), m, prot &
- (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
m = TAILQ_NEXT(m, listq);
}
rw_wunlock(&pvh_global_lock);
@@ -1229,7 +1238,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
- FALSE);
+ 0, 0);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pm);
}
@@ -1725,8 +1734,6 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
- if ((prot & VM_PROT_EXECUTE) == 0)
- pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
/*
* Grab the PTE pointer before we diddle with the cached PTE
@@ -1968,8 +1975,6 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
pvo->pvo_pmap = pm;
LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
pvo->pvo_vaddr &= ~ADDR_POFF;
- if (flags & VM_PROT_EXECUTE)
- pvo->pvo_vaddr |= PVO_EXECUTABLE;
if (flags & PVO_WIRED)
pvo->pvo_vaddr |= PVO_WIRED;
if (pvo_head != &moea_pvo_kunmanaged)
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index b47b94d..ceca204 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -267,7 +267,7 @@ int moea64_large_page_shift = 0;
* PVO calls.
*/
static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
- vm_offset_t, vm_offset_t, uint64_t, int);
+ vm_offset_t, vm_offset_t, uint64_t, int, int8_t);
static void moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
@@ -288,7 +288,8 @@ void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
+ u_int flags, int8_t psind);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
@@ -627,7 +628,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
NULL, pa, pa, pte_lo,
- PVO_WIRED | PVO_LARGE);
+ PVO_WIRED | PVO_LARGE, 0);
}
}
PMAP_UNLOCK(kernel_pmap);
@@ -1245,9 +1246,9 @@ moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
* will be wired down.
*/
-void
+int
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, boolean_t wired)
+ vm_prot_t prot, u_int flags, int8_t psind)
{
struct pvo_head *pvo_head;
uma_zone_t zone;
@@ -1291,15 +1292,23 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((prot & VM_PROT_EXECUTE) == 0)
pte_lo |= LPTE_NOEXEC;
- if (wired)
+ if ((flags & PMAP_ENTER_WIRED) != 0)
pvo_flags |= PVO_WIRED;
- LOCK_TABLE_WR();
- PMAP_LOCK(pmap);
- error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
- VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
- PMAP_UNLOCK(pmap);
- UNLOCK_TABLE_WR();
+ for (;;) {
+ LOCK_TABLE_WR();
+ PMAP_LOCK(pmap);
+ error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
+ VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags, psind);
+ PMAP_UNLOCK(pmap);
+ UNLOCK_TABLE_WR();
+ if (error != ENOMEM)
+ break;
+ if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+ return (KERN_RESOURCE_SHORTAGE);
+ VM_OBJECT_ASSERT_UNLOCKED(m->object);
+ VM_WAIT;
+ }
/*
* Flush the page from the instruction cache if this page is
@@ -1310,6 +1319,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_page_aflag_set(m, PGA_EXECUTABLE);
moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}
+ return (KERN_SUCCESS);
}
static void
@@ -1374,7 +1384,7 @@ moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
m = m_start;
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
- (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
m = TAILQ_NEXT(m, listq);
}
}
@@ -1384,8 +1394,8 @@ moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{
- moea64_enter(mmu, pm, va, m,
- prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
+ PMAP_ENTER_NOSLEEP, 0);
}
vm_paddr_t
@@ -1473,7 +1483,8 @@ moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
PMAP_LOCK(kernel_pmap);
moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
- NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);
+ NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP,
+ 0);
if (needed_lock)
PMAP_UNLOCK(kernel_pmap);
@@ -1695,7 +1706,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
LOCK_TABLE_WR();
PMAP_LOCK(kernel_pmap);
error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
- NULL, va, pa, pte_lo, PVO_WIRED);
+ NULL, va, pa, pte_lo, PVO_WIRED, 0);
PMAP_UNLOCK(kernel_pmap);
UNLOCK_TABLE_WR();
@@ -2193,7 +2204,7 @@ moea64_bootstrap_alloc(vm_size_t size, u_int align)
static int
moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
- uint64_t pte_lo, int flags)
+ uint64_t pte_lo, int flags, int8_t psind __unused)
{
struct pvo_entry *pvo;
uint64_t vsid;
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 2f60b72..0862b99 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -146,8 +146,8 @@ static struct mtx copy_page_mutex;
/* PMAP */
/**************************************************************************/
-static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
- vm_prot_t, boolean_t);
+static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+ vm_prot_t, u_int flags, int8_t psind);
unsigned int kptbl_min; /* Index of the first kernel ptbl. */
unsigned int kernel_ptbls; /* Number of KVA ptbls. */
@@ -228,14 +228,14 @@ static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
-static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
+static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
-static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
+static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
static pv_entry_t pv_alloc(void);
@@ -273,8 +273,8 @@ static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
vm_offset_t, vm_page_t *, vm_offset_t, int);
-static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
- vm_prot_t, boolean_t);
+static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+ vm_prot_t, u_int flags, int8_t psind);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
@@ -558,14 +558,14 @@ ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
/* Allocate page table. */
static pte_t *
-ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
+ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
{
vm_page_t mtbl[PTBL_PAGES];
vm_page_t m;
struct ptbl_buf *pbuf;
unsigned int pidx;
pte_t *ptbl;
- int i;
+ int i, j;
CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
(pmap == kernel_pmap), pdir_idx);
@@ -588,9 +588,15 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
pidx = (PTBL_PAGES * pdir_idx) + i;
while ((m = vm_page_alloc(NULL, pidx,
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
-
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
+ if (nosleep) {
+ ptbl_free_pmap_ptbl(pmap, ptbl);
+ for (j = 0; j < i; j++)
+ vm_page_free(mtbl[j]);
+ atomic_subtract_int(&cnt.v_wire_count, i);
+ return (NULL);
+ }
VM_WAIT;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@@ -885,8 +891,9 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
/*
* Insert PTE for a given page and virtual address.
*/
-static void
-pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
+static int
+pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
+ boolean_t nosleep)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@@ -900,7 +907,11 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
if (ptbl == NULL) {
/* Allocate page table pages. */
- ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
+ ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
+ if (ptbl == NULL) {
+ KASSERT(nosleep, ("nosleep and NULL ptbl"));
+ return (ENOMEM);
+ }
} else {
/*
* Check if there is valid mapping for requested
@@ -949,6 +960,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
+ return (0);
}
/* Return the pa for the given pmap/va. */
@@ -1576,35 +1588,37 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap)
* target physical map with the protection requested. If specified the page
* will be wired down.
*/
-static void
+static int
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, boolean_t wired)
+ vm_prot_t prot, u_int flags, int8_t psind)
{
+ int error;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
- mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
+ error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
+ return (error);
}
-static void
+static int
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, boolean_t wired)
+ vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
{
pte_t *pte;
vm_paddr_t pa;
uint32_t flags;
- int su, sync;
+ int error, su, sync;
pa = VM_PAGE_TO_PHYS(m);
su = (pmap == kernel_pmap);
sync = 0;
//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
- // "pa=0x%08x prot=0x%08x wired=%d)\n",
+ // "pa=0x%08x prot=0x%08x flags=%#x)\n",
// (u_int32_t)pmap, su, pmap->pm_tid,
- // (u_int32_t)m, va, pa, prot, wired);
+ // (u_int32_t)m, va, pa, prot, flags);
if (su) {
KASSERT(((va >= virtual_avail) &&
@@ -1634,7 +1648,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
/* Wiring change, just update stats. */
- if (wired) {
+ if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
if (!PTE_ISWIRED(pte)) {
flags |= PTE_WIRED;
pmap->pm_stats.wired_count++;
@@ -1730,12 +1744,16 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
}
/* If its wired update stats. */
- if (wired) {
- pmap->pm_stats.wired_count++;
+ if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
flags |= PTE_WIRED;
- }
- pte_enter(mmu, pmap, m, va, flags);
+ error = pte_enter(mmu, pmap, m, va, flags,
+ (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
+ if (error != 0)
+ return (KERN_RESOURCE_SHORTAGE);
+
+ if ((flags & PMAP_ENTER_WIRED) != 0)
+ pmap->pm_stats.wired_count++;
/* Flush the real memory from the instruction cache. */
if (prot & VM_PROT_EXECUTE)
@@ -1746,6 +1764,8 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
__syncicache((void *)va, PAGE_SIZE);
sync = 0;
}
+
+ return (KERN_SUCCESS);
}
/*
@@ -1775,7 +1795,8 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
- prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ prot & (VM_PROT_READ | VM_PROT_EXECUTE),
+ PMAP_ENTER_NOSLEEP, 0);
m = TAILQ_NEXT(m, listq);
}
rw_wunlock(&pvh_global_lock);
@@ -1790,7 +1811,8 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
mmu_booke_enter_locked(mmu, pmap, va, m,
- prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
+ 0);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -2074,7 +2096,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
m = PHYS_TO_VM_PAGE(pa);
PMAP_LOCK(pmap);
pte_enter(mmu, pmap, m, addr,
- PTE_SR | PTE_VALID | PTE_UR);
+ PTE_SR | PTE_VALID | PTE_UR, FALSE);
__syncicache((void *)addr, PAGE_SIZE);
pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
PMAP_UNLOCK(pmap);
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index 2c9c786..663cd1a 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -112,7 +112,6 @@ RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
#define PVO_PTEGIDX_VALID 0x008UL /* slot is valid */
#define PVO_WIRED 0x010UL /* PVO entry is wired */
#define PVO_MANAGED 0x020UL /* PVO entry is managed */
-#define PVO_EXECUTABLE 0x040UL /* PVO entry is executable */
#define PVO_BOOTSTRAP 0x080UL /* PVO entry allocated during
bootstrap */
#define PVO_LARGE 0x200UL /* large page */
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 67222a5..65a4046 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -240,15 +240,17 @@ METHOD void copy_pages {
* @param _va mapping virtual address
* @param _p mapping physical page
* @param _prot mapping page protection
- * @param _wired TRUE if page will be wired
+ * @param _flags pmap_enter flags
+ * @param _psind superpage size index
*/
-METHOD void enter {
+METHOD int enter {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _va;
vm_page_t _p;
vm_prot_t _prot;
- boolean_t _wired;
+ u_int _flags;
+ int8_t _psind;
};
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 773ede1..1957692 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -143,14 +143,14 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
- vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
+ u_int flags, int8_t psind)
{
- CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %#x, %p, %#x, %u)", pmap, va,
- access, p, prot, wired);
- MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
+ CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
+ p, prot, flags, psind);
+ return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
}
void
OpenPOWER on IntegriCloud