summary | refs | log | tree | commit | diff | stats
path: root/sys/powerpc
diff options
context:
space:
mode:
Diffstat (limited to 'sys/powerpc')
-rw-r--r--sys/powerpc/aim/mmu_oea.c42
-rw-r--r--sys/powerpc/aim/mmu_oea64.c95
-rw-r--r--sys/powerpc/booke/pmap.c35
-rw-r--r--sys/powerpc/powerpc/mmu_if.m32
-rw-r--r--sys/powerpc/powerpc/pmap_dispatch.c16
5 files changed, 124 insertions, 96 deletions
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index c7811ef..f2cdf7a 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -269,7 +269,6 @@ int moea_pte_spill(vm_offset_t);
/*
* Kernel MMU interface
*/
-void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
@@ -298,6 +297,7 @@ void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
+void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
@@ -319,7 +319,6 @@ vm_offset_t moea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
struct pmap_md * moea_scan_md(mmu_t mmu, struct pmap_md *prev);
static mmu_method_t moea_methods[] = {
- MMUMETHOD(mmu_change_wiring, moea_change_wiring),
MMUMETHOD(mmu_clear_modify, moea_clear_modify),
MMUMETHOD(mmu_copy_page, moea_copy_page),
MMUMETHOD(mmu_copy_pages, moea_copy_pages),
@@ -346,6 +345,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_remove_all, moea_remove_all),
MMUMETHOD(mmu_remove_write, moea_remove_write),
MMUMETHOD(mmu_sync_icache, moea_sync_icache),
+ MMUMETHOD(mmu_unwire, moea_unwire),
MMUMETHOD(mmu_zero_page, moea_zero_page),
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
@@ -1015,23 +1015,19 @@ moea_deactivate(mmu_t mmu, struct thread *td)
}
void
-moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
+moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
- struct pvo_entry *pvo;
+ struct pvo_entry key, *pvo;
PMAP_LOCK(pm);
- pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
-
- if (pvo != NULL) {
- if (wired) {
- if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
- pm->pm_stats.wired_count++;
- pvo->pvo_vaddr |= PVO_WIRED;
- } else {
- if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
- pm->pm_stats.wired_count--;
- pvo->pvo_vaddr &= ~PVO_WIRED;
- }
+ key.pvo_vaddr = sva;
+ for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+ pvo != NULL && PVO_VADDR(pvo) < eva;
+ pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
+ panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
+ pvo->pvo_vaddr &= ~PVO_WIRED;
+ pm->pm_stats.wired_count--;
}
PMAP_UNLOCK(pm);
}
@@ -1941,7 +1937,21 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
(pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
(pte_lo & PTE_PP)) {
+ /*
+ * The PTE is not changing. Instead, this may
+ * be a request to change the mapping's wired
+ * attribute.
+ */
mtx_unlock(&moea_table_mutex);
+ if ((flags & PVO_WIRED) != 0 &&
+ (pvo->pvo_vaddr & PVO_WIRED) == 0) {
+ pvo->pvo_vaddr |= PVO_WIRED;
+ pm->pm_stats.wired_count++;
+ } else if ((flags & PVO_WIRED) == 0 &&
+ (pvo->pvo_vaddr & PVO_WIRED) != 0) {
+ pvo->pvo_vaddr &= ~PVO_WIRED;
+ pm->pm_stats.wired_count--;
+ }
return (0);
}
moea_pvo_remove(pvo, -1);
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index ceca204..ef66064 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -283,7 +283,6 @@ static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
/*
* Kernel MMU interface
*/
-void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
@@ -313,6 +312,7 @@ void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
+void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
@@ -332,7 +332,6 @@ vm_offset_t moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev);
static mmu_method_t moea64_methods[] = {
- MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
MMUMETHOD(mmu_copy_page, moea64_copy_page),
MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
@@ -360,6 +359,7 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_remove_all, moea64_remove_all),
MMUMETHOD(mmu_remove_write, moea64_remove_write),
MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
+ MMUMETHOD(mmu_unwire, moea64_unwire),
MMUMETHOD(mmu_zero_page, moea64_zero_page),
MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
@@ -1025,55 +1025,38 @@ moea64_deactivate(mmu_t mmu, struct thread *td)
}
void
-moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
+moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
- struct pvo_entry *pvo;
+ struct pvo_entry key, *pvo;
uintptr_t pt;
- uint64_t vsid;
- int i, ptegidx;
- LOCK_TABLE_WR();
+ LOCK_TABLE_RD();
PMAP_LOCK(pm);
- pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
-
- if (pvo != NULL) {
+ key.pvo_vaddr = sva;
+ for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+ pvo != NULL && PVO_VADDR(pvo) < eva;
+ pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
pt = MOEA64_PVO_TO_PTE(mmu, pvo);
-
- if (wired) {
- if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
- pm->pm_stats.wired_count++;
- pvo->pvo_vaddr |= PVO_WIRED;
- pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
- } else {
- if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
- pm->pm_stats.wired_count--;
- pvo->pvo_vaddr &= ~PVO_WIRED;
- pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
- }
-
+ if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
+ panic("moea64_unwire: pvo %p is missing PVO_WIRED",
+ pvo);
+ pvo->pvo_vaddr &= ~PVO_WIRED;
+ if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
+ panic("moea64_unwire: pte %p is missing LPTE_WIRED",
+ &pvo->pvo_pte.lpte);
+ pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
if (pt != -1) {
- /* Update wiring flag in page table. */
- MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
- pvo->pvo_vpn);
- } else if (wired) {
/*
- * If we are wiring the page, and it wasn't in the
- * page table before, add it.
+ * The PTE's wired attribute is not a hardware
+ * feature, so there is no need to invalidate any TLB
+ * entries.
*/
- vsid = PVO_VSID(pvo);
- ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
- pvo->pvo_vaddr & PVO_LARGE);
-
- i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
-
- if (i >= 0) {
- PVO_PTEGIDX_CLR(pvo);
- PVO_PTEGIDX_SET(pvo, i);
- }
+ MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+ pvo->pvo_vpn);
}
-
+ pm->pm_stats.wired_count--;
}
- UNLOCK_TABLE_WR();
+ UNLOCK_TABLE_RD();
PMAP_UNLOCK(pm);
}
@@ -2207,6 +2190,7 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
uint64_t pte_lo, int flags, int8_t psind __unused)
{
struct pvo_entry *pvo;
+ uintptr_t pt;
uint64_t vsid;
int first;
u_int ptegidx;
@@ -2249,13 +2233,42 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
(pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
== (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
+ /*
+ * The physical page and protection are not
+ * changing. Instead, this may be a request
+ * to change the mapping's wired attribute.
+ */
+ pt = -1;
+ if ((flags & PVO_WIRED) != 0 &&
+ (pvo->pvo_vaddr & PVO_WIRED) == 0) {
+ pt = MOEA64_PVO_TO_PTE(mmu, pvo);
+ pvo->pvo_vaddr |= PVO_WIRED;
+ pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
+ pm->pm_stats.wired_count++;
+ } else if ((flags & PVO_WIRED) == 0 &&
+ (pvo->pvo_vaddr & PVO_WIRED) != 0) {
+ pt = MOEA64_PVO_TO_PTE(mmu, pvo);
+ pvo->pvo_vaddr &= ~PVO_WIRED;
+ pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
+ pm->pm_stats.wired_count--;
+ }
if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
+ KASSERT(pt == -1,
+ ("moea64_pvo_enter: valid pt"));
/* Re-insert if spilled */
i = MOEA64_PTE_INSERT(mmu, ptegidx,
&pvo->pvo_pte.lpte);
if (i >= 0)
PVO_PTEGIDX_SET(pvo, i);
moea64_pte_overflow--;
+ } else if (pt != -1) {
+ /*
+ * The PTE's wired attribute is not a
+ * hardware feature, so there is no
+ * need to invalidate any TLB entries.
+ */
+ MOEA64_PTE_CHANGE(mmu, pt,
+ &pvo->pvo_pte.lpte, pvo->pvo_vpn);
}
return (0);
}
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 0862b99..a65eff6 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -266,7 +266,6 @@ void pmap_bootstrap_ap(volatile uint32_t *);
/*
* Kernel MMU interface
*/
-static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
vm_size_t, vm_offset_t);
@@ -306,6 +305,7 @@ static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
+static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
@@ -330,7 +330,6 @@ static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *);
static mmu_method_t mmu_booke_methods[] = {
/* pmap dispatcher interface */
- MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring),
MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
MMUMETHOD(mmu_copy, mmu_booke_copy),
MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
@@ -361,6 +360,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
+ MMUMETHOD(mmu_unwire, mmu_booke_unwire),
MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
@@ -2432,28 +2432,33 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
}
/*
- * Change wiring attribute for a map/virtual-address pair.
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range must
+ * have the wired attribute set. In contrast, invalid mappings cannot have
+ * the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature, so
+ * there is no need to invalidate any TLB entries.
*/
static void
-mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
+mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
+ vm_offset_t va;
pte_t *pte;
PMAP_LOCK(pmap);
- if ((pte = pte_find(mmu, pmap, va)) != NULL) {
- if (wired) {
- if (!PTE_ISWIRED(pte)) {
- pte->flags |= PTE_WIRED;
- pmap->pm_stats.wired_count++;
- }
- } else {
- if (PTE_ISWIRED(pte)) {
- pte->flags &= ~PTE_WIRED;
- pmap->pm_stats.wired_count--;
- }
+ for (va = sva; va < eva; va += PAGE_SIZE) {
+ if ((pte = pte_find(mmu, pmap, va)) != NULL &&
+ PTE_ISVALID(pte)) {
+ if (!PTE_ISWIRED(pte))
+ panic("mmu_booke_unwire: pte %p isn't wired",
+ pte);
+ pte->flags &= ~PTE_WIRED;
+ pmap->pm_stats.wired_count--;
}
}
PMAP_UNLOCK(pmap);
+
}
/*
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 65a4046..5c44b71 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -152,22 +152,6 @@ METHOD void advise {
/**
- * @brief Change the wiring attribute for the page in the given physical
- * map and virtual address.
- *
- * @param _pmap physical map of page
- * @param _va page virtual address
- * @param _wired TRUE to increment wired count, FALSE to decrement
- */
-METHOD void change_wiring {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _va;
- boolean_t _wired;
-};
-
-
-/**
* @brief Clear the 'modified' bit on the given physical page
*
* @param _pg physical page
@@ -630,6 +614,22 @@ METHOD void remove_pages {
/**
+ * @brief Clear the wired attribute from the mappings for the specified range
+ * of addresses in the given pmap.
+ *
+ * @param _pmap physical map
+ * @param _start virtual range start
+ * @param _end virtual range end
+ */
+METHOD void unwire {
+ mmu_t _mmu;
+ pmap_t _pmap;
+ vm_offset_t _start;
+ vm_offset_t _end;
+};
+
+
+/**
* @brief Zero a physical page. It is not assumed that the page is mapped,
* so a temporary (or direct) mapping may need to be used.
*
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 1957692..7f3f913 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -100,14 +100,6 @@ pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
}
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
-{
-
- CTR4(KTR_PMAP, "%s(%p, %#x, %u)", __func__, pmap, va, wired);
- MMU_CHANGE_WIRING(mmu_obj, pmap, va, wired);
-}
-
-void
pmap_clear_modify(vm_page_t m)
{
@@ -361,6 +353,14 @@ pmap_remove_write(vm_page_t m)
}
void
+pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
+{
+
+ CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
+ MMU_UNWIRE(mmu_obj, pmap, start, end);
+}
+
+void
pmap_zero_page(vm_page_t m)
{
OpenPOWER on IntegriCloud