author      alc <alc@FreeBSD.org>  2014-07-13 16:27:57 +0000
committer   alc <alc@FreeBSD.org>  2014-07-13 16:27:57 +0000
commit      1c51afe84c8e7ee057d192f3ac03b0de8decf044 (patch)
tree        d57be68e67c187f2012e95d62973db4981de9c70 /sys/powerpc
parent      c2307b7d217885f63b555772df5ea0c4e4ea2fe5 (diff)
Implement pmap_unwire(). See r268327 for the motivation behind this change.
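
For context: r268327 introduced the machine-independent pmap_unwire() interface that the per-MMU implementations below plug into. A minimal sketch of the contract follows; the declaration matches pmap_dispatch.c in this commit, while the caller example_unwire_range() and its use of round_page() are illustrative, not part of this change.

/*
 * MI interface (sys/vm/pmap.h): clear the wired attribute from every
 * valid mapping in [start, end).  Every valid mapping in the range must
 * currently be wired; the implementations below panic if one is not.
 */
void	pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end);

/* Hypothetical caller: unwire one page-aligned range of a pmap. */
static void
example_unwire_range(pmap_t pmap, vm_offset_t va, vm_size_t len)
{

	pmap_unwire(pmap, va, va + round_page(len));
}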
Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/aim/mmu_oea.c            20
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c          37
-rw-r--r--  sys/powerpc/booke/pmap.c             32
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m         16
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c   8
5 files changed, 113 insertions, 0 deletions
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 86bafe3..b00b4dd 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -297,6 +297,7 @@ void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
+void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
@@ -345,6 +346,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_remove_all, moea_remove_all),
MMUMETHOD(mmu_remove_write, moea_remove_write),
MMUMETHOD(mmu_sync_icache, moea_sync_icache),
+ MMUMETHOD(mmu_unwire, moea_unwire),
MMUMETHOD(mmu_zero_page, moea_zero_page),
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
@@ -1036,6 +1038,24 @@ moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
}
void
+moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+{
+        struct pvo_entry key, *pvo;
+
+        PMAP_LOCK(pm);
+        key.pvo_vaddr = sva;
+        for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+            pvo != NULL && PVO_VADDR(pvo) < eva;
+            pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+                if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
+                        panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
+                pvo->pvo_vaddr &= ~PVO_WIRED;
+                pm->pm_stats.wired_count--;
+        }
+        PMAP_UNLOCK(pm);
+}
+
+void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
vm_offset_t dst;
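
The moea_unwire() hunk above (and moea64_unwire() below) uses the <sys/tree.h> red-black-tree idiom: seed a stack-allocated key, locate the first in-range node with RB_NFIND(), and walk forward with RB_NEXT(). Below is a standalone userland sketch of that range walk; struct node and its vaddr/wired fields are invented stand-ins for struct pvo_entry. It builds wherever <sys/tree.h> is available (FreeBSD and macOS ship it; libbsd provides it on Linux).

#include <sys/tree.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        RB_ENTRY(node) entry;
        unsigned long vaddr;            /* search key, like PVO_VADDR() */
        int wired;                      /* stands in for PVO_WIRED */
};

static int
node_cmp(struct node *a, struct node *b)
{

        return (a->vaddr < b->vaddr ? -1 : a->vaddr > b->vaddr);
}

static RB_HEAD(node_tree, node) head = RB_INITIALIZER(&head);
RB_GENERATE(node_tree, node, entry, node_cmp)

/* Clear "wired" on every node whose key falls in [sva, eva). */
static void
unwire_range(unsigned long sva, unsigned long eva)
{
        struct node key, *n;

        key.vaddr = sva;                /* RB_NFIND: first node >= key */
        for (n = RB_NFIND(node_tree, &head, &key);
            n != NULL && n->vaddr < eva;
            n = RB_NEXT(node_tree, &head, n))
                n->wired = 0;
}

int
main(void)
{
        struct node *n;

        for (unsigned long va = 0; va < 8 * 4096UL; va += 4096) {
                if ((n = calloc(1, sizeof(*n))) == NULL)
                        abort();
                n->vaddr = va;
                n->wired = 1;
                RB_INSERT(node_tree, &head, n);
        }
        unwire_range(2 * 4096, 5 * 4096);       /* unwires pages 2, 3, 4 */
        RB_FOREACH(n, node_tree, &head)
                printf("va %#lx wired %d\n", n->vaddr, n->wired);
        return (0);
}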
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index b47b94d..9509484 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -312,6 +312,7 @@ void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
+void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
@@ -359,6 +360,7 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_remove_all, moea64_remove_all),
MMUMETHOD(mmu_remove_write, moea64_remove_write),
MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
+ MMUMETHOD(mmu_unwire, moea64_unwire),
MMUMETHOD(mmu_zero_page, moea64_zero_page),
MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
@@ -1076,6 +1078,41 @@ moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
PMAP_UNLOCK(pm);
}
+void
+moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+{
+        struct pvo_entry key, *pvo;
+        uintptr_t pt;
+
+        LOCK_TABLE_RD();
+        PMAP_LOCK(pm);
+        key.pvo_vaddr = sva;
+        for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+            pvo != NULL && PVO_VADDR(pvo) < eva;
+            pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+                if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
+                        panic("moea64_unwire: pvo %p is missing PVO_WIRED",
+                            pvo);
+                pvo->pvo_vaddr &= ~PVO_WIRED;
+                if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
+                        panic("moea64_unwire: pte %p is missing LPTE_WIRED",
+                            &pvo->pvo_pte.lpte);
+                pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
+                if ((pt = MOEA64_PVO_TO_PTE(mmu, pvo)) != -1) {
+                        /*
+                         * The PTE's wired attribute is not a hardware
+                         * feature, so there is no need to invalidate any TLB
+                         * entries.
+                         */
+                        MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+                            pvo->pvo_vpn);
+                }
+                pm->pm_stats.wired_count--;
+        }
+        UNLOCK_TABLE_RD();
+        PMAP_UNLOCK(pm);
+}
+
/*
* This goes through and sets the physical address of our
* special scratch PTE to the PA we want to zero or copy. Because
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 57df896..d640d90 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -306,6 +306,7 @@ static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
+static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
@@ -361,6 +362,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
+ MMUMETHOD(mmu_unwire, mmu_booke_unwire),
MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
@@ -2435,6 +2437,36 @@ mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
}
/*
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range must
+ * have the wired attribute set. In contrast, invalid mappings cannot have
+ * the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature, so
+ * there is no need to invalidate any TLB entries.
+ */
+static void
+mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+        vm_offset_t va;
+        pte_t *pte;
+
+        PMAP_LOCK(pmap);
+        for (va = sva; va < eva; va += PAGE_SIZE) {
+                if ((pte = pte_find(mmu, pmap, va)) != NULL &&
+                    PTE_ISVALID(pte)) {
+                        if (!PTE_ISWIRED(pte))
+                                panic("mmu_booke_unwire: pte %p isn't wired",
+                                    pte);
+                        pte->flags &= ~PTE_WIRED;
+                        pmap->pm_stats.wired_count--;
+                }
+        }
+        PMAP_UNLOCK(pmap);
+}
+
+/*
* Return true if the pmap's pv is one of the first 16 pvs linked to from this
* page. This count may be changed upwards or downwards in the future; it is
* only necessary that true be returned for a small subset of pmaps for proper
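
The comment added in the hunk above captures the key invariant: on Book-E, PTE_WIRED is a purely software-managed flag, so clearing it changes nothing the MMU can observe and no TLB invalidation is required. A toy illustration of that distinction follows; PTE_VALID_HW, PTE_WIRED_SW, and tlb_flush_page() are invented names, not the booke pte_t layout.

#include <stdint.h>

#define PTE_VALID_HW    0x01    /* hardware-visible: read on TLB reload */
#define PTE_WIRED_SW    0x02    /* software-only: pmap bookkeeping */

struct toy_pte {
        uint32_t flags;
};

/* Invented stand-in for a real TLB invalidation primitive. */
static void
tlb_flush_page(unsigned long va)
{

        (void)va;
}

static void
toy_clear_flag(struct toy_pte *pte, uint32_t bit, unsigned long va)
{

        pte->flags &= ~bit;
        /*
         * Only changes the MMU can observe need an invalidation; a
         * software-only bit like PTE_WIRED_SW does not, which is why
         * mmu_booke_unwire() clears PTE_WIRED without any TLB flush.
         */
        if (bit & PTE_VALID_HW)
                tlb_flush_page(va);
}

int
main(void)
{
        struct toy_pte pte = { PTE_VALID_HW | PTE_WIRED_SW };

        toy_clear_flag(&pte, PTE_WIRED_SW, 0x2000);     /* no flush needed */
        toy_clear_flag(&pte, PTE_VALID_HW, 0x2000);     /* flush required */
        return (0);
}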
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 67222a5..8ef69d1 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -628,6 +628,22 @@ METHOD void remove_pages {
/**
+ * @brief Clear the wired attribute from the mappings for the specified range
+ * of addresses in the given pmap.
+ *
+ * @param _pmap physical map
+ * @param _start virtual range start
+ * @param _end virtual range end
+ */
+METHOD void unwire {
+        mmu_t           _mmu;
+        pmap_t          _pmap;
+        vm_offset_t     _start;
+        vm_offset_t     _end;
+};
+
+
+/**
* @brief Zero a physical page. It is not assumed that the page is mapped,
* so a temporary (or direct) mapping may need to be used.
*
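
mmu_if.m is fed to the kobj interface generator at build time, which emits a dispatch stub for each METHOD into mmu_if.h. For the new method, the generated wrapper is roughly of the following shape; this is a reconstruction of the usual kobj pattern using <sys/kobj.h>, not copied from generated output.

/* Approximately what the build generates in mmu_if.h for METHOD unwire. */
typedef void mmu_unwire_t(mmu_t _mmu, pmap_t _pmap, vm_offset_t _start,
    vm_offset_t _end);

static __inline void
MMU_UNWIRE(mmu_t _mmu, pmap_t _pmap, vm_offset_t _start, vm_offset_t _end)
{
        kobjop_t _m;

        /* Resolve the mmu_unwire slot in this mmu object's method table. */
        KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_unwire);
        ((mmu_unwire_t *)_m)(_mmu, _pmap, _start, _end);
}

This is how the MMU_UNWIRE() call in pmap_dispatch.c below reaches whichever of moea_unwire(), moea64_unwire(), or mmu_booke_unwire() the running MMU registered via its MMUMETHOD table.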
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 773ede1..bc58b11 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -361,6 +361,14 @@ pmap_remove_write(vm_page_t m)
}
void
+pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
+{
+
+        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
+        MMU_UNWIRE(mmu_obj, pmap, start, end);
+}
+
+void
pmap_zero_page(vm_page_t m)
{