path: root/sys/powerpc
author     marcel <marcel@FreeBSD.org>  2009-10-21 18:38:02 +0000
committer  marcel <marcel@FreeBSD.org>  2009-10-21 18:38:02 +0000
commit     51bb720939567fa381c6a03839b51f9c80bc67ef (patch)
tree       9665f89431ede73407ae0ad11ebcc8198166085e /sys/powerpc
parent     5598d561ce3ef08a3a8c5d518ca09e28883ff050 (diff)
o Introduce vm_sync_icache() for making the I-cache coherent with
  the memory or D-cache, depending on the semantics of the platform.
  vm_sync_icache() is basically a wrapper around pmap_sync_icache()
  that translates the vm_map_t argument to pmap_t.
o Introduce pmap_sync_icache() to all PMAP implementations. For powerpc
  it replaces the pmap_page_executable() function, added to solve the
  I-cache problem in uiomove_fromphys().
o In proc_rwmem() call vm_sync_icache() when writing to a page that has
  execute permissions. This assures that when breakpoints are written,
  the I-cache will be coherent and the process will actually hit the
  breakpoint.
o This also fixes the Book-E PMAP implementation that was missing
  necessary locking while trying to deal with the I-cache coherency in
  pmap_enter() (read: mmu_booke_enter_locked).

The key property of this change is that the I-cache is made coherent
*after* writes have been done. Doing it in the PMAP layer when adding or
changing a mapping means that the I-cache is made coherent *before* any
writes happen. The difference is key when the I-cache prefetches.
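
[Editor's note: the vm_sync_icache() wrapper described in the first
bullet lives in the MI VM code and is therefore outside this diff, which
is limited to sys/powerpc. A minimal sketch of what such a wrapper looks
like, assuming only the translation described above:]

	/*
	 * Sketch: translate the vm_map_t to its pmap and hand off to
	 * the MD pmap_sync_icache() implementation added in this diff.
	 */
	void
	vm_sync_icache(vm_map_t map, vm_offset_t va, vm_size_t sz)
	{

		pmap_sync_icache(vm_map_pmap(map), va, sz);
	}

[vm_map_pmap() performs the vm_map_t to pmap_t translation; everything
else is delegated to the per-platform pmap_sync_icache() methods below.]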
Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/aim/mmu_oea.c            34
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c          50
-rw-r--r--  sys/powerpc/booke/pmap.c             72
-rw-r--r--  sys/powerpc/include/pmap.h            1
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m         18
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c  10
-rw-r--r--  sys/powerpc/powerpc/uio_machdep.c     3
7 files changed, 117 insertions, 71 deletions
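
[Editor's note: the proc_rwmem() change mentioned in the commit message
lands in sys/kern and so does not appear in this diffstat. A hedged
sketch of that call site, with illustrative names (writing, error, uva,
len are assumptions, not the literal code):]

	/*
	 * After a successful write into the target process, make the
	 * I-cache coherent if the page is mapped executable, so that a
	 * freshly written breakpoint instruction is actually fetched.
	 */
	if (writing && error == 0) {
		vm_map_lock_read(map);
		if (vm_map_check_protection(map, trunc_page(uva),
		    round_page(uva + len), VM_PROT_EXECUTE))
			vm_sync_icache(map, uva, len);
		vm_map_unlock_read(map);
	}

[Note the ordering: the sync happens after the write completes, which is
exactly the property the last paragraph of the commit message argues
for.]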
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index bbf2e04..1ca230c 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -330,7 +330,7 @@ void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea_page_executable(mmu_t, vm_page_t);
+static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_change_wiring, moea_change_wiring),
@@ -357,6 +357,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_remove, moea_remove),
MMUMETHOD(mmu_remove_all, moea_remove_all),
MMUMETHOD(mmu_remove_write, moea_remove_write),
+ MMUMETHOD(mmu_sync_icache, moea_sync_icache),
MMUMETHOD(mmu_zero_page, moea_zero_page),
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
@@ -371,7 +372,6 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_kextract, moea_kextract),
MMUMETHOD(mmu_kenter, moea_kenter),
MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
- MMUMETHOD(mmu_page_executable, moea_page_executable),
{ 0, 0 }
};
@@ -2359,12 +2359,6 @@ moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
return (EFAULT);
}
-boolean_t
-moea_page_executable(mmu_t mmu, vm_page_t pg)
-{
- return ((moea_attr_fetch(pg) & PTE_EXEC) == PTE_EXEC);
-}
-
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
@@ -2424,3 +2418,27 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
}
+
+static void
+moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+ struct pvo_entry *pvo;
+ vm_offset_t lim;
+ vm_paddr_t pa;
+ vm_size_t len;
+
+ PMAP_LOCK(pm);
+ while (sz > 0) {
+ lim = round_page(va);
+ len = MIN(lim - va, sz);
+ pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+ if (pvo != NULL) {
+ pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+ (va & ADDR_POFF);
+ moea_syncicache(pa, len);
+ }
+ va += len;
+ sz -= len;
+ }
+ PMAP_UNLOCK(pm);
+}
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 4dad3dc..67e457c 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -369,7 +369,7 @@ static boolean_t moea64_query_bit(vm_page_t, u_int64_t);
static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
static void moea64_kremove(mmu_t, vm_offset_t);
static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
- vm_offset_t pa);
+ vm_offset_t pa, vm_size_t sz);
static void tlbia(void);
/*
@@ -410,7 +410,7 @@ void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea64_page_executable(mmu_t, vm_page_t);
+static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
@@ -437,6 +437,7 @@ static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_remove, moea64_remove),
MMUMETHOD(mmu_remove_all, moea64_remove_all),
MMUMETHOD(mmu_remove_write, moea64_remove_write),
+ MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
MMUMETHOD(mmu_zero_page, moea64_zero_page),
MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
@@ -451,7 +452,6 @@ static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_kextract, moea64_kextract),
MMUMETHOD(mmu_kenter, moea64_kenter),
MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
- MMUMETHOD(mmu_page_executable, moea64_page_executable),
{ 0, 0 }
};
@@ -1264,12 +1264,12 @@ moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* mapped executable and cacheable.
*/
if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
- moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m));
+ moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}
}
static void
-moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
+moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz)
{
/*
* This is much trickier than on older systems because
@@ -1285,16 +1285,16 @@ moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
* If PMAP is not bootstrapped, we are likely to be
* in real mode.
*/
- __syncicache((void *)pa,PAGE_SIZE);
+ __syncicache((void *)pa, sz);
} else if (pmap == kernel_pmap) {
- __syncicache((void *)va,PAGE_SIZE);
+ __syncicache((void *)va, sz);
} else {
/* Use the scratch page to set up a temp mapping */
mtx_lock(&moea64_scratchpage_mtx);
moea64_set_scratchpage_pa(1,pa);
- __syncicache((void *)moea64_scratchpage_va[1],PAGE_SIZE);
+ __syncicache((void *)moea64_scratchpage_va[1], sz);
mtx_unlock(&moea64_scratchpage_mtx);
}
@@ -1817,8 +1817,9 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
pvo->pvo_pmap, pvo->pvo_vaddr);
if ((pvo->pvo_pte.lpte.pte_lo &
(LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
- moea64_syncicache(pm, sva,
- pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+ moea64_syncicache(pm, sva,
+ pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
+ PAGE_SIZE);
}
}
UNLOCK_TABLE();
@@ -2406,12 +2407,6 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
return (EFAULT);
}
-boolean_t
-moea64_page_executable(mmu_t mmu, vm_page_t pg)
-{
- return (!moea64_query_bit(pg, LPTE_NOEXEC));
-}
-
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
@@ -2454,3 +2449,26 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
}
+static void
+moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+ struct pvo_entry *pvo;
+ vm_offset_t lim;
+ vm_paddr_t pa;
+ vm_size_t len;
+
+ PMAP_LOCK(pm);
+ while (sz > 0) {
+ lim = round_page(va);
+ len = MIN(lim - va, sz);
+ pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+ if (pvo != NULL) {
+ pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+ (va & ADDR_POFF);
+ moea64_syncicache(pm, va, pa, len);
+ }
+ va += len;
+ sz -= len;
+ }
+ PMAP_UNLOCK(pm);
+}
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 78a7250..26302a5 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -319,7 +319,8 @@ static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t);
+static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
+ vm_size_t);
static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
vm_size_t, vm_size_t *);
static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
@@ -357,6 +358,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_remove, mmu_booke_remove),
MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
+ MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
@@ -370,7 +372,6 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_kenter, mmu_booke_kenter),
MMUMETHOD(mmu_kextract, mmu_booke_kextract),
/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
- MMUMETHOD(mmu_page_executable, mmu_booke_page_executable),
MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
/* dumpsys() support */
@@ -1682,21 +1683,6 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
__syncicache((void *)va, PAGE_SIZE);
sync = 0;
}
-
- if (sync) {
- /* Create a temporary mapping. */
- pmap = PCPU_GET(curpmap);
-
- va = 0;
- pte = pte_find(mmu, pmap, va);
- KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
-
- flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;
-
- pte_enter(mmu, pmap, m, va, flags);
- __syncicache((void *)va, PAGE_SIZE);
- pte_remove(mmu, pmap, va, PTBL_UNHOLD);
- }
}
/*
@@ -1991,25 +1977,47 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
vm_page_flag_clear(m, PG_WRITEABLE);
}
-static boolean_t
-mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
+static void
+mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
- pv_entry_t pv;
pte_t *pte;
- boolean_t executable;
+ pmap_t pmap;
+ vm_page_t m;
+ vm_offset_t addr;
+ vm_paddr_t pa;
+ int active, valid;
+
+ va = trunc_page(va);
+ sz = round_page(sz);
- executable = FALSE;
- TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
- PMAP_LOCK(pv->pv_pmap);
- pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
- if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
- executable = TRUE;
- PMAP_UNLOCK(pv->pv_pmap);
- if (executable)
- break;
+ vm_page_lock_queues();
+ pmap = PCPU_GET(curpmap);
+ active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
+ while (sz > 0) {
+ PMAP_LOCK(pm);
+ pte = pte_find(mmu, pm, va);
+ valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
+ if (valid)
+ pa = PTE_PA(pte);
+ PMAP_UNLOCK(pm);
+ if (valid) {
+ if (!active) {
+ /* Create a mapping in the active pmap. */
+ addr = 0;
+ m = PHYS_TO_VM_PAGE(pa);
+ PMAP_LOCK(pmap);
+ pte_enter(mmu, pmap, m, addr,
+ PTE_SR | PTE_VALID | PTE_UR);
+ __syncicache((void *)addr, PAGE_SIZE);
+ pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
+ PMAP_UNLOCK(pmap);
+ } else
+ __syncicache((void *)va, PAGE_SIZE);
+ }
+ va += PAGE_SIZE;
+ sz -= PAGE_SIZE;
}
-
- return (executable);
+ vm_page_unlock_queues();
}
/*
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index d4fce7f..a23052e 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -171,7 +171,6 @@ void pmap_bootstrap(vm_offset_t, vm_offset_t);
void pmap_kenter(vm_offset_t va, vm_offset_t pa);
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_offset_t, vm_size_t);
-boolean_t pmap_page_executable(vm_page_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_deactivate(struct thread *);
vm_offset_t pmap_kextract(vm_offset_t);
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 4a5a37c..5b8ba14 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -789,15 +789,21 @@ METHOD boolean_t dev_direct_mapped {
/**
- * @brief Evaluate if a physical page has an executable mapping
+ * @brief Enforce instruction cache coherency. Typically called after a
+ * region of memory has been modified and before execution of or within
+ * that region is attempted. Setting breakpoints in a process through
+ * ptrace(2) is one example of when the instruction cache needs to be
+ * made coherent.
*
- * @param _pg physical page
- *
- * @retval bool TRUE if a physical mapping exists for the given page.
+ * @param _pm the physical map of the virtual address
+ * @param _va the virtual address of the modified region
+ * @param _sz the size of the modified region
*/
-METHOD boolean_t page_executable {
+METHOD void sync_icache {
mmu_t _mmu;
- vm_page_t _pg;
+ pmap_t _pm;
+ vm_offset_t _va;
+ vm_size_t _sz;
};
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index b34c7eb..2b45e17 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -457,12 +457,12 @@ pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}
-boolean_t
-pmap_page_executable(vm_page_t pg)
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, pg);
- return (MMU_PAGE_EXECUTABLE(mmu_obj, pg));
+
+ CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
+ return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
}
vm_offset_t
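
[Editor's note: MMU_SYNC_ICACHE() above is kobj-based method dispatch.
At build time mmu_if.m is processed into mmu_if.h/mmu_if.c, and the
generated inline wrapper looks roughly like the following (a sketch of
the generator's output pattern, not the literal generated file):]

	static __inline void MMU_SYNC_ICACHE(mmu_t _mmu, pmap_t _pm,
	    vm_offset_t _va, vm_size_t _sz)
	{
		kobjop_t _m;

		/* Look up the bound method in the kobj ops cache. */
		KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_sync_icache);
		((mmu_sync_icache_t *)_m)(_mmu, _pm, _va, _sz);
	}

[This is why each PMAP implementation only has to add one MMUMETHOD()
entry to its method table, as the three modules in this diff do.]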
diff --git a/sys/powerpc/powerpc/uio_machdep.c b/sys/powerpc/powerpc/uio_machdep.c
index 2a88fd2..6d17114 100644
--- a/sys/powerpc/powerpc/uio_machdep.c
+++ b/sys/powerpc/powerpc/uio_machdep.c
@@ -107,9 +107,6 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
sf_buf_free(sf);
goto out;
}
- if (uio->uio_rw == UIO_WRITE &&
- pmap_page_executable(m))
- __syncicache(cp, cnt);
break;
case UIO_SYSSPACE:
if (uio->uio_rw == UIO_READ)