path: root/sys/powerpc/aim
author     marcel <marcel@FreeBSD.org>  2009-10-21 18:38:02 +0000
committer  marcel <marcel@FreeBSD.org>  2009-10-21 18:38:02 +0000
commit     51bb720939567fa381c6a03839b51f9c80bc67ef (patch)
tree       9665f89431ede73407ae0ad11ebcc8198166085e /sys/powerpc/aim
parent     5598d561ce3ef08a3a8c5d518ca09e28883ff050 (diff)
o Introduce vm_sync_icache() for making the I-cache coherent with
  the memory or D-cache, depending on the semantics of the platform.
  vm_sync_icache() is basically a wrapper around pmap_sync_icache()
  that translates the vm_map_t argument to a pmap_t.
o Introduce pmap_sync_icache() in all PMAP implementations. For powerpc
  it replaces the pmap_page_executable() function, which had been added
  to solve the I-cache problem in uiomove_fromphys().
o In proc_rwmem(), call vm_sync_icache() when writing to a page that
  has execute permissions. This ensures that when breakpoints are
  written, the I-cache will be coherent and the process will actually
  hit the breakpoint.
o This also fixes the Book-E PMAP implementation, which was missing
  the necessary locking while trying to deal with I-cache coherency
  in pmap_enter() (read: mmu_booke_enter_locked).

The key property of this change is that the I-cache is made coherent
*after* writes have been done. Doing it in the PMAP layer when adding
or changing a mapping means that the I-cache is made coherent *before*
any writes happen. The difference is key when the I-cache prefetches.
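As a sketch of the machinery the log describes (not the committed code;
vm_map_pmap() and the variable names in the proc_rwmem() fragment are
assumptions for illustration):

    /* MI wrapper: translate the vm_map_t argument to a pmap_t. */
    void
    vm_sync_icache(vm_map_t map, vm_offset_t va, vm_size_t sz)
    {

        pmap_sync_icache(vm_map_pmap(map), va, sz);
    }

    /*
     * proc_rwmem() call site, sketched: after writing into a page that
     * is mapped executable, make the I-cache coherent so that a newly
     * planted breakpoint is what the CPU actually fetches.
     */
    if (writing && (prot & VM_PROT_EXECUTE))
        vm_sync_icache(map, uva, len);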
Diffstat (limited to 'sys/powerpc/aim')
-rw-r--r--  sys/powerpc/aim/mmu_oea.c   | 34
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c | 50
2 files changed, 60 insertions(+), 24 deletions(-)
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index bbf2e04..1ca230c 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -330,7 +330,7 @@ void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea_page_executable(mmu_t, vm_page_t);
+static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_change_wiring, moea_change_wiring),
@@ -357,6 +357,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_remove, moea_remove),
MMUMETHOD(mmu_remove_all, moea_remove_all),
MMUMETHOD(mmu_remove_write, moea_remove_write),
+ MMUMETHOD(mmu_sync_icache, moea_sync_icache),
MMUMETHOD(mmu_zero_page, moea_zero_page),
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
@@ -371,7 +372,6 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_kextract, moea_kextract),
MMUMETHOD(mmu_kenter, moea_kenter),
MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
- MMUMETHOD(mmu_page_executable, moea_page_executable),
{ 0, 0 }
};
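For context on the MMUMETHOD table above: on powerpc the MI pmap layer
reaches these methods through a kobj-generated mmu interface, so
registering mmu_sync_icache here is what makes pmap_sync_icache() land
in moea_sync_icache(). A sketch of the dispatch glue, following the
naming of the existing mmu interface (treat the exact macro and object
names as assumptions):

    /* MD dispatch: forward to whichever mmu_sync_icache method is registered. */
    void
    pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
    {

        MMU_SYNC_ICACHE(mmu_obj, pm, va, sz);
    }

The method itself would be declared in mmu_if.m, which lives outside
sys/powerpc/aim and therefore outside this diffstat.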
@@ -2359,12 +2359,6 @@ moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
return (EFAULT);
}
-boolean_t
-moea_page_executable(mmu_t mmu, vm_page_t pg)
-{
- return ((moea_attr_fetch(pg) & PTE_EXEC) == PTE_EXEC);
-}
-
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
@@ -2424,3 +2418,27 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
}
+
+static void
+moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+ struct pvo_entry *pvo;
+ vm_offset_t lim;
+ vm_paddr_t pa;
+ vm_size_t len;
+
+ PMAP_LOCK(pm);
+ while (sz > 0) {
+ lim = round_page(va + 1);
+ len = MIN(lim - va, sz);
+ pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+ if (pvo != NULL) {
+ pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+ (va & ADDR_POFF);
+ moea_syncicache(pa, len);
+ }
+ va += len;
+ sz -= len;
+ }
+ PMAP_UNLOCK(pm);
+}
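The loop splits [va, va + sz) at page boundaries because each virtual
page may map to an unrelated physical page: the pvo is looked up per
page (va & ~ADDR_POFF) and the physical address is rebuilt from the
page frame (PTE_RPGN) plus the in-page offset (va & ADDR_POFF). The
round_page(va + 1), rather than round_page(va), matters for a
page-aligned va: round_page(va) would return va itself, leaving len at
zero and the loop stuck. Worked example with 4 KB pages: va = 0x10ffc
and sz = 16 give a first chunk of 4 bytes up to the 0x11000 boundary,
then a second chunk of 12 bytes starting exactly at 0x11000, where
round_page(0x11000 + 1) = 0x12000 keeps the loop advancing.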
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 4dad3dc..67e457c 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -369,7 +369,7 @@ static boolean_t moea64_query_bit(vm_page_t, u_int64_t);
static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
static void moea64_kremove(mmu_t, vm_offset_t);
static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
- vm_offset_t pa);
+ vm_offset_t pa, vm_size_t sz);
static void tlbia(void);
/*
@@ -410,7 +410,7 @@ void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea64_page_executable(mmu_t, vm_page_t);
+static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
@@ -437,6 +437,7 @@ static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_remove, moea64_remove),
MMUMETHOD(mmu_remove_all, moea64_remove_all),
MMUMETHOD(mmu_remove_write, moea64_remove_write),
+ MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
MMUMETHOD(mmu_zero_page, moea64_zero_page),
MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
@@ -451,7 +452,6 @@ static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_kextract, moea64_kextract),
MMUMETHOD(mmu_kenter, moea64_kenter),
MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
- MMUMETHOD(mmu_page_executable, moea64_page_executable),
{ 0, 0 }
};
@@ -1264,12 +1264,12 @@ moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* mapped executable and cacheable.
*/
if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
- moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m));
+ moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}
}
static void
-moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
+moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz)
{
/*
* This is much trickier than on older systems because
@@ -1285,16 +1285,16 @@ moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
* If PMAP is not bootstrapped, we are likely to be
* in real mode.
*/
- __syncicache((void *)pa,PAGE_SIZE);
+ __syncicache((void *)pa, sz);
} else if (pmap == kernel_pmap) {
- __syncicache((void *)va,PAGE_SIZE);
+ __syncicache((void *)va, sz);
} else {
/* Use the scratch page to set up a temp mapping */
mtx_lock(&moea64_scratchpage_mtx);
moea64_set_scratchpage_pa(1,pa);
- __syncicache((void *)moea64_scratchpage_va[1],PAGE_SIZE);
+ __syncicache((void *)moea64_scratchpage_va[1], sz);
mtx_unlock(&moea64_scratchpage_mtx);
}
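All three branches end up running __syncicache() over an address the
CPU can actually reference: the physical address while still in real
mode, the kernel VA for the kernel pmap, and a temporary mapping at a
reserved scratch VA for user pmaps, whose pages need not be mapped in
the kernel at all. On PowerPC that primitive is conventionally the
dcbst/sync/icbi/isync sequence; a minimal sketch, assuming a
CACHELINESIZE constant and the hypothetical name sync_icache_range():

    static void
    sync_icache_range(char *p, size_t len)
    {
        size_t off;

        /* Push modified D-cache lines out to memory... */
        for (off = 0; off < len; off += CACHELINESIZE)
            __asm __volatile("dcbst 0,%0" :: "r"(p + off));
        __asm __volatile("sync");
        /* ...invalidate the now-stale I-cache lines... */
        for (off = 0; off < len; off += CACHELINESIZE)
            __asm __volatile("icbi 0,%0" :: "r"(p + off));
        /* ...and discard instructions already prefetched. */
        __asm __volatile("sync; isync");
    }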
@@ -1817,8 +1817,9 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
pvo->pvo_pmap, pvo->pvo_vaddr);
if ((pvo->pvo_pte.lpte.pte_lo &
(LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
- moea64_syncicache(pm, sva,
- pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+ moea64_syncicache(pm, sva,
+ pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
+ PAGE_SIZE);
}
}
UNLOCK_TABLE();
@@ -2406,12 +2407,6 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
return (EFAULT);
}
-boolean_t
-moea64_page_executable(mmu_t mmu, vm_page_t pg)
-{
- return (!moea64_query_bit(pg, LPTE_NOEXEC));
-}
-
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
@@ -2454,3 +2449,26 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
}
+static void
+moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+ struct pvo_entry *pvo;
+ vm_offset_t lim;
+ vm_paddr_t pa;
+ vm_size_t len;
+
+ PMAP_LOCK(pm);
+ while (sz > 0) {
+ lim = round_page(va + 1);
+ len = MIN(lim - va, sz);
+ pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+ if (pvo != NULL) {
+ pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
+ (va & ADDR_POFF);
+ moea64_syncicache(pm, va, pa, len);
+ }
+ va += len;
+ sz -= len;
+ }
+ PMAP_UNLOCK(pm);
+}
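Note that both implementations perform the pvo lookup and the sync
under PMAP_LOCK, so the VA-to-PA translation cannot change underneath
the I-cache operation. That is precisely the locking the log message
says the Book-E implementation was missing when it tried to handle
I-cache coherency from within pmap_enter().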