author     marcel <marcel@FreeBSD.org>    2009-10-21 18:38:02 +0000
committer  marcel <marcel@FreeBSD.org>    2009-10-21 18:38:02 +0000
commit     51bb720939567fa381c6a03839b51f9c80bc67ef (patch)
tree       9665f89431ede73407ae0ad11ebcc8198166085e /sys
parent     5598d561ce3ef08a3a8c5d518ca09e28883ff050 (diff)
o  Introduce vm_sync_icache() for making the I-cache coherent with
   the memory or D-cache, depending on the semantics of the platform.
   vm_sync_icache() is basically a wrapper around pmap_sync_icache(),
   that translates the vm_map_t argument to pmap_t.
o  Introduce pmap_sync_icache() to all PMAP implementations. For powerpc
   it replaces the pmap_page_executable() function, added to solve
   the I-cache problem in uiomove_fromphys().
o  In proc_rwmem() call vm_sync_icache() when writing to a page that
   has execute permissions. This assures that when breakpoints are
   written, the I-cache will be coherent and the process will actually
   hit the breakpoint.
o  This also fixes the Book-E PMAP implementation that was missing
   necessary locking while trying to deal with the I-cache coherency
   in pmap_enter() (read: mmu_booke_enter_locked).

The key property of this change is that the I-cache is made coherent
*after* writes have been done. Doing it in the PMAP layer when adding
or changing a mapping means that the I-cache is made coherent *before*
any writes happen. The difference is key when the I-cache prefetches.
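To make the ordering concrete, here is a minimal sketch of the intended
calling pattern, modeled on the proc_rwmem() hunk below. It assumes the
usual VM headers; copy_into_target() is a hypothetical stand-in for
whatever actually performs the write (proc_rwmem() uses
uiomove_fromphys()):

/*
 * Sketch only: assumes <vm/vm.h>, <vm/vm_extern.h> and a hypothetical
 * copy_into_target() helper that writes 'len' bytes at 'va' in 'map'.
 */
static int
poke_text(vm_map_t map, vm_offset_t va, const void *buf, vm_size_t len)
{
	int error;

	/* Perform the store into the target address space first. */
	error = copy_into_target(map, va, buf, len);
	if (error != 0)
		return (error);

	/*
	 * Sync the I-cache only after the write has happened; doing it
	 * when the mapping is entered (the old powerpc approach) can be
	 * defeated by I-cache prefetching before the write.
	 */
	vm_sync_icache(map, va, len);
	return (0);
}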
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c              |  5
-rw-r--r--  sys/arm/arm/pmap.c                  | 10
-rw-r--r--  sys/arm/mv/mv_machdep.c             |  2
-rw-r--r--  sys/i386/i386/pmap.c                |  5
-rw-r--r--  sys/i386/xen/pmap.c                 |  5
-rw-r--r--  sys/ia64/ia64/pmap.c                | 27
-rw-r--r--  sys/kern/sys_process.c              |  4
-rw-r--r--  sys/mips/mips/pmap.c                |  5
-rw-r--r--  sys/powerpc/aim/mmu_oea.c           | 34
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c         | 50
-rw-r--r--  sys/powerpc/booke/pmap.c            | 72
-rw-r--r--  sys/powerpc/include/pmap.h          |  1
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m        | 18
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c | 10
-rw-r--r--  sys/powerpc/powerpc/uio_machdep.c   |  3
-rw-r--r--  sys/sparc64/sparc64/pmap.c          |  5
-rw-r--r--  sys/sun4v/sun4v/pmap.c              |  5
-rw-r--r--  sys/vm/pmap.h                       |  1
-rw-r--r--  sys/vm/vm_extern.h                  |  1
-rw-r--r--  sys/vm/vm_glue.c                    |  7
20 files changed, 196 insertions, 74 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index dd81d6b..c928294 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -4810,6 +4810,11 @@ if (oldpmap) /* XXX FIXME */
critical_exit();
}
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
/*
* Increase the starting virtual address of the given mapping if a
* different alignment might result in more superpage mappings.
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 5e55f8e..3b70a41 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -2863,14 +2863,14 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa))) {
vm_page_lock_queues();
if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva) {
- /* release vm_page lock for pv_entry UMA */
+ /* release vm_page lock for pv_entry UMA */
vm_page_unlock_queues();
if ((pve = pmap_get_pv_entry()) == NULL)
panic("pmap_kenter_internal: no pv entries");
vm_page_lock_queues();
PMAP_LOCK(pmap_kernel());
pmap_enter_pv(m, pve, pmap_kernel(), va,
- PVF_WRITE | PVF_UNMAN);
+ PVF_WRITE | PVF_UNMAN);
pmap_fix_cache(m, pmap_kernel(), va);
PMAP_UNLOCK(pmap_kernel());
} else {
@@ -4567,6 +4567,12 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
}
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
+
/*
* Increase the starting virtual address of the given mapping if a
* different alignment might result in more superpage mappings.
diff --git a/sys/arm/mv/mv_machdep.c b/sys/arm/mv/mv_machdep.c
index 2dc20ce..c348e24 100644
--- a/sys/arm/mv/mv_machdep.c
+++ b/sys/arm/mv/mv_machdep.c
@@ -408,7 +408,7 @@ initarm(void *mdp, void *unused __unused)
availmem_regions_sz = i;
} else {
/* Fall back to hardcoded boothowto flags and metadata. */
- boothowto = RB_VERBOSE | RB_SINGLE;
+ boothowto = 0; // RB_VERBOSE | RB_SINGLE;
lastaddr = fake_preload_metadata();
/*
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 7e3bc37..74edbbc 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -4859,6 +4859,11 @@ pmap_activate(struct thread *td)
critical_exit();
}
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
/*
* Increase the starting virtual address of the given mapping if a
* different alignment might result in more superpage mappings.
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 1d9c9c1..5bf1331 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -4175,6 +4175,11 @@ pmap_activate(struct thread *td)
critical_exit();
}
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
/*
* Increase the starting virtual address of the given mapping if a
* different alignment might result in more superpage mappings.
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index a5a2bc3..a6fac1f 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -2276,6 +2276,33 @@ out:
return (prevpm);
}
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+ pmap_t oldpm;
+ struct ia64_lpte *pte;
+ vm_offset_t lim;
+ vm_size_t len;
+
+ sz += va & 31;
+ va &= ~31;
+ sz = (sz + 31) & ~31;
+
+ PMAP_LOCK(pm);
+ oldpm = pmap_switch(pm);
+ while (sz > 0) {
+ lim = round_page(va);
+ len = MIN(lim - va, sz);
+ pte = pmap_find_vhpt(va);
+ if (pte != NULL && pmap_present(pte))
+ ia64_sync_icache(va, len);
+ va += len;
+ sz -= len;
+ }
+ pmap_switch(oldpm);
+ PMAP_UNLOCK(pm);
+}
+
/*
* Increase the starting virtual address of the given mapping if a
* different alignment might result in more superpage mappings.
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index b8803af..88a5b89 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -327,6 +327,10 @@ proc_rwmem(struct proc *p, struct uio *uio)
*/
error = uiomove_fromphys(&m, page_offset, len, uio);
+ /* Make the I-cache coherent for breakpoints. */
+ if (!error && writing && (out_prot & VM_PROT_EXECUTE))
+ vm_sync_icache(map, uva, len);
+
/*
* Release the page.
*/
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 7b106dc..25b0b3a 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -2903,6 +2903,11 @@ pmap_activate(struct thread *td)
critical_exit();
}
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
/*
* Increase the starting virtual address of the given mapping if a
* different alignment might result in more superpage mappings.
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index bbf2e04..1ca230c 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -330,7 +330,7 @@ void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea_page_executable(mmu_t, vm_page_t);
+static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_change_wiring, moea_change_wiring),
@@ -357,6 +357,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_remove, moea_remove),
MMUMETHOD(mmu_remove_all, moea_remove_all),
MMUMETHOD(mmu_remove_write, moea_remove_write),
+ MMUMETHOD(mmu_sync_icache, moea_sync_icache),
MMUMETHOD(mmu_zero_page, moea_zero_page),
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
@@ -371,7 +372,6 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_kextract, moea_kextract),
MMUMETHOD(mmu_kenter, moea_kenter),
MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
- MMUMETHOD(mmu_page_executable, moea_page_executable),
{ 0, 0 }
};
@@ -2359,12 +2359,6 @@ moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
return (EFAULT);
}
-boolean_t
-moea_page_executable(mmu_t mmu, vm_page_t pg)
-{
- return ((moea_attr_fetch(pg) & PTE_EXEC) == PTE_EXEC);
-}
-
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
@@ -2424,3 +2418,27 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
}
+
+static void
+moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+ struct pvo_entry *pvo;
+ vm_offset_t lim;
+ vm_paddr_t pa;
+ vm_size_t len;
+
+ PMAP_LOCK(pm);
+ while (sz > 0) {
+ lim = round_page(va);
+ len = MIN(lim - va, sz);
+ pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+ if (pvo != NULL) {
+ pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+ (va & ADDR_POFF);
+ moea_syncicache(pa, len);
+ }
+ va += len;
+ sz -= len;
+ }
+ PMAP_UNLOCK(pm);
+}
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 4dad3dc..67e457c 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -369,7 +369,7 @@ static boolean_t moea64_query_bit(vm_page_t, u_int64_t);
static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
static void moea64_kremove(mmu_t, vm_offset_t);
static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
- vm_offset_t pa);
+ vm_offset_t pa, vm_size_t sz);
static void tlbia(void);
/*
@@ -410,7 +410,7 @@ void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea64_page_executable(mmu_t, vm_page_t);
+static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
@@ -437,6 +437,7 @@ static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_remove, moea64_remove),
MMUMETHOD(mmu_remove_all, moea64_remove_all),
MMUMETHOD(mmu_remove_write, moea64_remove_write),
+ MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
MMUMETHOD(mmu_zero_page, moea64_zero_page),
MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
@@ -451,7 +452,6 @@ static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_kextract, moea64_kextract),
MMUMETHOD(mmu_kenter, moea64_kenter),
MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
- MMUMETHOD(mmu_page_executable, moea64_page_executable),
{ 0, 0 }
};
@@ -1264,12 +1264,12 @@ moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* mapped executable and cacheable.
*/
if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
- moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m));
+ moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}
}
static void
-moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
+moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz)
{
/*
* This is much trickier than on older systems because
@@ -1285,16 +1285,16 @@ moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
* If PMAP is not bootstrapped, we are likely to be
* in real mode.
*/
- __syncicache((void *)pa,PAGE_SIZE);
+ __syncicache((void *)pa, sz);
} else if (pmap == kernel_pmap) {
- __syncicache((void *)va,PAGE_SIZE);
+ __syncicache((void *)va, sz);
} else {
/* Use the scratch page to set up a temp mapping */
mtx_lock(&moea64_scratchpage_mtx);
moea64_set_scratchpage_pa(1,pa);
- __syncicache((void *)moea64_scratchpage_va[1],PAGE_SIZE);
+ __syncicache((void *)moea64_scratchpage_va[1], sz);
mtx_unlock(&moea64_scratchpage_mtx);
}
@@ -1817,8 +1817,9 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
pvo->pvo_pmap, pvo->pvo_vaddr);
if ((pvo->pvo_pte.lpte.pte_lo &
(LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
- moea64_syncicache(pm, sva,
- pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+ moea64_syncicache(pm, sva,
+ pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
+ PAGE_SIZE);
}
}
UNLOCK_TABLE();
@@ -2406,12 +2407,6 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
return (EFAULT);
}
-boolean_t
-moea64_page_executable(mmu_t mmu, vm_page_t pg)
-{
- return (!moea64_query_bit(pg, LPTE_NOEXEC));
-}
-
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
@@ -2454,3 +2449,26 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
+static void
+moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+ struct pvo_entry *pvo;
+ vm_offset_t lim;
+ vm_paddr_t pa;
+ vm_size_t len;
+
+ PMAP_LOCK(pm);
+ while (sz > 0) {
+ lim = round_page(va);
+ len = MIN(lim - va, sz);
+ pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+ if (pvo != NULL) {
+ pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+ (va & ADDR_POFF);
+ moea64_syncicache(pm, va, pa, len);
+ }
+ va += len;
+ sz -= len;
+ }
+ PMAP_UNLOCK(pm);
+}
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 78a7250..26302a5 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -319,7 +319,8 @@ static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t);
+static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
+ vm_size_t);
static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
vm_size_t, vm_size_t *);
static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
@@ -357,6 +358,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_remove, mmu_booke_remove),
MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
+ MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
@@ -370,7 +372,6 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_kenter, mmu_booke_kenter),
MMUMETHOD(mmu_kextract, mmu_booke_kextract),
/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
- MMUMETHOD(mmu_page_executable, mmu_booke_page_executable),
MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
/* dumpsys() support */
@@ -1682,21 +1683,6 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
__syncicache((void *)va, PAGE_SIZE);
sync = 0;
}
-
- if (sync) {
- /* Create a temporary mapping. */
- pmap = PCPU_GET(curpmap);
-
- va = 0;
- pte = pte_find(mmu, pmap, va);
- KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
-
- flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;
-
- pte_enter(mmu, pmap, m, va, flags);
- __syncicache((void *)va, PAGE_SIZE);
- pte_remove(mmu, pmap, va, PTBL_UNHOLD);
- }
}
/*
@@ -1991,25 +1977,47 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
vm_page_flag_clear(m, PG_WRITEABLE);
}
-static boolean_t
-mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
+static void
+mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
- pv_entry_t pv;
pte_t *pte;
- boolean_t executable;
+ pmap_t pmap;
+ vm_page_t m;
+ vm_offset_t addr;
+ vm_paddr_t pa;
+ int active, valid;
+
+ va = trunc_page(va);
+ sz = round_page(sz);
- executable = FALSE;
- TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
- PMAP_LOCK(pv->pv_pmap);
- pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
- if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
- executable = TRUE;
- PMAP_UNLOCK(pv->pv_pmap);
- if (executable)
- break;
+ vm_page_lock_queues();
+ pmap = PCPU_GET(curpmap);
+ active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
+ while (sz > 0) {
+ PMAP_LOCK(pm);
+ pte = pte_find(mmu, pm, va);
+ valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
+ if (valid)
+ pa = PTE_PA(pte);
+ PMAP_UNLOCK(pm);
+ if (valid) {
+ if (!active) {
+ /* Create a mapping in the active pmap. */
+ addr = 0;
+ m = PHYS_TO_VM_PAGE(pa);
+ PMAP_LOCK(pmap);
+ pte_enter(mmu, pmap, m, addr,
+ PTE_SR | PTE_VALID | PTE_UR);
+ __syncicache((void *)addr, PAGE_SIZE);
+ pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
+ PMAP_UNLOCK(pmap);
+ } else
+ __syncicache((void *)va, PAGE_SIZE);
+ }
+ va += PAGE_SIZE;
+ sz -= PAGE_SIZE;
}
-
- return (executable);
+ vm_page_unlock_queues();
}
/*
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index d4fce7f..a23052e 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -171,7 +171,6 @@ void pmap_bootstrap(vm_offset_t, vm_offset_t);
void pmap_kenter(vm_offset_t va, vm_offset_t pa);
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_offset_t, vm_size_t);
-boolean_t pmap_page_executable(vm_page_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_deactivate(struct thread *);
vm_offset_t pmap_kextract(vm_offset_t);
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 4a5a37c..5b8ba14 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -789,15 +789,21 @@ METHOD boolean_t dev_direct_mapped {
/**
- * @brief Evaluate if a physical page has an executable mapping
+ * @brief Enforce instruction cache coherency. Typically called after a
+ * region of memory has been modified and before execution of or within
+ * that region is attempted. Setting breakpoints in a process through
+ * ptrace(2) is one example of when the instruction cache needs to be
+ * made coherent.
*
- * @param _pg physical page
- *
- * @retval bool TRUE if a physical mapping exists for the given page.
+ * @param _pm the physical map of the virtual address
+ * @param _va the virtual address of the modified region
+ * @param _sz the size of the modified region
*/
-METHOD boolean_t page_executable {
+METHOD void sync_icache {
mmu_t _mmu;
- vm_page_t _pg;
+ pmap_t _pm;
+ vm_offset_t _va;
+ vm_size_t _sz;
};
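
For reference, mmu_if.m is compiled into a kobj interface at build time:
each PMAP binds its implementation with an MMUMETHOD() entry (as the
moea, moea64 and booke hunks above do), and the machine-independent
pmap_sync_icache() forwards through MMU_SYNC_ICACHE() (see the
pmap_dispatch.c hunk below). The following standalone sketch, with
hypothetical names in place of the generated kobj glue, only illustrates
that table-based dispatch pattern:

#include <stddef.h>

/* Hypothetical stand-ins for the kernel types used by the interface. */
typedef void *mmu_t;
typedef void *pmap_t;
typedef unsigned long vm_offset_t;
typedef unsigned long vm_size_t;

/* One slot per MMU method; only sync_icache is shown here. */
struct mmu_method_table {
	void	(*sync_icache)(mmu_t, pmap_t, vm_offset_t, vm_size_t);
};

/* A PMAP implementation (cf. moea_sync_icache()) fills its slot... */
static void
example_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	/* MD work: look up translations and flush the I-cache range. */
	(void)mmu; (void)pm; (void)va; (void)sz;
}

static struct mmu_method_table example_methods = {
	.sync_icache = example_sync_icache,
};

/*
 * ...and the MI wrapper (cf. pmap_sync_icache() in pmap_dispatch.c)
 * simply forwards through the table, like MMU_SYNC_ICACHE() does.
 */
static void
dispatch_sync_icache(struct mmu_method_table *mmu, pmap_t pm,
    vm_offset_t va, vm_size_t sz)
{
	mmu->sync_icache(mmu, pm, va, sz);
}

int
main(void)
{
	dispatch_sync_icache(&example_methods, NULL, 0x1000, 64);
	return (0);
}
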
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index b34c7eb..2b45e17 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -457,12 +457,12 @@ pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}
-boolean_t
-pmap_page_executable(vm_page_t pg)
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, pg);
- return (MMU_PAGE_EXECUTABLE(mmu_obj, pg));
+
+ CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
+ return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
}
vm_offset_t
diff --git a/sys/powerpc/powerpc/uio_machdep.c b/sys/powerpc/powerpc/uio_machdep.c
index 2a88fd2..6d17114 100644
--- a/sys/powerpc/powerpc/uio_machdep.c
+++ b/sys/powerpc/powerpc/uio_machdep.c
@@ -107,9 +107,6 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
sf_buf_free(sf);
goto out;
}
- if (uio->uio_rw == UIO_WRITE &&
- pmap_page_executable(m))
- __syncicache(cp, cnt);
break;
case UIO_SYSSPACE:
if (uio->uio_rw == UIO_READ)
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 664a856..5956818 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -2001,6 +2001,11 @@ pmap_activate(struct thread *td)
mtx_unlock_spin(&sched_lock);
}
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
/*
* Increase the starting virtual address of the given mapping if a
* different alignment might result in more superpage mappings.
diff --git a/sys/sun4v/sun4v/pmap.c b/sys/sun4v/sun4v/pmap.c
index a754ce9..d3b8c79 100644
--- a/sys/sun4v/sun4v/pmap.c
+++ b/sys/sun4v/sun4v/pmap.c
@@ -424,6 +424,11 @@ pmap_activate(struct thread *td)
critical_exit();
}
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
/*
* Increase the starting virtual address of the given mapping if a
* different alignment might result in more superpage mappings.
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 22d6118..02fda07 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -133,6 +133,7 @@ void pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
void pmap_remove_all(vm_page_t m);
void pmap_remove_pages(pmap_t);
void pmap_remove_write(vm_page_t m);
+void pmap_sync_icache(pmap_t, vm_offset_t, vm_size_t);
void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 65b6c8e..ff48983 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -63,6 +63,7 @@ int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace
void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
void vm_set_page_size(void);
+void vm_sync_icache(vm_map_t, vm_offset_t, vm_size_t);
struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *);
int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 851c733..8882565 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -309,6 +309,13 @@ vm_imgact_unmap_page(struct sf_buf *sf)
vm_page_unlock_queues();
}
+void
+vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
+{
+
+ pmap_sync_icache(map->pmap, va, sz);
+}
+
struct kstack_cache_entry {
vm_object_t ksobj;
struct kstack_cache_entry *next_ks_entry;