author     cognet <cognet@FreeBSD.org>  2005-05-24 21:47:10 +0000
committer  cognet <cognet@FreeBSD.org>  2005-05-24 21:47:10 +0000
commit     734781d7b2f4eae840a8bf4be6091405d4fad45a (patch)
tree       4687ea51839267f84857fd8deec55288916410ea /sys/arm
parent     98e12d6f01d35b2a2328f659f8c880739eae2a97 (diff)
Write back affected pages in pmap_qremove() as well. This removes the need
to change the DACR when switching to a kernel thread, which makes the
userland thread => kernel thread => same userland thread switch sequence
cheaper by avoiding data cache and TLB invalidation entirely.
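For readability, here is a condensed sketch of pmap_qremove() as it reads with this patch applied (lifted from the pmap.c hunk below; vtophys(), PHYS_TO_VM_PAGE() and pmap_wb_page() are existing helpers in sys/arm/arm/pmap.c): every still-mapped page is written back and invalidated before its kernel mapping is torn down.

void
pmap_qremove(vm_offset_t va, int count)
{
        vm_paddr_t pa;
        int i;

        for (i = 0; i < count; i++) {
                pa = vtophys(va);
                if (pa) {
                        /* Write back and invalidate every cached mapping of the page... */
                        pmap_wb_page(PHYS_TO_VM_PAGE(pa), TRUE);
                        /* ...then remove the kernel mapping itself. */
                        pmap_kremove(va);
                }
                va += PAGE_SIZE;
        }
}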
Diffstat (limited to 'sys/arm')
-rw-r--r--  sys/arm/arm/pmap.c    16
-rw-r--r--  sys/arm/arm/swtch.S    2
2 files changed, 12 insertions, 6 deletions
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 8d4d24b..c3ab3e0 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -2965,12 +2965,12 @@ pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 }

 static void
-pmap_wb_page(vm_page_t m)
+pmap_wb_page(vm_page_t m, boolean_t do_inv)
 {
         struct pv_entry *pv;

         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
-            pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE,
+            pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, do_inv,
                 (pv->pv_flags & PVF_WRITE) == 0);
 }

@@ -2988,7 +2988,7 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
         int i;

         for (i = 0; i < count; i++) {
-                pmap_wb_page(m[i]);
+                pmap_wb_page(m[i], TRUE);
                 pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
                     KENTER_CACHE);
                 va += PAGE_SIZE;
@@ -3003,10 +3003,15 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
 void
 pmap_qremove(vm_offset_t va, int count)
 {
+        vm_paddr_t pa;
         int i;

         for (i = 0; i < count; i++) {
-                pmap_kremove(va);
+                pa = vtophys(va);
+                if (pa) {
+                        pmap_wb_page(PHYS_TO_VM_PAGE(pa), TRUE);
+                        pmap_kremove(va);
+                }
                 va += PAGE_SIZE;
         }
 }
@@ -3516,7 +3521,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
         VM_OBJECT_UNLOCK(m->object);
         mtx_lock(&Giant);
         pmap_enter(pmap, va, m, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
-        pmap_dcache_wbinv_all(pmap);
+        pmap_idcache_wbinv_all(pmap);
         mtx_unlock(&Giant);
         VM_OBJECT_LOCK(m->object);
         vm_page_lock_queues();
@@ -4277,6 +4282,7 @@ pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
 void
 pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
+        cpu_dcache_wbinv_all();
         pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
 }

diff --git a/sys/arm/arm/swtch.S b/sys/arm/arm/swtch.S
index 4470065..63b20b6 100644
--- a/sys/arm/arm/swtch.S
+++ b/sys/arm/arm/swtch.S
@@ -298,7 +298,6 @@ ENTRY(cpu_switch)
         ldr     r5, [r9, #(PCB_DACR)]           /* r5 = new DACR */
         mov     r2, #DOMAIN_CLIENT
         cmp     r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
-        mcreq   p15, 0, r5, c3, c0, 0           /* Update DACR for new context */
         beq     .Lcs_context_switched           /* Yup. Don't flush cache */
         mrc     p15, 0, r0, c3, c0, 0           /* r0 = old DACR */
         /*
@@ -462,6 +461,7 @@ ENTRY(fork_trampoline)
         mrs     r0, cpsr
         orr     r0, r0, #(I32_bit)
         msr     cpsr_c, r0
+        DO_AST
         PULLFRAME
         movs    pc, lr                          /* Exit */