summaryrefslogtreecommitdiffstats
path: root/sys/powerpc/aim
diff options
context:
space:
mode:
authornwhitehorn <nwhitehorn@FreeBSD.org>2012-03-28 17:25:29 +0000
committernwhitehorn <nwhitehorn@FreeBSD.org>2012-03-28 17:25:29 +0000
commit03485ae3df8c78f88692d456b678e26fa30776e8 (patch)
tree1c36b5c871b903f15d41fd95983a137c86e0b392 /sys/powerpc/aim
parentb5e3c2bb77a0125c2119063c2135a094534fa050 (diff)
downloadFreeBSD-src-03485ae3df8c78f88692d456b678e26fa30776e8.zip
FreeBSD-src-03485ae3df8c78f88692d456b678e26fa30776e8.tar.gz
More PMAP performance improvements: skip 256 MB segments entirely if they
are not mapped during ranged operations, and reduce the scope of the tlbie lock to only the actual tlbie instruction instead of the entire sequence. There are a few more optimization possibilities here as well.
Diffstat (limited to 'sys/powerpc/aim')
-rw-r--r--sys/powerpc/aim/mmu_oea64.c24
-rw-r--r--sys/powerpc/aim/moea64_native.c13
2 files changed, 26 insertions, 11 deletions
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 794f891..41361b7 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1981,10 +1981,18 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
LOCK_TABLE_RD();
PMAP_LOCK(pm);
if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
- for (; sva < eva; sva += PAGE_SIZE) {
+ while (sva < eva) {
+ #ifdef __powerpc64__
+ if (pm != kernel_pmap &&
+ user_va_to_slb_entry(pm, sva) == NULL) {
+ sva = roundup2(sva + 1, SEGMENT_LENGTH);
+ continue;
+ }
+ #endif
pvo = moea64_pvo_find_va(pm, sva);
if (pvo != NULL)
moea64_pvo_protect(mmu, pm, pvo, prot);
+ sva += PAGE_SIZE;
}
} else {
LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
@@ -2095,10 +2103,18 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
LOCK_TABLE_WR();
PMAP_LOCK(pm);
if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
- for (; sva < eva; sva += PAGE_SIZE) {
+ while (sva < eva) {
+ #ifdef __powerpc64__
+ if (pm != kernel_pmap &&
+ user_va_to_slb_entry(pm, sva) == NULL) {
+ sva = roundup2(sva + 1, SEGMENT_LENGTH);
+ continue;
+ }
+ #endif
pvo = moea64_pvo_find_va(pm, sva);
if (pvo != NULL)
moea64_pvo_remove(mmu, pvo);
+ sva += PAGE_SIZE;
}
} else {
LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
@@ -2566,7 +2582,7 @@ moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
ppa = trunc_page(pa);
offset = pa & PAGE_MASK;
- size = roundup(offset + size, PAGE_SIZE);
+ size = roundup2(offset + size, PAGE_SIZE);
va = kmem_alloc_nofault(kernel_map, size);
@@ -2597,7 +2613,7 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
base = trunc_page(va);
offset = va & PAGE_MASK;
- size = roundup(offset + size, PAGE_SIZE);
+ size = roundup2(offset + size, PAGE_SIZE);
kmem_free(kernel_map, base, size);
}
diff --git a/sys/powerpc/aim/moea64_native.c b/sys/powerpc/aim/moea64_native.c
index 97766da..081194f 100644
--- a/sys/powerpc/aim/moea64_native.c
+++ b/sys/powerpc/aim/moea64_native.c
@@ -103,6 +103,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -152,15 +153,13 @@ TLBIE(uint64_t vpn) {
vpn &= ~(0xffffULL << 48);
#ifdef __powerpc64__
+ sched_pin();
+ __asm __volatile("ptesync");
mtx_lock(&tlbie_mutex);
- __asm __volatile("\
- ptesync; \
- tlbie %0; \
- eieio; \
- tlbsync; \
- ptesync;"
- :: "r"(vpn) : "memory");
+ __asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
mtx_unlock(&tlbie_mutex);
+ __asm __volatile("eieio; tlbsync; ptesync");
+ sched_unpin();
#else
vpn_hi = (uint32_t)(vpn >> 32);
vpn_lo = (uint32_t)vpn;
OpenPOWER on IntegriCloud