author    nwhitehorn <nwhitehorn@FreeBSD.org>  2012-03-15 19:36:52 +0000
committer nwhitehorn <nwhitehorn@FreeBSD.org>  2012-03-15 19:36:52 +0000
commit    61f538e71928779b51472db5713d94aa55dc5384 (patch)
tree      5056fcbc4ab96ef62d0208097a77873aca6f25b7 /sys/powerpc/aim
parent    fdd82a50b91d948ea7021103ba3e73f28599520e (diff)
Improve the algorithm for deciding whether to loop through all process pages
or to look them up individually in pmap_remove(), and apply the same logic in
the other ranged operation (pmap_protect). This speeds up 'make installworld'
by a factor of 2 on powerpc64.

MFC after:	1 week
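For readers skimming the diff below, the heuristic this commit introduces
reduces to the sketch that follows. It is a minimal illustration, not the
FreeBSD code: struct mapping, struct pmap, find_mapping(), and
apply_to_mapping() are hypothetical stand-ins for the PVO entry, the pmap,
moea64_pvo_find_va(), and the per-PVO work done by moea64_pvo_protect() or
the remove path. The idea: when the virtual address range covers fewer pages
than are currently resident, probe each address individually; otherwise walk
the pmap's resident list once, since a single list pass is cheaper than one
lookup per page of a possibly huge, sparsely populated range.

/*
 * Sketch of the range-vs-list heuristic (hypothetical types and helpers,
 * not the FreeBSD implementation).
 */
#include <stddef.h>

#define	PAGE_SIZE	4096UL

struct mapping {
	unsigned long	va;	/* virtual address the entry maps */
	struct mapping	*next;	/* linkage on the pmap's resident list */
};

struct pmap {
	struct mapping	*resident_list;	/* every resident mapping */
	unsigned long	resident_count;	/* length of that list, in pages */
};

/* Hypothetical per-address lookup, standing in for moea64_pvo_find_va(). */
static struct mapping *
find_mapping(struct pmap *pm, unsigned long va)
{
	struct mapping *m;

	for (m = pm->resident_list; m != NULL; m = m->next)
		if (m->va == va)
			return (m);
	return (NULL);
}

/* Hypothetical per-mapping work, standing in for moea64_pvo_protect(). */
static void
apply_to_mapping(struct pmap *pm, struct mapping *m)
{
	(void)pm;
	(void)m;
	/* e.g. rewrite protection bits in the cached and hardware PTEs */
}

static void
ranged_op(struct pmap *pm, unsigned long sva, unsigned long eva)
{
	struct mapping *m, *tm;

	if ((eva - sva) / PAGE_SIZE < pm->resident_count) {
		/* Few pages in the range: probe each address directly. */
		for (; sva < eva; sva += PAGE_SIZE) {
			m = find_mapping(pm, sva);
			if (m != NULL)
				apply_to_mapping(pm, m);
		}
	} else {
		/*
		 * Range spans more pages than are resident: one pass
		 * over the resident list wins. The two-variable walk
		 * mirrors LIST_FOREACH_SAFE, so the current entry may
		 * be freed mid-loop (as the remove path requires).
		 */
		for (m = pm->resident_list; m != NULL; m = tm) {
			tm = m->next;
			if (m->va < sva || m->va >= eva)
				continue;
			apply_to_mapping(pm, m);
		}
	}
}

The commit additionally returns early from moea64_remove() when nothing is
resident at all, so an empty pmap pays for no locking or iteration.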
Diffstat (limited to 'sys/powerpc/aim')
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c | 98
1 file changed, 58 insertions(+), 40 deletions(-)
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 1bd88c4..c66e26b 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1909,16 +1909,51 @@ moea64_pinit0(mmu_t mmu, pmap_t pm)
/*
* Set the physical protection on the specified range of this map as requested.
*/
+static void
+moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
+{
+ uintptr_t pt;
+
+ /*
+ * Grab the PTE pointer before we diddle with the cached PTE
+ * copy.
+ */
+ LOCK_TABLE();
+ pt = MOEA64_PVO_TO_PTE(mmu, pvo);
+
+ /*
+ * Change the protection of the page.
+ */
+ pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
+ pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
+ pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
+
+ /*
+ * If the PVO is in the page table, update that pte as well.
+ */
+ if (pt != -1) {
+ MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+ pvo->pvo_vpn);
+ if ((pvo->pvo_pte.lpte.pte_lo &
+ (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
+ moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
+ pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
+ PAGE_SIZE);
+ }
+ }
+ UNLOCK_TABLE();
+}
+
void
moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
vm_prot_t prot)
{
- struct pvo_entry *pvo;
- uintptr_t pt;
-
- CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
- eva, prot);
+ struct pvo_entry *pvo, *tpvo;
+ CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
+ sva, eva, prot);
KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
("moea64_protect: non current pmap"));
@@ -1930,41 +1965,18 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
vm_page_lock_queues();
PMAP_LOCK(pm);
- for (; sva < eva; sva += PAGE_SIZE) {
- pvo = moea64_pvo_find_va(pm, sva);
- if (pvo == NULL)
- continue;
-
- /*
- * Grab the PTE pointer before we diddle with the cached PTE
- * copy.
- */
- LOCK_TABLE();
- pt = MOEA64_PVO_TO_PTE(mmu, pvo);
-
- /*
- * Change the protection of the page.
- */
- pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
- pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
- pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
- if ((prot & VM_PROT_EXECUTE) == 0)
- pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
-
- /*
- * If the PVO is in the page table, update that pte as well.
- */
- if (pt != -1) {
- MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
- pvo->pvo_vpn);
- if ((pvo->pvo_pte.lpte.pte_lo &
- (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
- moea64_syncicache(mmu, pm, sva,
- pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
- PAGE_SIZE);
- }
+ if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
+ for (; sva < eva; sva += PAGE_SIZE) {
+ pvo = moea64_pvo_find_va(pm, sva);
+ if (pvo != NULL)
+ moea64_pvo_protect(mmu, pm, pvo, prot);
+ }
+ } else {
+ LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
+ if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
+ continue;
+ moea64_pvo_protect(mmu, pm, pvo, prot);
}
- UNLOCK_TABLE();
}
vm_page_unlock_queues();
PMAP_UNLOCK(pm);
@@ -2041,9 +2053,15 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
struct pvo_entry *pvo, *tpvo;
+ /*
+ * Perform an unsynchronized read. This is, however, safe.
+ */
+ if (pm->pm_stats.resident_count == 0)
+ return;
+
vm_page_lock_queues();
PMAP_LOCK(pm);
- if ((eva - sva)/PAGE_SIZE < 10) {
+ if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
for (; sva < eva; sva += PAGE_SIZE) {
pvo = moea64_pvo_find_va(pm, sva);
if (pvo != NULL)