author     ups <ups@FreeBSD.org>    2006-06-15 01:01:06 +0000
committer  ups <ups@FreeBSD.org>    2006-06-15 01:01:06 +0000
commit     b3a7439a45bf95ef2c21dfad6ba1a051467efad1 (patch)
tree       ef8795aacb2aaea54cc725dafc9bd2ea5f8fdba3
parent     63bddd18cc9d5d6e46d95bda0c636f912901812d (diff)
Remove mpte optimization from pmap_enter_quick().

There is a race with the current locking scheme and removing it should
have no measurable performance impact. This fixes page faults leading
to panics in pmap_enter_quick_locked() on amd64/i386.

Reviewed by:	alc, jhb, peter, ps
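For readers unfamiliar with the pattern being removed: pmap_enter_quick()
previously returned the page-table page it had used, and callers such as
vm_fault_prefault() passed that pointer back in as the "mpte" hint on the
next call. The hint was computed under PMAP_LOCK() but carried across
PMAP_UNLOCK(), so another thread could free or recycle the page-table page
in the unlocked window, leaving the next call to dereference a stale
pointer. Below is a minimal, standalone userspace sketch of the two calling
conventions; all names are hypothetical, and "struct table" and "page"
merely stand in for a pmap and its page-table page.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Standalone sketch (hypothetical names) of the pattern this commit
 * removes.  "struct table" stands in for a pmap, "page" for a
 * page-table page, and the cached pointer plays the role of mpte.
 */
struct table {
	pthread_mutex_t lock;
	int *page;
};

static int *
enter_locked(struct table *t, int *hint)
{
	/* Under the lock, a cached hint is safe to reuse. */
	if (hint == NULL)
		hint = t->page;		/* the lookup the hint avoids */
	(*hint)++;
	return (hint);
}

/*
 * Old convention: the hint escapes the locked region and is fed back
 * in later.  Between the unlock below and the next call, nothing
 * stops another thread from freeing t->page, so the hint can dangle.
 */
static int *
enter_quick_old(struct table *t, int *hint)
{
	pthread_mutex_lock(&t->lock);
	hint = enter_locked(t, hint);
	pthread_mutex_unlock(&t->lock);
	return (hint);
}

/* New convention: no state survives outside the locked region. */
static void
enter_quick_new(struct table *t)
{
	pthread_mutex_lock(&t->lock);
	(void) enter_locked(t, NULL);
	pthread_mutex_unlock(&t->lock);
}

int
main(void)
{
	struct table t = { PTHREAD_MUTEX_INITIALIZER, malloc(sizeof(int)) };
	int *hint;

	*t.page = 0;
	hint = enter_quick_old(&t, NULL);
	/* If t.page were freed and replaced here, 'hint' would dangle. */
	(void) enter_quick_old(&t, hint);
	enter_quick_new(&t);
	printf("entered %d times\n", *t.page);
	free(t.page);
	return (0);
}

The new convention gives up little: pmap_enter_quick_locked() simply
repeats the lookup under the lock, which, as the commit message notes,
should have no measurable performance cost.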
-rw-r--r--  sys/amd64/amd64/pmap.c               8
-rw-r--r--  sys/arm/arm/pmap.c                   6
-rw-r--r--  sys/i386/i386/pmap.c                 8
-rw-r--r--  sys/ia64/ia64/pmap.c                 6
-rw-r--r--  sys/powerpc/aim/mmu_oea.c            9
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m         6
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c        9
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c  7
-rw-r--r--  sys/sparc64/sparc64/pmap.c           6
-rw-r--r--  sys/vm/pmap.h                        4
-rw-r--r--  sys/vm/vm_fault.c                    6
11 files changed, 28 insertions, 47 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 9206f7d..44ed928 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2356,15 +2356,13 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
* but is *MUCH* faster than pmap_enter...
*/
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
PMAP_LOCK(pmap);
- mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte);
+ (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
PMAP_UNLOCK(pmap);
- return (mpte);
}
static vm_page_t
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 2d03111..7ebc5ea 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -3572,16 +3572,14 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
* but is *MUCH* faster than pmap_enter...
*/
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE);
PMAP_UNLOCK(pmap);
- return (NULL);
}
/*
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 86afaee..63b5b7c 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2432,15 +2432,13 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
* but is *MUCH* faster than pmap_enter...
*/
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
PMAP_LOCK(pmap);
- mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte);
+ (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
PMAP_UNLOCK(pmap);
- return (mpte);
}
static vm_page_t
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 0fa0d0e..155b0c7 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1665,15 +1665,13 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
* but is *MUCH* faster than pmap_enter...
*/
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
PMAP_LOCK(pmap);
pmap_enter_quick_locked(pmap, va, m, prot);
PMAP_UNLOCK(pmap);
- return (NULL);
}
static void
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 23e8c4f..6a9af00 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -313,8 +313,7 @@ void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
-vm_page_t moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
- vm_page_t);
+void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
@@ -1174,16 +1173,16 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
PMAP_UNLOCK(pm);
}
-vm_page_t
+void
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, vm_page_t mpte)
+ vm_prot_t prot)
{
PMAP_LOCK(pm);
moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE);
PMAP_UNLOCK(pm);
- return (NULL);
+
}
vm_paddr_t
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index cd9a1d5..5fd0510 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -227,17 +227,13 @@ METHOD void enter_object {
* @param _va mapping virtual address
* @param _pg mapping physical page
* @param _prot new page protection - used to see if page is exec.
- * @param _mpte ???
- *
- * @retval NULL (possibly a hint for future calls ?)
*/
-METHOD vm_page_t enter_quick {
+METHOD void enter_quick {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _va;
vm_page_t _pg;
vm_prot_t _prot;
- vm_page_t _mpte;
};
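(Note: mmu_if.m is an interface definition, not C; at build time each
METHOD declaration is expanded into kobj dispatch glue in mmu_if.h. A
rough, hypothetical approximation of the wrapper resulting from the new
declaration is sketched below; the real file is machine-generated and
differs in detail.)

/* Hypothetical approximation of the generated kobj glue. */
typedef void mmu_enter_quick_t(mmu_t _mmu, pmap_t _pmap, vm_offset_t _va,
    vm_page_t _pg, vm_prot_t _prot);

static __inline void
MMU_ENTER_QUICK(mmu_t _mmu, pmap_t _pmap, vm_offset_t _va, vm_page_t _pg,
    vm_prot_t _prot)
{
	kobjop_t _m;

	/* Look the method up in this mmu object's ops table and call it. */
	KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_enter_quick);
	((mmu_enter_quick_t *)_m)(_mmu, _pmap, _va, _pg, _prot);
}

This dispatch layer is why pmap_dispatch.c, further below, can simply call
MMU_ENTER_QUICK() without knowing which MMU implementation is active.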
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index 23e8c4f..6a9af00 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -313,8 +313,7 @@ void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
-vm_page_t moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
- vm_page_t);
+void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
@@ -1174,16 +1173,16 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
PMAP_UNLOCK(pm);
}
-vm_page_t
+void
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, vm_page_t mpte)
+ vm_prot_t prot)
{
PMAP_LOCK(pm);
moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE);
PMAP_UNLOCK(pm);
- return (NULL);
+
}
vm_paddr_t
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index d454af6..77f8368 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -122,11 +122,10 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
- return (MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot, mpte));
+ MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}
vm_paddr_t
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 43dfbd5..200867b 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1443,16 +1443,14 @@ pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
PMAP_UNLOCK(pm);
}
-vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
PMAP_LOCK(pm);
pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE);
PMAP_UNLOCK(pm);
- return (NULL);
}
void
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 937fc71..57d62aa 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -97,8 +97,8 @@ void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
boolean_t);
-vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, vm_page_t mpte);
+void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot);
void pmap_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 2484af1..9387575 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -953,7 +953,7 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
int i;
vm_offset_t addr, starta;
vm_pindex_t pindex;
- vm_page_t m, mpte;
+ vm_page_t m;
vm_object_t object;
if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
@@ -968,7 +968,6 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
starta = 0;
}
- mpte = NULL;
for (i = 0; i < PAGEORDER_SIZE; i++) {
vm_object_t backing_object, lobject;
@@ -1009,8 +1008,7 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
vm_page_lock_queues();
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
vm_page_deactivate(m);
- mpte = pmap_enter_quick(pmap, addr, m,
- entry->protection, mpte);
+ pmap_enter_quick(pmap, addr, m, entry->protection);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(lobject);