author     ups <ups@FreeBSD.org>    2006-06-15 01:01:06 +0000
committer  ups <ups@FreeBSD.org>    2006-06-15 01:01:06 +0000
commit     b3a7439a45bf95ef2c21dfad6ba1a051467efad1 (patch)
tree       ef8795aacb2aaea54cc725dafc9bd2ea5f8fdba3 /sys/amd64
parent     63bddd18cc9d5d6e46d95bda0c636f912901812d (diff)
Remove mpte optimization from pmap_enter_quick().
There is a race with the current locking scheme, and removing the optimization should have no measurable performance impact. This fixes page faults leading to panics in pmap_enter_quick_locked() on amd64/i386.

Reviewed by:	alc, jhb, peter, ps
Diffstat (limited to 'sys/amd64')
-rw-r--r--    sys/amd64/amd64/pmap.c    8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 9206f7d..44ed928 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2356,15 +2356,13 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
* but is *MUCH* faster than pmap_enter...
*/
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 	PMAP_LOCK(pmap);
-	mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte);
+	(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 	PMAP_UNLOCK(pmap);
-	return (mpte);
 }
 static vm_page_t
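
For context, the optimization being removed let a caller pass the page-table page (mpte) returned by a previous pmap_enter_quick() call back into the next one, skipping the lookup inside pmap_enter_quick_locked(); because PMAP_LOCK is dropped between calls, that cached page could be reclaimed in the meantime, which is presumably the race the commit message refers to. The sketch below is a minimal userland model of that pattern and of the fix, not FreeBSD code; every name in it (struct ptpage, table_lookup(), enter_quick_racy(), enter_quick_fixed()) is hypothetical.

/*
 * Minimal userland model of caching a lock-protected lookup result
 * across a lock release (the racy pattern) versus re-deriving it
 * under the lock each time (the fixed pattern).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ptpage {
	int valid;			/* cleared when the page is reclaimed */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ptpage *table_page;	/* stand-in for the page-table page */

/* Look up (or create) the page-table page; table_lock must be held. */
static struct ptpage *
table_lookup(void)
{

	if (table_page == NULL) {
		if ((table_page = calloc(1, sizeof(*table_page))) == NULL)
			abort();
		table_page->valid = 1;
	}
	return (table_page);
}

/*
 * Racy pattern, analogous to the removed mpte optimization: trust a
 * page cached by an earlier call, even though the lock was dropped in
 * between and the page may have been reclaimed.
 */
static struct ptpage *
enter_quick_racy(struct ptpage *cached)
{
	struct ptpage *p;

	pthread_mutex_lock(&table_lock);
	p = (cached != NULL) ? cached : table_lookup();
	if (!p->valid)
		fprintf(stderr, "stale page-table page used!\n");
	pthread_mutex_unlock(&table_lock);
	return (p);
}

/* Fixed pattern: always look the page up while the lock is held. */
static void
enter_quick_fixed(void)
{

	pthread_mutex_lock(&table_lock);
	(void)table_lookup();
	pthread_mutex_unlock(&table_lock);
}

/* Simulates another thread reclaiming the page between calls. */
static void *
reclaimer(void *arg)
{

	(void)arg;
	pthread_mutex_lock(&table_lock);
	if (table_page != NULL) {
		table_page->valid = 0;	/* memory kept so the demo can read it */
		table_page = NULL;
	}
	pthread_mutex_unlock(&table_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t t;
	struct ptpage *cached;

	cached = enter_quick_racy(NULL);	/* first call caches a page */
	pthread_create(&t, NULL, reclaimer, NULL);
	pthread_join(t, NULL);
	enter_quick_racy(cached);		/* reuses the reclaimed page */
	enter_quick_fixed();			/* safe: re-derived under the lock */
	return (0);
}

Build with: cc -pthread -o mpte_race mpte_race.c. In the kernel the fix is simpler still: pmap_enter_quick() now always passes NULL, so pmap_enter_quick_locked() finds the page-table page while PMAP_LOCK is held rather than trusting state cached across a lock release.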