summaryrefslogtreecommitdiffstats
path: root/sys/vm/pmap.h
diff options
context:
space:
mode:
authorups <ups@FreeBSD.org>2006-06-15 01:01:06 +0000
committerups <ups@FreeBSD.org>2006-06-15 01:01:06 +0000
commitb3a7439a45bf95ef2c21dfad6ba1a051467efad1 (patch)
treeef8795aacb2aaea54cc725dafc9bd2ea5f8fdba3 /sys/vm/pmap.h
parent63bddd18cc9d5d6e46d95bda0c636f912901812d (diff)
downloadFreeBSD-src-b3a7439a45bf95ef2c21dfad6ba1a051467efad1.zip
FreeBSD-src-b3a7439a45bf95ef2c21dfad6ba1a051467efad1.tar.gz
Remove mpte optimization from pmap_enter_quick().
There is a race with the current locking scheme, and removing the optimization should have no measurable performance impact. This fixes page faults leading to panics in pmap_enter_quick_locked() on amd64/i386. Reviewed by: alc, jhb, peter, ps
Diffstat (limited to 'sys/vm/pmap.h')
-rw-r--r--	sys/vm/pmap.h	4
1 file changed, 2 insertions, 2 deletions
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 937fc71..57d62aa 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -97,8 +97,8 @@ void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
boolean_t);
-vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, vm_page_t mpte);
+void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot);
void pmap_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va);
OpenPOWER on IntegriCloud