author     alc <alc@FreeBSD.org>    2006-11-12 21:48:34 +0000
committer  alc <alc@FreeBSD.org>    2006-11-12 21:48:34 +0000
commit     6093953d36a905170046a19eb1141c99b22fa802 (patch)
tree       c4a9c71fe86ec1dd8e45421082e59941e177807f
parent     68445c77f7e732fbe9d5f4b930a9f2fcd0052b3f (diff)
Make pmap_enter() responsible for setting PG_WRITEABLE instead
of its caller. (As a beneficial side-effect, a high-contention acquisition of the page queues lock in vm_fault() is eliminated.)
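
Every per-architecture hunk below applies the same pattern: when the requested protection includes VM_PROT_WRITE, pmap_enter() itself marks the page with PG_WRITEABLE, so callers such as vm_fault() no longer take the page queues lock just to set that flag. The sketch below models that responsibility shift as a small userland program; the vm_page_stub/pmap_enter_stub/fault_handler_stub names, the flag values, and the pthread mutex standing in for the page queues lock are illustrative assumptions, not the kernel's real definitions (in the kernel, pmap_enter() already runs with the page queues lock held where it sets the flag, as the vm_page_unlock_queues() call in the ia64 hunk shows).

/*
 * Minimal sketch of the PG_WRITEABLE responsibility shift.  All types,
 * constants, and the lock below are simplified stand-ins, not the real
 * FreeBSD <vm/vm_page.h> definitions.
 */
#include <pthread.h>
#include <stdio.h>

#define VM_PROT_WRITE	0x02	/* stand-in for the vm_prot_t write bit */
#define PG_WRITEABLE	0x0100	/* stand-in for the vm_page flag */

struct vm_page_stub {
	unsigned int flags;
};

static pthread_mutex_t page_queues_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * After the commit: the pmap layer sets PG_WRITEABLE itself for a
 * writeable mapping, under the page queues lock.
 */
static void
pmap_enter_stub(struct vm_page_stub *m, int prot)
{
	/* ... build and install the page table entry ... */
	if (prot & VM_PROT_WRITE) {
		pthread_mutex_lock(&page_queues_lock);
		m->flags |= PG_WRITEABLE;
		pthread_mutex_unlock(&page_queues_lock);
	}
}

/*
 * The caller (think vm_fault()) no longer touches the flag or the
 * page queues lock for this purpose.
 */
static void
fault_handler_stub(struct vm_page_stub *m, int prot)
{
	pmap_enter_stub(m, prot);
}

int
main(void)
{
	struct vm_page_stub m = { 0 };

	fault_handler_stub(&m, VM_PROT_WRITE);
	printf("PG_WRITEABLE set: %s\n",
	    (m.flags & PG_WRITEABLE) ? "yes" : "no");
	return (0);
}

The payoff is the one the commit message notes: vm_fault() previously acquired the contended page queues lock solely to call vm_page_flag_set(), and folding the flag update into pmap_enter(), which already holds that lock, removes that acquisition.
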
-rw-r--r--  sys/amd64/amd64/pmap.c          |  4
-rw-r--r--  sys/arm/arm/pmap.c              |  5
-rw-r--r--  sys/i386/i386/pmap.c            |  4
-rw-r--r--  sys/ia64/ia64/pmap.c            |  2
-rw-r--r--  sys/powerpc/aim/mmu_oea.c       |  5
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c   |  5
-rw-r--r--  sys/sparc64/sparc64/pmap.c      |  5
-rw-r--r--  sys/sun4v/sun4v/pmap.c          |  4
-rw-r--r--  sys/vm/vm_fault.c               |  9
-rw-r--r--  sys/vm/vm_kern.c                |  2
10 files changed, 28 insertions, 17 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c1f8c8b..7a8b672 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2357,8 +2357,10 @@ validate:
* Now validate mapping with desired protection/wiring.
*/
newpte = (pt_entry_t)(pa | PG_V);
- if ((prot & VM_PROT_WRITE) != 0)
+ if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
+ vm_page_flag_set(m, PG_WRITEABLE);
+ }
if ((prot & VM_PROT_EXECUTE) == 0)
newpte |= pg_nx;
if (wired)
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index a390a98..c5c1b20 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -3396,8 +3396,11 @@ do_l2b_alloc:
npte |= L2_TYPE_INV;
}
- if (prot & VM_PROT_WRITE)
+ if (prot & VM_PROT_WRITE) {
npte |= L2_S_PROT_W;
+ if (m != NULL)
+ vm_page_flag_set(m, PG_WRITEABLE);
+ }
npte |= pte_l2_s_cache_mode;
if (m && m == opg) {
/*
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index a760978..1798180 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2434,8 +2434,10 @@ validate:
* Now validate mapping with desired protection/wiring.
*/
newpte = (pt_entry_t)(pa | PG_V);
- if ((prot & VM_PROT_WRITE) != 0)
+ if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
+ vm_page_flag_set(m, PG_WRITEABLE);
+ }
if (wired)
newpte |= PG_W;
if (va < VM_MAXUSER_ADDRESS)
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 37fe800..bcdaae7 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1647,6 +1647,8 @@ validate:
pmap_pte_prot(pmap, pte, prot);
pmap_set_pte(pte, va, pa, wired, managed);
+ if ((prot & VM_PROT_WRITE) != 0)
+ vm_page_flag_set(m, PG_WRITEABLE);
vm_page_unlock_queues();
pmap_install(oldpmap);
PMAP_UNLOCK(pmap);
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index df5431d..08217b5 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1129,9 +1129,10 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
}
- if (prot & VM_PROT_WRITE)
+ if (prot & VM_PROT_WRITE) {
pte_lo |= PTE_BW;
- else
+ vm_page_flag_set(m, PG_WRITEABLE);
+ } else
pte_lo |= PTE_BR;
if (prot & VM_PROT_EXECUTE)
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index df5431d..08217b5 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -1129,9 +1129,10 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
}
- if (prot & VM_PROT_WRITE)
+ if (prot & VM_PROT_WRITE) {
pte_lo |= PTE_BW;
- else
+ vm_page_flag_set(m, PG_WRITEABLE);
+ } else
pte_lo |= PTE_BR;
if (prot & VM_PROT_EXECUTE)
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 15f3351..bf0cb0b 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1348,6 +1348,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (wired) {
tp->tte_data |= TD_W;
}
+ vm_page_flag_set(m, PG_WRITEABLE);
} else if ((data & TD_W) != 0) {
vm_page_dirty(m);
}
@@ -1387,8 +1388,10 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
data = TD_V | TD_8K | TD_PA(pa);
if (pm == kernel_pmap)
data |= TD_P;
- if (prot & VM_PROT_WRITE)
+ if ((prot & VM_PROT_WRITE) != 0) {
data |= TD_SW;
+ vm_page_flag_set(m, PG_WRITEABLE);
+ }
if (prot & VM_PROT_EXECUTE) {
data |= TD_EXEC;
icache_page_inval(pa);
diff --git a/sys/sun4v/sun4v/pmap.c b/sys/sun4v/sun4v/pmap.c
index da91579..bcc292f 100644
--- a/sys/sun4v/sun4v/pmap.c
+++ b/sys/sun4v/sun4v/pmap.c
@@ -1008,8 +1008,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Now validate mapping with desired protection/wiring.
*/
- if ((prot & VM_PROT_WRITE) != 0)
+ if ((prot & VM_PROT_WRITE) != 0) {
tte_data |= VTD_SW_W;
+ vm_page_flag_set(m, PG_WRITEABLE);
+ }
if ((prot & VM_PROT_EXECUTE) != 0)
tte_data |= VTD_X;
if (wired)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index d62f3c7..e577199 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -841,9 +841,6 @@ readrest:
}
}
if (prot & VM_PROT_WRITE) {
- vm_page_lock_queues();
- vm_page_flag_set(fs.m, PG_WRITEABLE);
- vm_page_unlock_queues();
vm_object_set_writeable_dirty(fs.object);
/*
@@ -1189,14 +1186,12 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* Enter it in the pmap...
*/
pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
- VM_OBJECT_LOCK(dst_object);
- vm_page_lock_queues();
- if ((prot & VM_PROT_WRITE) != 0)
- vm_page_flag_set(dst_m, PG_WRITEABLE);
/*
* Mark it no longer busy, and put it on the active list.
*/
+ VM_OBJECT_LOCK(dst_object);
+ vm_page_lock_queues();
vm_page_activate(dst_m);
vm_page_unlock_queues();
vm_page_wakeup(dst_m);
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index c91dc48..e3a8c12 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -402,7 +402,7 @@ retry:
*/
pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
vm_page_lock_queues();
- vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
+ vm_page_flag_set(m, PG_REFERENCED);
vm_page_unlock_queues();
vm_page_wakeup(m);
}