author     alc <alc@FreeBSD.org>  2005-09-03 18:20:20 +0000
committer  alc <alc@FreeBSD.org>  2005-09-03 18:20:20 +0000
commit     39788de49ed8c451980050349cf26ad80c4f6eb1 (patch)
tree       15a723ce98510b7ad44411dc2c1fe9370aed91b1
parent     dcf881b1eb5cd7d4101ff068dead8b12f344a057 (diff)
Pass a value of type vm_prot_t to pmap_enter_quick() so that it can
determine whether the mapping should permit execute access.
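
The effect of the new argument is easiest to see in the amd64 hunk below:
when the caller's protection lacks VM_PROT_EXECUTE, the pmap sets the NX
bit on the mapping. A minimal sketch of that idea follows (illustrative
only, not the committed code; page-table setup, locking, and error
handling are omitted):

/*
 * Sketch of the new interface, modeled on the amd64 hunk below.
 * The vm_prot_t argument lets the pmap withhold execute permission
 * on architectures that support it (e.g. via the NX bit on amd64).
 */
vm_page_t
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    vm_page_t mpte)
{
	vm_paddr_t pa;

	pa = VM_PAGE_TO_PHYS(m);
	if ((prot & VM_PROT_EXECUTE) == 0)
		pa |= pg_nx;	/* mark the mapping no-execute */
	/* ... validate the read-only mapping at va using pa ... */
	return (mpte);
}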
-rw-r--r--  sys/alpha/alpha/pmap.c         4
-rw-r--r--  sys/amd64/amd64/pmap.c         6
-rw-r--r--  sys/arm/arm/pmap.c             6
-rw-r--r--  sys/i386/i386/pmap.c           4
-rw-r--r--  sys/ia64/ia64/pmap.c           7
-rw-r--r--  sys/powerpc/aim/mmu_oea.c      5
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c  5
-rw-r--r--  sys/powerpc/powerpc/pmap.c     5
-rw-r--r--  sys/sparc64/sparc64/pmap.c     5
-rw-r--r--  sys/vm/pmap.h                  2
-rw-r--r--  sys/vm/vm_fault.c              3
-rw-r--r--  sys/vm/vm_map.c                4
12 files changed, 32 insertions, 24 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index dab8df4..59017cf 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -1787,12 +1787,12 @@ validate:
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ vm_page_t mpte)
{
register pt_entry_t *pte;
int managed;
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 3493848..17c9e64 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2036,12 +2036,12 @@ validate:
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ vm_page_t mpte)
{
pt_entry_t *pte;
vm_paddr_t pa;
@@ -2130,6 +2130,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pmap->pm_stats.resident_count++;
pa = VM_PAGE_TO_PHYS(m);
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ pa |= pg_nx;
/*
* Now validate mapping with RO protection
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 8083cea..6e9b4ad 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -3536,19 +3536,19 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
mtx_lock(&Giant);
- pmap_enter(pmap, va, m, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
+ pmap_enter(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
pmap_idcache_wbinv_all(pmap);
mtx_unlock(&Giant);
VM_OBJECT_LOCK(m->object);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 545993a..13d0923 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2026,12 +2026,12 @@ validate:
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ vm_page_t mpte)
{
pt_entry_t *pte;
vm_paddr_t pa;
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index b2a5f6e..ce0df9f 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1570,12 +1570,12 @@ validate:
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
- * 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ vm_page_t mpte)
{
struct ia64_lpte *pte;
pmap_t oldpmap;
@@ -1613,7 +1613,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
/* Initialise with R/O protection and enter into VHPT. */
pmap_enter_vhpt(pte, va);
- pmap_pte_prot(pmap, pte, VM_PROT_READ);
+ pmap_pte_prot(pmap, pte,
+ prot & (VM_PROT_READ | VM_PROT_EXECUTE));
pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
}
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 2ea798d..e3515d5 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1069,14 +1069,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
mtx_lock(&Giant);
- pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+ pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
mtx_unlock(&Giant);
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index 2ea798d..e3515d5 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -1069,14 +1069,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
mtx_lock(&Giant);
- pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+ pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
mtx_unlock(&Giant);
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
index 2ea798d..e3515d5 100644
--- a/sys/powerpc/powerpc/pmap.c
+++ b/sys/powerpc/powerpc/pmap.c
@@ -1069,14 +1069,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
mtx_lock(&Giant);
- pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+ pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
mtx_unlock(&Giant);
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 8d62058..9e4ff8b 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1401,13 +1401,14 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
- pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+ pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
vm_page_wakeup(m);
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 4eb8f5a..d5b64c3 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -100,7 +100,7 @@ void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
boolean_t);
vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_page_t mpte);
+ vm_prot_t prot, vm_page_t mpte);
vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va);
vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va,
vm_prot_t prot);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 57fbca0..2dc873a 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1007,7 +1007,8 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
vm_page_lock_queues();
if ((m->queue - m->pc) == PQ_CACHE)
vm_page_deactivate(m);
- mpte = pmap_enter_quick(pmap, addr, m, mpte);
+ mpte = pmap_enter_quick(pmap, addr, m,
+ entry->protection, mpte);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(lobject);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 519163b..c162c50 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1375,7 +1375,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
vm_page_t p, mpte;
boolean_t are_queues_locked;
- if ((prot & VM_PROT_READ) == 0 || object == NULL)
+ if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
return;
VM_OBJECT_LOCK(object);
if (object->type == OBJT_DEVICE) {
@@ -1433,7 +1433,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
if ((p->queue - p->pc) == PQ_CACHE)
vm_page_deactivate(p);
mpte = pmap_enter_quick(map->pmap,
- addr + ptoa(tmpidx), p, mpte);
+ addr + ptoa(tmpidx), p, prot, mpte);
}
}
if (are_queues_locked)