summary | refs | log | tree | commit | diff | stats
path: root/sys/powerpc/aim
diff options
context:
space:
mode:
author    alc <alc@FreeBSD.org>  2006-08-01 19:06:06 +0000
committer alc <alc@FreeBSD.org>  2006-08-01 19:06:06 +0000
commit    a152234cf9a52766984ecb9a96cc1e5740ae9d78 (patch)
tree      7cf5203513490cb91de50813dfb5015b4f873256 /sys/powerpc/aim
parent    a2e0f77d8f100b139a19a4fc8ecece7356b6afdc (diff)
download  FreeBSD-src-a152234cf9a52766984ecb9a96cc1e5740ae9d78.zip
download  FreeBSD-src-a152234cf9a52766984ecb9a96cc1e5740ae9d78.tar.gz
Complete the transition from pmap_page_protect() to pmap_remove_write().
Originally, I had adopted sparc64's name, pmap_clear_write(), for the function that is now pmap_remove_write(). However, this function is more like pmap_remove_all() than like pmap_clear_modify() or pmap_clear_reference(), hence, the name change. The higher-level rationale behind this change is described in src/sys/amd64/amd64/pmap.c revision 1.567. The short version is that I'm trying to clean up and fix our support for execute access. Reviewed by: marcel@ (ia64)
Diffstat (limited to 'sys/powerpc/aim')
-rw-r--r--  sys/powerpc/aim/mmu_oea.c  121
1 file changed, 44 insertions, 77 deletions
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 9729fa3..df5431d 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -322,7 +322,6 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-void moea_page_protect(mmu_t, vm_page_t, vm_prot_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@@ -331,6 +330,7 @@ void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
+void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
@@ -358,7 +358,6 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
- MMUMETHOD(mmu_page_protect, moea_page_protect),
MMUMETHOD(mmu_pinit, moea_pinit),
MMUMETHOD(mmu_pinit0, moea_pinit0),
MMUMETHOD(mmu_protect, moea_protect),
@@ -367,6 +366,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_release, moea_release),
MMUMETHOD(mmu_remove, moea_remove),
MMUMETHOD(mmu_remove_all, moea_remove_all),
+ MMUMETHOD(mmu_remove_write, moea_remove_write),
MMUMETHOD(mmu_zero_page, moea_zero_page),
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
@@ -1293,6 +1293,48 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
}
/*
+ * Clear the write and modified bits in each of the given page's mappings.
+ */
+void
+moea_remove_write(mmu_t mmu, vm_page_t m)
+{
+ struct pvo_entry *pvo;
+ struct pte *pt;
+ pmap_t pmap;
+ u_int lo;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+ (m->flags & PG_WRITEABLE) == 0)
+ return;
+ lo = moea_attr_fetch(m);
+ SYNC();
+ LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
+ pmap = pvo->pvo_pmap;
+ PMAP_LOCK(pmap);
+ if ((pvo->pvo_pte.pte_lo & PTE_PP) != PTE_BR) {
+ pt = moea_pvo_to_pte(pvo, -1);
+ pvo->pvo_pte.pte_lo &= ~PTE_PP;
+ pvo->pvo_pte.pte_lo |= PTE_BR;
+ if (pt != NULL) {
+ moea_pte_synch(pt, &pvo->pvo_pte);
+ lo |= pvo->pvo_pte.pte_lo;
+ pvo->pvo_pte.pte_lo &= ~PTE_CHG;
+ moea_pte_change(pt, &pvo->pvo_pte,
+ pvo->pvo_vaddr);
+ mtx_unlock(&moea_table_mutex);
+ }
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ if ((lo & PTE_CHG) != 0) {
+ moea_attr_clear(m, PTE_CHG);
+ vm_page_dirty(m);
+ }
+ vm_page_flag_clear(m, PG_WRITEABLE);
+}
+
+/*
* moea_ts_referenced:
*
* Return a count of reference bits for a page, clearing those bits.
@@ -1420,81 +1462,6 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
}
/*
- * Lower the permission for all mappings to a given page.
- */
-void
-moea_page_protect(mmu_t mmu, vm_page_t m, vm_prot_t prot)
-{
- struct pvo_head *pvo_head;
- struct pvo_entry *pvo, *next_pvo;
- struct pte *pt;
- pmap_t pmap;
-
- /*
- * Since the routine only downgrades protection, if the
- * maximal protection is desired, there isn't any change
- * to be made.
- */
- if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
- (VM_PROT_READ|VM_PROT_WRITE))
- return;
-
- pvo_head = vm_page_to_pvoh(m);
- for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
- next_pvo = LIST_NEXT(pvo, pvo_vlink);
- MOEA_PVO_CHECK(pvo); /* sanity check */
- pmap = pvo->pvo_pmap;
- PMAP_LOCK(pmap);
-
- /*
- * Downgrading to no mapping at all, we just remove the entry.
- */
- if ((prot & VM_PROT_READ) == 0) {
- moea_pvo_remove(pvo, -1);
- PMAP_UNLOCK(pmap);
- continue;
- }
-
- /*
- * If EXEC permission is being revoked, just clear the flag
- * in the PVO.
- */
- if ((prot & VM_PROT_EXECUTE) == 0)
- pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
-
- /*
- * If this entry is already RO, don't diddle with the page
- * table.
- */
- if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
- PMAP_UNLOCK(pmap);
- MOEA_PVO_CHECK(pvo);
- continue;
- }
-
- /*
- * Grab the PTE before we diddle the bits so pvo_to_pte can
- * verify the pte contents are as expected.
- */
- pt = moea_pvo_to_pte(pvo, -1);
- pvo->pvo_pte.pte_lo &= ~PTE_PP;
- pvo->pvo_pte.pte_lo |= PTE_BR;
- if (pt != NULL) {
- moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
- mtx_unlock(&moea_table_mutex);
- }
- PMAP_UNLOCK(pmap);
- MOEA_PVO_CHECK(pvo); /* sanity check */
- }
-
- /*
- * Downgrading from writeable: clear the VM page flag
- */
- if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE)
- vm_page_flag_clear(m, PG_WRITEABLE);
-}
-
-/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may
* be changed upwards or downwards in the future; it
OpenPOWER on IntegriCloud