Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/aim/mmu_oea.c     26
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c   26
-rw-r--r--  sys/powerpc/booke/pmap.c      30
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m   2
4 files changed, 42 insertions(+), 42 deletions(-)
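
The per-file hunks below all apply the same mechanical conversion: the machine-independent page flags PG_WRITEABLE and PG_REFERENCED, formerly kept in m->flags and manipulated with vm_page_flag_set()/vm_page_flag_clear(), become the atomic flags PGA_WRITEABLE and PGA_REFERENCED in m->aflags, manipulated with vm_page_aflag_set()/vm_page_aflag_clear(). The sketch below is illustrative only and is not part of the patch; example_mark_page_writeable() is a made-up name used to contrast the old and new accessors.

/*
 * Illustrative sketch, not part of the patch: the before/after pattern
 * applied throughout this diff.
 */
static void
example_mark_page_writeable(vm_page_t m)
{

	/* Old interface, removed by this change: */
	/* vm_page_flag_set(m, PG_WRITEABLE); */

	/* New interface: the flag lives in m->aflags and is updated
	 * through the atomic accessor. */
	vm_page_aflag_set(m, PGA_WRITEABLE);
}
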
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 331dbe9..57c35bf 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1102,7 +1102,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pte_lo |= PTE_BW;
if (pmap_bootstrapped &&
(m->oflags & VPO_UNMANAGED) == 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
} else
pte_lo |= PTE_BR;
@@ -1255,13 +1255,13 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
("moea_is_modified: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
return (moea_query_bit(m, PTE_CHG));
}
@@ -1299,11 +1299,11 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
("moea_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can have PTE_CHG
+ * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
* set. If the object containing the page is locked and the page is
- * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
moea_clear_bit(m, PTE_CHG);
}
@@ -1323,13 +1323,13 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
("moea_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
lo = moea_attr_fetch(m);
@@ -1356,7 +1356,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
moea_attr_clear(m, PTE_CHG);
vm_page_dirty(m);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -1794,11 +1794,11 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
moea_pvo_remove(pvo, -1);
PMAP_UNLOCK(pmap);
}
- if ((m->flags & PG_WRITEABLE) && moea_is_modified(mmu, m)) {
+ if ((m->aflags & PGA_WRITEABLE) && moea_is_modified(mmu, m)) {
moea_attr_clear(m, PTE_CHG);
vm_page_dirty(m);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
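
The comment blocks updated above (and repeated in the other pmaps below) all describe one invariant: with the VM object locked and the page not busy, PGA_WRITEABLE cannot be set concurrently, so a clear PGA_WRITEABLE means the page has no writable mappings and no PTE can carry the changed (dirty) bit. A condensed, hypothetical sketch of that check pattern follows; example_is_modified() and example_scan_ptes_for_chg() are placeholder names, not functions from the patch.

static boolean_t
example_is_modified(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	/*
	 * Object locked and page not busy: no new writable mapping can
	 * appear, so a clear PGA_WRITEABLE lets us skip the PTE scan.
	 */
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	/* Otherwise walk the page's mappings looking for the changed bit. */
	return (example_scan_ptes_for_chg(m));
}
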
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 0a10ce8..7500462 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1239,7 +1239,7 @@ moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
pte_lo |= LPTE_BW;
if (pmap_bootstrapped &&
(m->oflags & VPO_UNMANAGED) == 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
} else
pte_lo |= LPTE_BR;
@@ -1484,13 +1484,13 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
("moea64_is_modified: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have LPTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
return (moea64_query_bit(mmu, m, LPTE_CHG));
}
@@ -1528,11 +1528,11 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
("moea64_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG
+ * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
* set. If the object containing the page is locked and the page is
- * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
moea64_clear_bit(mmu, m, LPTE_CHG);
}
@@ -1552,13 +1552,13 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
("moea64_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
lo = moea64_attr_fetch(m);
@@ -1588,7 +1588,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
moea64_attr_clear(m, LPTE_CHG);
vm_page_dirty(m);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -2064,11 +2064,11 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
moea64_pvo_remove(mmu, pvo);
PMAP_UNLOCK(pmap);
}
- if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
+ if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m)) {
moea64_attr_clear(m, LPTE_CHG);
vm_page_dirty(m);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 26261a6..4d1043a 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -771,7 +771,7 @@ pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
/* remove from pv_list */
TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
if (TAILQ_EMPTY(&m->md.pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
/* free pv entry struct */
pv_free(pve);
@@ -820,7 +820,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
vm_page_dirty(m);
if (PTE_ISREFERENCED(pte))
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
pv_remove(pmap, va, m);
}
@@ -1600,7 +1600,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
flags |= PTE_UW;
if ((flags & PTE_MANAGED) != 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
} else {
/* Handle modified pages, sense modify status. */
@@ -1667,7 +1667,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
flags |= PTE_UW;
if ((m->oflags & VPO_UNMANAGED) == 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
}
if (prot & VM_PROT_EXECUTE) {
@@ -1804,7 +1804,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
PMAP_UNLOCK(pv->pv_pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -1957,13 +1957,13 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
("mmu_booke_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@@ -1988,7 +1988,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
}
PMAP_UNLOCK(pv->pv_pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -2172,13 +2172,13 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
rv = FALSE;
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be modified.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@@ -2253,11 +2253,11 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
("mmu_booke_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can be modified.
+ * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
* If the object containing the page is locked and the page is not
- * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 6f60622..9d5b656 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -584,7 +584,7 @@ METHOD void remove {
/**
* @brief Traverse the reverse-map list off the given physical page and
- * remove all mappings. Clear the PG_WRITEABLE attribute from the page.
+ * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
*
* @param _pg physical page
*/
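
A minimal sketch of what an implementation of this method does after the change, modeled on moea_remove_all() above. The function name and example_pvo_remove() are placeholders; vm_page_to_pvoh() and the pvo_entry fields are used as they appear in mmu_oea.c in the hunks above.

static void
example_remove_all(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;

	vm_page_lock_queues();
	/* Tear down every mapping on the page's reverse-map list. */
	while ((pvo = LIST_FIRST(vm_page_to_pvoh(m))) != NULL) {
		PMAP_LOCK(pvo->pvo_pmap);
		example_pvo_remove(pvo);
		PMAP_UNLOCK(pvo->pvo_pmap);
	}
	/* No mappings remain, so the page can no longer be written. */
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}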