author     kib <kib@FreeBSD.org>    2011-09-06 10:30:11 +0000
committer  kib <kib@FreeBSD.org>    2011-09-06 10:30:11 +0000
commit     a9d505a22a9d9d343bf6874e995b921ad977453c (patch)
tree       608b3b06589b15335451f37a8c8b11d1779e9a72 /sys
parent     a6bb123606f7afa6fb3342b35dad217c76951ee3 (diff)
Split the vm_page flags PG_WRITEABLE and PG_REFERENCED out into an atomic
aflags field. Updates to the atomic flags are performed with atomic ops on
the containing word, do not require any VM lock to be held, and are
non-blocking. The vm_page_aflag_set(9) and vm_page_aflag_clear(9) functions
are provided to modify the aflags.

Document that changes to the flags field now require only the page lock.

Introduce the vm_page_reference(9) function to provide a stable KPI and KBI
for filesystems like tmpfs and zfs which need to mark a page as referenced.

Reviewed by:	alc, attilio
Tested by:	marius, flo (sparc64); andreast (powerpc, powerpc64)
Approved by:	re (bz)
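
For orientation, here is a minimal usage sketch of the KPI added by this
change. The vm_page_aflag_set(9), vm_page_aflag_clear(9), and
vm_page_reference(9) functions and the PGA_* constants are the ones
introduced in the diff below; the two example_* wrappers are hypothetical
and only illustrate typical call sites.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Filesystem-side use: mark a page referenced before sleeping on it,
 * without taking the page queues lock (replaces the old
 * vm_page_lock_queues(); vm_page_flag_set(m, PG_REFERENCED) sequence).
 */
static void
example_mark_referenced(vm_page_t m)
{

	vm_page_reference(m);
}

/*
 * pmap-side use: record a hardware-observed access and, once the last
 * writeable mapping is gone, clear the writeable hint.  The updates are
 * atomic and non-blocking; the KASSERT preconditions for setting
 * PGA_WRITEABLE (managed page, VPO_BUSY) still apply at real call sites.
 */
static void
example_pmap_teardown(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
	vm_page_aflag_clear(m, PGA_WRITEABLE);
}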
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c  46
-rw-r--r--  sys/arm/arm/pmap.c  32
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c  3
-rw-r--r--  sys/fs/tmpfs/tmpfs_vnops.c  9
-rw-r--r--  sys/i386/i386/pmap.c  46
-rw-r--r--  sys/i386/xen/pmap.c  40
-rw-r--r--  sys/ia64/ia64/pmap.c  32
-rw-r--r--  sys/mips/mips/pmap.c  44
-rw-r--r--  sys/powerpc/aim/mmu_oea.c  26
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c  26
-rw-r--r--  sys/powerpc/booke/pmap.c  30
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m  2
-rw-r--r--  sys/sparc64/sparc64/pmap.c  32
-rw-r--r--  sys/vm/swap_pager.c  2
-rw-r--r--  sys/vm/vm_fault.c  4
-rw-r--r--  sys/vm/vm_mmap.c  8
-rw-r--r--  sys/vm/vm_object.c  4
-rw-r--r--  sys/vm/vm_page.c  107
-rw-r--r--  sys/vm/vm_page.h  40
-rw-r--r--  sys/vm/vm_pageout.c  28
-rw-r--r--  sys/vm/vnode_pager.c  2
21 files changed, 288 insertions, 275 deletions
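
The mechanism behind vm_page_aflag_set() and vm_page_aflag_clear(), visible
in the sys/vm/vm_page.c hunk further below, widens the one-byte aflags
access to the aligned 32-bit word that contains it. The following standalone
sketch restates that technique with illustrative names (byte_flag_set is
hypothetical); it assumes the byte sits at a word-aligned offset, which the
committed CTASSERT enforces for aflags.

#include <sys/param.h>
#include <machine/atomic.h>

/*
 * Sketch: atomically set bits in a one-byte flags field by operating on
 * the 32-bit word containing it.  On big-endian machines the first byte
 * of the word is the most significant one, hence the shift by 24.
 * Concurrent non-atomic stores to the neighboring bytes in the same word
 * remain correct because the RMW is performed atomically on the word.
 */
static void
byte_flag_set(volatile uint8_t *fld, uint8_t bits)
{
	volatile uint32_t *addr;
	uint32_t val;

	addr = (volatile uint32_t *)(volatile void *)fld; /* assumed aligned */
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}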
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index e6ab1a1..43df8ee 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2123,7 +2123,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
KASSERT((tpte & PG_W) == 0,
("pmap_collect: wired pte %#lx", tpte));
if (tpte & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
free = NULL;
@@ -2137,7 +2137,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
}
if (TAILQ_EMPTY(&m->md.pv_list) &&
TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
@@ -2391,7 +2391,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
if (TAILQ_EMPTY(&m->md.pv_list)) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
@@ -2615,10 +2615,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpde & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
if (TAILQ_EMPTY(&m->md.pv_list) &&
TAILQ_EMPTY(&pvh->pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
if (pmap == kernel_pmap) {
@@ -2659,7 +2659,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpte & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
pmap_remove_entry(pmap, m, va);
}
return (pmap_unuse_pt(pmap, va, ptepde, free));
@@ -2872,7 +2872,7 @@ pmap_remove_all(vm_page_t m)
if (tpte & PG_W)
pmap->pm_stats.wired_count--;
if (tpte & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
/*
* Update the vm_page_t clean and reference bits.
@@ -2885,7 +2885,7 @@ pmap_remove_all(vm_page_t m)
free_pv_entry(pmap, pv);
PMAP_UNLOCK(pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
pmap_free_zero_pages(free);
}
@@ -3301,7 +3301,7 @@ validate:
if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
if ((newpte & PG_MANAGED) != 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
}
if ((prot & VM_PROT_EXECUTE) == 0)
newpte |= pg_nx;
@@ -3325,7 +3325,7 @@ validate:
origpte = pte_load_store(pte, newpte);
if (origpte & PG_A) {
if (origpte & PG_MANAGED)
- vm_page_flag_set(om, PG_REFERENCED);
+ vm_page_aflag_set(om, PGA_REFERENCED);
if (opa != VM_PAGE_TO_PHYS(m) || ((origpte &
PG_NX) == 0 && (newpte & PG_NX)))
invlva = TRUE;
@@ -3339,7 +3339,7 @@ validate:
if ((origpte & PG_MANAGED) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
- vm_page_flag_clear(om, PG_WRITEABLE);
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
if (invlva)
pmap_invalidate_page(pmap, va);
} else
@@ -4147,7 +4147,7 @@ pmap_remove_pages(pmap_t pmap)
if (TAILQ_EMPTY(&pvh->pv_list)) {
for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
if (TAILQ_EMPTY(&mt->md.pv_list))
- vm_page_flag_clear(mt, PG_WRITEABLE);
+ vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
if (mpte != NULL) {
@@ -4165,7 +4165,7 @@ pmap_remove_pages(pmap_t pmap)
if (TAILQ_EMPTY(&m->md.pv_list)) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
@@ -4203,13 +4203,13 @@ pmap_is_modified(vm_page_t m)
("pmap_is_modified: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
vm_page_lock_queues();
rv = pmap_is_modified_pvh(&m->md) ||
@@ -4332,13 +4332,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -4370,7 +4370,7 @@ retry:
}
PMAP_UNLOCK(pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -4478,11 +4478,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+ * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
* If the object containing the page is locked and the page is not
- * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 38733c9..28835ec 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -1402,7 +1402,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
if ((kwritable == 0) && (writable == 0)) {
pg->md.pvh_attrs &= ~PVF_MOD;
- vm_page_flag_clear(pg, PG_WRITEABLE);
+ vm_page_aflag_clear(pg, PGA_WRITEABLE);
return;
}
}
@@ -1568,7 +1568,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
}
if (maskbits & PVF_WRITE)
- vm_page_flag_clear(pg, PG_WRITEABLE);
+ vm_page_aflag_clear(pg, PGA_WRITEABLE);
vm_page_unlock_queues();
return (count);
}
@@ -1630,7 +1630,7 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
if (pve->pv_flags & PVF_WIRED)
++pm->pm_stats.wired_count;
- vm_page_flag_set(pg, PG_REFERENCED);
+ vm_page_aflag_set(pg, PGA_REFERENCED);
}
/*
@@ -1699,7 +1699,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
pg->md.pvh_attrs &= ~PVF_REF;
else
- vm_page_flag_set(pg, PG_REFERENCED);
+ vm_page_aflag_set(pg, PGA_REFERENCED);
if ((pve->pv_flags & PVF_NC) && ((pm == pmap_kernel()) ||
(pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
pmap_fix_cache(pg, pm, 0);
@@ -1709,7 +1709,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
break;
if (!pve) {
pg->md.pvh_attrs &= ~PVF_MOD;
- vm_page_flag_clear(pg, PG_WRITEABLE);
+ vm_page_aflag_clear(pg, PGA_WRITEABLE);
}
}
pv = TAILQ_FIRST(&pg->md.pv_list);
@@ -1724,7 +1724,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
--pm->pm_stats.wired_count;
pg->md.pvh_attrs &= ~PVF_REF;
pg->md.pvh_attrs &= ~PVF_MOD;
- vm_page_flag_clear(pg, PG_WRITEABLE);
+ vm_page_aflag_clear(pg, PGA_WRITEABLE);
pmap_free_pv_entry(pv);
}
}
@@ -2695,7 +2695,7 @@ pmap_remove_pages(pmap_t pmap)
npv = TAILQ_NEXT(pv, pv_plist);
pmap_nuke_pv(m, pmap, pv);
if (TAILQ_EMPTY(&m->md.pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
pmap_free_pv_entry(pv);
pmap_free_l2_bucket(pmap, l2b, 1);
}
@@ -3172,7 +3172,7 @@ pmap_remove_all(vm_page_t m)
else
pmap_tlb_flushD(curpm);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -3406,7 +3406,7 @@ do_l2b_alloc:
vm_page_dirty(m);
}
if (m && opte)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
} else {
/*
* Need to do page referenced emulation.
@@ -3418,7 +3418,7 @@ do_l2b_alloc:
npte |= L2_S_PROT_W;
if (m != NULL &&
(m->oflags & VPO_UNMANAGED) == 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
}
npte |= pte_l2_s_cache_mode;
if (m && m == opg) {
@@ -4505,11 +4505,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no mappings can be modified.
+ * If the page is not PGA_WRITEABLE, then no mappings can be modified.
* If the object containing the page is locked and the page is not
- * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
if (m->md.pvh_attrs & PVF_MOD)
pmap_clearbit(m, PVF_MOD);
@@ -4558,13 +4558,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) != 0 ||
- (m->flags & PG_WRITEABLE) != 0)
+ (m->aflags & PGA_WRITEABLE) != 0)
pmap_clearbit(m, PVF_WRITE);
}
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index 14b6823..8d8ddfc 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -331,8 +331,7 @@ page_lookup(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
- vm_page_lock_queues();
- vm_page_flag_set(pp, PG_REFERENCED);
+ vm_page_reference(pp);
vm_page_sleep(pp, "zfsmwb");
continue;
}
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 0568e93..74aba04 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -518,8 +518,7 @@ lookupvpg:
* Reference the page before unlocking and sleeping so
* that the page daemon is less likely to reclaim it.
*/
- vm_page_lock_queues();
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_reference(m);
vm_page_sleep(m, "tmfsmr");
goto lookupvpg;
}
@@ -538,8 +537,7 @@ lookupvpg:
* Reference the page before unlocking and sleeping so
* that the page daemon is less likely to reclaim it.
*/
- vm_page_lock_queues();
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_reference(m);
vm_page_sleep(m, "tmfsmr");
goto lookupvpg;
}
@@ -650,8 +648,7 @@ lookupvpg:
* Reference the page before unlocking and sleeping so
* that the page daemon is less likely to reclaim it.
*/
- vm_page_lock_queues();
- vm_page_flag_set(vpg, PG_REFERENCED);
+ vm_page_reference(vpg);
vm_page_sleep(vpg, "tmfsmw");
goto lookupvpg;
}
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index e6b8669..db5f995 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2207,7 +2207,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
KASSERT((tpte & PG_W) == 0,
("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
if (tpte & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
free = NULL;
@@ -2221,7 +2221,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
}
if (TAILQ_EMPTY(&m->md.pv_list) &&
TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
sched_unpin();
}
@@ -2461,7 +2461,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
if (TAILQ_EMPTY(&m->md.pv_list)) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
@@ -2714,10 +2714,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpde & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
if (TAILQ_EMPTY(&m->md.pv_list) &&
TAILQ_EMPTY(&pvh->pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
if (pmap == kernel_pmap) {
@@ -2763,7 +2763,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpte & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
pmap_remove_entry(pmap, m, va);
}
return (pmap_unuse_pt(pmap, va, free));
@@ -2953,7 +2953,7 @@ pmap_remove_all(vm_page_t m)
if (tpte & PG_W)
pmap->pm_stats.wired_count--;
if (tpte & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
/*
* Update the vm_page_t clean and reference bits.
@@ -2966,7 +2966,7 @@ pmap_remove_all(vm_page_t m)
free_pv_entry(pmap, pv);
PMAP_UNLOCK(pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
sched_unpin();
vm_page_unlock_queues();
pmap_free_zero_pages(free);
@@ -3413,7 +3413,7 @@ validate:
if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
if ((newpte & PG_MANAGED) != 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
}
#ifdef PAE
if ((prot & VM_PROT_EXECUTE) == 0)
@@ -3439,7 +3439,7 @@ validate:
origpte = pte_load_store(pte, newpte);
if (origpte & PG_A) {
if (origpte & PG_MANAGED)
- vm_page_flag_set(om, PG_REFERENCED);
+ vm_page_aflag_set(om, PGA_REFERENCED);
if (opa != VM_PAGE_TO_PHYS(m))
invlva = TRUE;
#ifdef PAE
@@ -3457,7 +3457,7 @@ validate:
if ((origpte & PG_MANAGED) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
- vm_page_flag_clear(om, PG_WRITEABLE);
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
if (invlva)
pmap_invalidate_page(pmap, va);
} else
@@ -4287,7 +4287,7 @@ pmap_remove_pages(pmap_t pmap)
if (TAILQ_EMPTY(&pvh->pv_list)) {
for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
if (TAILQ_EMPTY(&mt->md.pv_list))
- vm_page_flag_clear(mt, PG_WRITEABLE);
+ vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
if (mpte != NULL) {
@@ -4305,7 +4305,7 @@ pmap_remove_pages(pmap_t pmap)
if (TAILQ_EMPTY(&m->md.pv_list)) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
pmap_unuse_pt(pmap, pv->pv_va, &free);
}
@@ -4345,13 +4345,13 @@ pmap_is_modified(vm_page_t m)
("pmap_is_modified: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
vm_page_lock_queues();
rv = pmap_is_modified_pvh(&m->md) ||
@@ -4478,13 +4478,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();
@@ -4522,7 +4522,7 @@ retry:
}
PMAP_UNLOCK(pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
sched_unpin();
vm_page_unlock_queues();
}
@@ -4633,11 +4633,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+ * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
* If the object containing the page is locked and the page is not
- * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index ece1b6c..b19f75c 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2037,7 +2037,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
KASSERT((tpte & PG_W) == 0,
("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
if (tpte & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
free = NULL;
@@ -2050,7 +2050,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
PMAP_UNLOCK(pmap);
}
if (TAILQ_EMPTY(&m->md.pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
sched_unpin();
}
@@ -2222,7 +2222,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
pmap_pvh_free(&m->md, pmap, va);
if (TAILQ_EMPTY(&m->md.pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
/*
@@ -2274,7 +2274,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpte & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
pmap_remove_entry(pmap, m, va);
}
return (pmap_unuse_pt(pmap, va, free));
@@ -2446,7 +2446,7 @@ pmap_remove_all(vm_page_t m)
if (tpte & PG_W)
pmap->pm_stats.wired_count--;
if (tpte & PG_A)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
/*
* Update the vm_page_t clean and reference bits.
@@ -2459,7 +2459,7 @@ pmap_remove_all(vm_page_t m)
free_pv_entry(pmap, pv);
PMAP_UNLOCK(pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
PT_UPDATES_FLUSH();
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
@@ -2739,7 +2739,7 @@ validate:
if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
if ((newpte & PG_MANAGED) != 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
}
#ifdef PAE
if ((prot & VM_PROT_EXECUTE) == 0)
@@ -2764,7 +2764,7 @@ validate:
PT_SET_VA(pte, newpte | PG_A, FALSE);
if (origpte & PG_A) {
if (origpte & PG_MANAGED)
- vm_page_flag_set(om, PG_REFERENCED);
+ vm_page_aflag_set(om, PGA_REFERENCED);
if (opa != VM_PAGE_TO_PHYS(m))
invlva = TRUE;
#ifdef PAE
@@ -2781,7 +2781,7 @@ validate:
}
if ((origpte & PG_MANAGED) != 0 &&
TAILQ_EMPTY(&om->md.pv_list))
- vm_page_flag_clear(om, PG_WRITEABLE);
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
if (invlva)
pmap_invalidate_page(pmap, va);
} else{
@@ -3549,7 +3549,7 @@ pmap_remove_pages(pmap_t pmap)
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_EMPTY(&m->md.pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
pmap_unuse_pt(pmap, pv->pv_va, &free);
@@ -3604,13 +3604,13 @@ pmap_is_modified(vm_page_t m)
rv = FALSE;
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
sched_pin();
@@ -3735,13 +3735,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();
@@ -3769,7 +3769,7 @@ retry:
}
PMAP_UNLOCK(pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
PT_UPDATES_FLUSH();
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
@@ -3846,11 +3846,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+ * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
* If the object containing the page is locked and the page is not
- * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index f9f6060..37116f5 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -804,7 +804,7 @@ retry:
pmap_invalidate_page(va);
pmap_switch(oldpmap);
if (pmap_accessed(pte))
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
if (pmap_dirty(pte))
vm_page_dirty(m);
pmap_free_pte(pte, va);
@@ -819,7 +819,7 @@ retry:
free_pv_entry(pv);
}
if (TAILQ_EMPTY(&m->md.pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
if (allocated_pv == NULL) {
if (vpq == &vm_page_queues[PQ_INACTIVE]) {
@@ -972,7 +972,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pv_entry_t pv)
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
if (TAILQ_FIRST(&m->md.pv_list) == NULL)
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
free_pv_entry(pv);
@@ -1198,7 +1198,7 @@ pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va,
if (pmap_dirty(pte))
vm_page_dirty(m);
if (pmap_accessed(pte))
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
error = pmap_remove_entry(pmap, m, va, pv);
}
@@ -1460,7 +1460,7 @@ pmap_remove_all(vm_page_t m)
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -1647,7 +1647,7 @@ validate:
ia64_sync_icache(va, PAGE_SIZE);
if ((prot & VM_PROT_WRITE) != 0 && managed)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
vm_page_unlock_queues();
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
@@ -2048,13 +2048,13 @@ pmap_is_modified(vm_page_t m)
rv = FALSE;
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be dirty.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2139,11 +2139,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can be modified.
+ * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
* If the object containing the page is locked and the page is not
- * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2206,13 +2206,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2235,7 +2235,7 @@ pmap_remove_write(vm_page_t m)
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index e6dcd77..7da76be 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -1432,7 +1432,7 @@ retry:
KASSERT(!pte_test(&oldpte, PTE_W),
("wired pte for unwired page"));
if (m->md.pv_flags & PV_TABLE_REF)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
if (pte_test(&oldpte, PTE_D))
vm_page_dirty(m);
pmap_invalidate_page(pmap, va);
@@ -1448,7 +1448,7 @@ retry:
free_pv_entry(pv);
}
if (TAILQ_EMPTY(&m->md.pv_list)) {
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
}
}
@@ -1527,7 +1527,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
pmap_pvh_free(&m->md, pmap, va);
if (TAILQ_EMPTY(&m->md.pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
/*
@@ -1589,7 +1589,7 @@ pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va)
vm_page_dirty(m);
}
if (m->md.pv_flags & PV_TABLE_REF)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
pmap_remove_entry(pmap, m, va);
@@ -1713,7 +1713,7 @@ pmap_remove_all(vm_page_t m)
vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_REF)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
PMAP_LOCK(pv->pv_pmap);
@@ -1757,7 +1757,7 @@ pmap_remove_all(vm_page_t m)
free_pv_entry(pv);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
vm_page_unlock_queues();
}
@@ -2004,7 +2004,7 @@ validate:
*pte = newpte;
if (page_is_managed(opa) && (opa != pa)) {
if (om->md.pv_flags & PV_TABLE_REF)
- vm_page_flag_set(om, PG_REFERENCED);
+ vm_page_aflag_set(om, PGA_REFERENCED);
om->md.pv_flags &=
~(PV_TABLE_REF | PV_TABLE_MOD);
}
@@ -2017,7 +2017,7 @@ validate:
}
if (page_is_managed(opa) &&
TAILQ_EMPTY(&om->md.pv_list))
- vm_page_flag_clear(om, PG_WRITEABLE);
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
} else {
*pte = newpte;
}
@@ -2535,7 +2535,7 @@ pmap_remove_pages(pmap_t pmap)
m->md.pv_list_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
free_pv_entry(pv);
@@ -2615,7 +2615,7 @@ pmap_changebit(vm_page_t m, int bit, boolean_t setem)
PMAP_UNLOCK(pv->pv_pmap);
}
if (!setem && bit == PTE_D)
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
/*
@@ -2662,13 +2662,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
/*
@@ -2685,7 +2685,7 @@ pmap_remove_write(vm_page_t m)
pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
VM_PROT_READ | VM_PROT_EXECUTE);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -2724,13 +2724,13 @@ pmap_is_modified(vm_page_t m)
("pmap_is_modified: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_D set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_MOD)
@@ -2781,11 +2781,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can have PTE_D set.
+ * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
* If the object containing the page is locked and the page is not
- * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_MOD) {
@@ -2929,7 +2929,7 @@ retry:
* determine if the address is MINCORE_REFERENCED.
*/
m = PHYS_TO_VM_PAGE(pa);
- if ((m->flags & PG_REFERENCED) != 0)
+ if ((m->aflags & PGA_REFERENCED) != 0)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
}
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
@@ -3185,7 +3185,7 @@ init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
rw = PTE_V | PTE_D | PTE_C_CACHE;
else
rw = PTE_V | PTE_C_CACHE;
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
} else
/* Needn't emulate a modified bit for unmanaged pages. */
rw = PTE_V | PTE_D | PTE_C_CACHE;
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 331dbe9..57c35bf 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1102,7 +1102,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pte_lo |= PTE_BW;
if (pmap_bootstrapped &&
(m->oflags & VPO_UNMANAGED) == 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
} else
pte_lo |= PTE_BR;
@@ -1255,13 +1255,13 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
("moea_is_modified: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
return (moea_query_bit(m, PTE_CHG));
}
@@ -1299,11 +1299,11 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
("moea_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can have PTE_CHG
+ * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
* set. If the object containing the page is locked and the page is
- * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
moea_clear_bit(m, PTE_CHG);
}
@@ -1323,13 +1323,13 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
("moea_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
lo = moea_attr_fetch(m);
@@ -1356,7 +1356,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
moea_attr_clear(m, PTE_CHG);
vm_page_dirty(m);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -1794,11 +1794,11 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
moea_pvo_remove(pvo, -1);
PMAP_UNLOCK(pmap);
}
- if ((m->flags & PG_WRITEABLE) && moea_is_modified(mmu, m)) {
+ if ((m->aflags & PGA_WRITEABLE) && moea_is_modified(mmu, m)) {
moea_attr_clear(m, PTE_CHG);
vm_page_dirty(m);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 0a10ce8..7500462 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1239,7 +1239,7 @@ moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
pte_lo |= LPTE_BW;
if (pmap_bootstrapped &&
(m->oflags & VPO_UNMANAGED) == 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
} else
pte_lo |= LPTE_BR;
@@ -1484,13 +1484,13 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
("moea64_is_modified: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have LPTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
return (moea64_query_bit(mmu, m, LPTE_CHG));
}
@@ -1528,11 +1528,11 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
("moea64_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG
+ * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
* set. If the object containing the page is locked and the page is
- * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
moea64_clear_bit(mmu, m, LPTE_CHG);
}
@@ -1552,13 +1552,13 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
("moea64_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
lo = moea64_attr_fetch(m);
@@ -1588,7 +1588,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
moea64_attr_clear(m, LPTE_CHG);
vm_page_dirty(m);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -2064,11 +2064,11 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
moea64_pvo_remove(mmu, pvo);
PMAP_UNLOCK(pmap);
}
- if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
+ if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m)) {
moea64_attr_clear(m, LPTE_CHG);
vm_page_dirty(m);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 26261a6..4d1043a 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -771,7 +771,7 @@ pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
/* remove from pv_list */
TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
if (TAILQ_EMPTY(&m->md.pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
/* free pv entry struct */
pv_free(pve);
@@ -820,7 +820,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
vm_page_dirty(m);
if (PTE_ISREFERENCED(pte))
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
pv_remove(pmap, va, m);
}
@@ -1600,7 +1600,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
flags |= PTE_UW;
if ((flags & PTE_MANAGED) != 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
} else {
/* Handle modified pages, sense modify status. */
@@ -1667,7 +1667,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
flags |= PTE_UW;
if ((m->oflags & VPO_UNMANAGED) == 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
}
if (prot & VM_PROT_EXECUTE) {
@@ -1804,7 +1804,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
PMAP_UNLOCK(pv->pv_pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -1957,13 +1957,13 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
("mmu_booke_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@@ -1988,7 +1988,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
}
PMAP_UNLOCK(pv->pv_pmap);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -2172,13 +2172,13 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
rv = FALSE;
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be modified.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@@ -2253,11 +2253,11 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
("mmu_booke_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no PTEs can be modified.
+ * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
* If the object containing the page is locked and the page is not
- * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 6f60622..9d5b656 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -584,7 +584,7 @@ METHOD void remove {
/**
* @brief Traverse the reverse-map list off the given physical page and
- * remove all mappings. Clear the PG_WRITEABLE attribute from the page.
+ * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
*
* @param _pg physical page
*/
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 0792c4b..209be10 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1340,9 +1340,9 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
if ((data & TD_W) != 0)
vm_page_dirty(m);
if ((data & TD_REF) != 0)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
if (TAILQ_EMPTY(&m->md.tte_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
pm->pm_stats.resident_count--;
}
pmap_cache_remove(m, va);
@@ -1403,7 +1403,7 @@ pmap_remove_all(vm_page_t m)
if ((tp->tte_data & TD_WIRED) != 0)
pm->pm_stats.wired_count--;
if ((tp->tte_data & TD_REF) != 0)
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
if ((tp->tte_data & TD_W) != 0)
vm_page_dirty(m);
tp->tte_data &= ~TD_V;
@@ -1414,7 +1414,7 @@ pmap_remove_all(vm_page_t m)
TTE_ZERO(tp);
PMAP_UNLOCK(pm);
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@@ -1560,7 +1560,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (wired)
tp->tte_data |= TD_W;
if ((m->oflags & VPO_UNMANAGED) == 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
} else if ((data & TD_W) != 0)
vm_page_dirty(m);
@@ -1601,7 +1601,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if ((prot & VM_PROT_WRITE) != 0) {
data |= TD_SW;
if ((m->oflags & VPO_UNMANAGED) == 0)
- vm_page_flag_set(m, PG_WRITEABLE);
+ vm_page_aflag_set(m, PGA_WRITEABLE);
}
if (prot & VM_PROT_EXECUTE) {
data |= TD_EXEC;
@@ -2066,13 +2066,13 @@ pmap_is_modified(vm_page_t m)
rv = FALSE;
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
- * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no TTEs can have TD_W set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@@ -2143,11 +2143,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
- * If the page is not PG_WRITEABLE, then no TTEs can have TD_W set.
+ * If the page is not PGA_WRITEABLE, then no TTEs can have TD_W set.
* If the object containing the page is locked and the page is not
- * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
- if ((m->flags & PG_WRITEABLE) == 0)
+ if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@@ -2189,13 +2189,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
- * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
- * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
- (m->flags & PG_WRITEABLE) == 0)
+ (m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@@ -2207,7 +2207,7 @@ pmap_remove_write(vm_page_t m)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
}
- vm_page_flag_clear(m, PG_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index f491adc..d7da4f9 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1593,7 +1593,7 @@ swp_pager_async_iodone(struct buf *bp)
* status, then finish the I/O ( which decrements the
* busy count and possibly wakes waiter's up ).
*/
- KASSERT((m->flags & PG_WRITEABLE) == 0,
+ KASSERT((m->aflags & PGA_WRITEABLE) == 0,
("swp_pager_async_iodone: page %p is not write"
" protected", m));
vm_page_undirty(m);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index eeb10a4..1b8ac2f 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -345,9 +345,7 @@ RetryFault:;
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
- vm_page_lock_queues();
- vm_page_flag_set(fs.m, PG_REFERENCED);
- vm_page_unlock_queues();
+ vm_page_aflag_set(fs.m, PGA_REFERENCED);
vm_page_unlock(fs.m);
if (fs.object != fs.first_object) {
if (!VM_OBJECT_TRYLOCK(
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index a46d6b5..ce899e9 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -901,16 +901,16 @@ RestartScan:
if (m->dirty != 0)
mincoreinfo |= MINCORE_MODIFIED_OTHER;
/*
- * The first test for PG_REFERENCED is an
+ * The first test for PGA_REFERENCED is an
* optimization. The second test is
* required because a concurrent pmap
* operation could clear the last reference
- * and set PG_REFERENCED before the call to
+ * and set PGA_REFERENCED before the call to
* pmap_is_referenced().
*/
- if ((m->flags & PG_REFERENCED) != 0 ||
+ if ((m->aflags & PGA_REFERENCED) != 0 ||
pmap_is_referenced(m) ||
- (m->flags & PG_REFERENCED) != 0)
+ (m->aflags & PGA_REFERENCED) != 0)
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
if (object != NULL)
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 602d99e..3de793b 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1098,9 +1098,7 @@ shadowlookup:
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
- vm_page_lock_queues();
- vm_page_flag_set(m, PG_REFERENCED);
- vm_page_unlock_queues();
+ vm_page_aflag_set(m, PGA_REFERENCED);
}
vm_page_unlock(m);
if (object != tobject)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 6d55892..341c238 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -67,30 +67,9 @@
* page queue (vm_page_queue[]), regardless of other mutexes or the
* busy state of a page.
*
- * - a hash chain mutex is required when associating or disassociating
- * a page from the VM PAGE CACHE hash table (vm_page_buckets),
- * regardless of other mutexes or the busy state of a page.
- *
- * - either a hash chain mutex OR a busied page is required in order
- * to modify the page flags. A hash chain mutex must be obtained in
- * order to busy a page. A page's flags cannot be modified by a
- * hash chain mutex if the page is marked busy.
- *
- * - The object memq mutex is held when inserting or removing
- * pages from an object (vm_page_insert() or vm_page_remove()). This
- * is different from the object's main mutex.
- *
- * Generally speaking, you have to be aware of side effects when running
- * vm_page ops. A vm_page_lookup() will return with the hash chain
- * locked, whether it was able to lookup the page or not. vm_page_free(),
- * vm_page_cache(), vm_page_activate(), and a number of other routines
- * will release the hash chain mutex for you. Intermediate manipulation
- * routines such as vm_page_flag_set() expect the hash chain to be held
- * on entry and the hash chain will remain held on return.
- *
- * pageq scanning can only occur with the pageq in question locked.
- * We have a known bottleneck with the active queue, but the cache
- * and free queues are actually arrays already.
+ * - The object mutex is held when inserting or removing
+ * pages from an object (vm_page_insert() or vm_page_remove()).
+ *
*/
/*
@@ -473,33 +452,68 @@ vm_page_startup(vm_offset_t vaddr)
return (vaddr);
}
+
+CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);
+
void
-vm_page_flag_set(vm_page_t m, unsigned short bits)
+vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
+ uint32_t *addr, val;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
/*
- * The PG_WRITEABLE flag can only be set if the page is managed and
+ * The PGA_WRITEABLE flag can only be set if the page is managed and
* VPO_BUSY. Currently, this flag is only set by pmap_enter().
*/
- KASSERT((bits & PG_WRITEABLE) == 0 ||
+ KASSERT((bits & PGA_WRITEABLE) == 0 ||
(m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY,
- ("PG_WRITEABLE and !VPO_BUSY"));
- m->flags |= bits;
+ ("PGA_WRITEABLE and !VPO_BUSY"));
+
+ /*
+ * We want to use atomic updates for m->aflags, which is a
+ * byte wide. Not all architectures provide atomic operations
+ * on the single-byte destination. Punt and access the whole
+ * 4-byte word with an atomic update. Parallel non-atomic
+ * updates to the fields included in the update by proximity
+ * are handled properly by atomics.
+ */
+ addr = (void *)&m->aflags;
+ MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0);
+ val = bits;
+#if BYTE_ORDER == BIG_ENDIAN
+ val <<= 24;
+#endif
+ atomic_set_32(addr, val);
}
void
-vm_page_flag_clear(vm_page_t m, unsigned short bits)
+vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
+ uint32_t *addr, val;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
/*
- * The PG_REFERENCED flag can only be cleared if the object
+ * The PGA_REFERENCED flag can only be cleared if the object
* containing the page is locked.
*/
- KASSERT((bits & PG_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object),
- ("PG_REFERENCED and !VM_OBJECT_LOCKED"));
- m->flags &= ~bits;
+ KASSERT((bits & PGA_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object),
+ ("PGA_REFERENCED and !VM_OBJECT_LOCKED"));
+
+ /*
+ * See the comment in vm_page_aflag_set().
+ */
+ addr = (void *)&m->aflags;
+ MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0);
+ val = bits;
+#if BYTE_ORDER == BIG_ENDIAN
+ val <<= 24;
+#endif
+ atomic_clear_32(addr, val);
+}
+
+void
+vm_page_reference(vm_page_t m)
+{
+
+ vm_page_aflag_set(m, PGA_REFERENCED);
}
void
@@ -874,7 +888,7 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
* Since we are inserting a new and possibly dirty page,
* update the object's OBJ_MIGHTBEDIRTY flag.
*/
- if (m->flags & PG_WRITEABLE)
+ if (m->aflags & PGA_WRITEABLE)
vm_object_set_writeable_dirty(object);
}
@@ -1390,6 +1404,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
}
m->flags = flags;
mtx_unlock(&vm_page_queue_free_mtx);
+ m->aflags = 0;
if (object == NULL || object->type == OBJT_PHYS)
m->oflags = VPO_UNMANAGED;
else
@@ -1480,6 +1495,7 @@ vm_page_alloc_init(vm_page_t m)
vm_page_zero_count--;
/* Don't clear the PG_ZERO flag; we'll need it later. */
m->flags &= PG_ZERO;
+ m->aflags = 0;
m->oflags = VPO_UNMANAGED;
/* Unmanaged pages don't use "act_count". */
return (drop);
@@ -1880,7 +1896,7 @@ vm_page_unwire(vm_page_t m, int activate)
if (activate)
vm_page_enqueue(PQ_ACTIVE, m);
else {
- vm_page_flag_clear(m, PG_WINATCFLS);
+ m->flags &= ~PG_WINATCFLS;
vm_page_enqueue(PQ_INACTIVE, m);
}
vm_page_unlock_queues();
@@ -1923,7 +1939,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
return;
if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
vm_page_lock_queues();
- vm_page_flag_clear(m, PG_WINATCFLS);
+ m->flags &= ~PG_WINATCFLS;
if (queue != PQ_NONE)
vm_page_queue_remove(queue, m);
if (athead)
@@ -2156,15 +2172,13 @@ vm_page_dontneed(vm_page_t m)
*
* Perform the pmap_clear_reference() first. Otherwise, a concurrent
* pmap operation, such as pmap_remove(), could clear a reference in
- * the pmap and set PG_REFERENCED on the page before the
+ * the pmap and set PGA_REFERENCED on the page before the
* pmap_clear_reference() had completed. Consequently, the page would
* appear referenced based upon an old reference that occurred before
* this function ran.
*/
pmap_clear_reference(m);
- vm_page_lock_queues();
- vm_page_flag_clear(m, PG_REFERENCED);
- vm_page_unlock_queues();
+ vm_page_aflag_clear(m, PGA_REFERENCED);
if (m->dirty == 0 && pmap_is_modified(m))
vm_page_dirty(m);
@@ -2213,8 +2227,7 @@ retrylookup:
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
- vm_page_lock_queues();
- vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_aflag_set(m, PGA_REFERENCED);
vm_page_sleep(m, "pgrbwt");
goto retrylookup;
} else {
@@ -2329,11 +2342,11 @@ vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
/*
* If the object is locked and the page is neither VPO_BUSY nor
- * PG_WRITEABLE, then the page's dirty field cannot possibly be
+ * PGA_WRITEABLE, then the page's dirty field cannot possibly be
* set by a concurrent pmap operation.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
- if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0)
+ if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0)
m->dirty &= ~pagebits;
else {
#if defined(__amd64__) || defined(__i386__) || defined(__ia64__)
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 1dda1e2..5431d79 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -125,12 +125,13 @@ struct vm_page {
struct md_page md; /* machine dependant stuff */
uint8_t queue; /* page queue index (P,Q) */
int8_t segind;
- u_short flags; /* see below */
+ short hold_count; /* page hold count (P) */
uint8_t order; /* index of the buddy queue */
uint8_t pool;
u_short cow; /* page cow mapping count (P) */
u_int wire_count; /* wired down maps refs (P) */
- short hold_count; /* page hold count (P) */
+ uint8_t aflags; /* access is atomic */
+ uint8_t flags; /* see below, often immutable after alloc */
u_short oflags; /* page flags (O) */
u_char act_count; /* page usage count (O) */
u_char busy; /* page busy count (O) */
@@ -225,21 +226,29 @@ extern struct vpglocks pa_lock[];
/*
* These are the flags defined for vm_page.
*
- * PG_REFERENCED may be cleared only if the object containing the page is
+ * aflags are updated by atomic accesses. Use the vm_page_aflag_set()
+ * and vm_page_aflag_clear() functions to set and clear the flags.
+ *
+ * PGA_REFERENCED may be cleared only if the object containing the page is
* locked.
*
- * PG_WRITEABLE is set exclusively on managed pages by pmap_enter(). When it
+ * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter(). When it
* does so, the page must be VPO_BUSY.
*/
-#define PG_CACHED 0x0001 /* page is cached */
-#define PG_FREE 0x0002 /* page is free */
-#define PG_WINATCFLS 0x0004 /* flush dirty page on inactive q */
-#define PG_FICTITIOUS 0x0008 /* physical page doesn't exist (O) */
-#define PG_WRITEABLE 0x0010 /* page is mapped writeable */
-#define PG_ZERO 0x0040 /* page is zeroed */
-#define PG_REFERENCED 0x0080 /* page has been referenced */
-#define PG_MARKER 0x1000 /* special queue marker page */
-#define PG_SLAB 0x2000 /* object pointer is actually a slab */
+#define PGA_WRITEABLE 0x01 /* page may be mapped writeable */
+#define PGA_REFERENCED 0x02 /* page has been referenced */
+
+/*
+ * Page flags. If changed at any other time than page allocation or
+ * freeing, the modification must be protected by the vm_page lock.
+ */
+#define PG_CACHED 0x01 /* page is cached */
+#define PG_FREE 0x02 /* page is free */
+#define PG_FICTITIOUS 0x04 /* physical page doesn't exist (O) */
+#define PG_ZERO 0x08 /* page is zeroed */
+#define PG_MARKER 0x10 /* special queue marker page */
+#define PG_SLAB 0x20 /* object pointer is actually a slab */
+#define PG_WINATCFLS 0x40 /* flush dirty page on inactive q */
/*
* Misc constants.
@@ -341,8 +350,8 @@ extern struct vpglocks vm_page_queue_lock;
#define VM_ALLOC_COUNT_SHIFT 16
#define VM_ALLOC_COUNT(count) ((count) << VM_ALLOC_COUNT_SHIFT)
-void vm_page_flag_set(vm_page_t m, unsigned short bits);
-void vm_page_flag_clear(vm_page_t m, unsigned short bits);
+void vm_page_aflag_set(vm_page_t m, uint8_t bits);
+void vm_page_aflag_clear(vm_page_t m, uint8_t bits);
void vm_page_busy(vm_page_t m);
void vm_page_flash(vm_page_t m);
void vm_page_io_start(vm_page_t m);
@@ -377,6 +386,7 @@ vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
vm_page_t vm_page_prev(vm_page_t m);
void vm_page_putfake(vm_page_t m);
+void vm_page_reference(vm_page_t m);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_requeue(vm_page_t m);
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index e9c9927..5dd450e 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -497,7 +497,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen)
vm_page_t mt = mc[i];
KASSERT(pageout_status[i] == VM_PAGER_PEND ||
- (mt->flags & PG_WRITEABLE) == 0,
+ (mt->aflags & PGA_WRITEABLE) == 0,
("vm_pageout_flush: page %p is not write protected", mt));
switch (pageout_status[i]) {
case VM_PAGER_OK:
@@ -597,12 +597,10 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
continue;
}
actcount = pmap_ts_referenced(p);
- if ((p->flags & PG_REFERENCED) != 0) {
+ if ((p->aflags & PGA_REFERENCED) != 0) {
if (actcount == 0)
actcount = 1;
- vm_page_lock_queues();
- vm_page_flag_clear(p, PG_REFERENCED);
- vm_page_unlock_queues();
+ vm_page_aflag_clear(p, PGA_REFERENCED);
}
if (p->queue != PQ_ACTIVE && actcount != 0) {
vm_page_activate(p);
@@ -846,7 +844,7 @@ rescan0:
* references.
*/
if (object->ref_count == 0) {
- vm_page_flag_clear(m, PG_REFERENCED);
+ vm_page_aflag_clear(m, PGA_REFERENCED);
KASSERT(!pmap_page_is_mapped(m),
("vm_pageout_scan: page %p is mapped", m));
@@ -859,7 +857,7 @@ rescan0:
* level VM system not knowing anything about existing
* references.
*/
- } else if (((m->flags & PG_REFERENCED) == 0) &&
+ } else if (((m->aflags & PGA_REFERENCED) == 0) &&
(actcount = pmap_ts_referenced(m))) {
vm_page_activate(m);
vm_page_unlock(m);
@@ -874,8 +872,8 @@ rescan0:
* "activation count" higher than normal so that we will less
* likely place pages back onto the inactive queue again.
*/
- if ((m->flags & PG_REFERENCED) != 0) {
- vm_page_flag_clear(m, PG_REFERENCED);
+ if ((m->aflags & PGA_REFERENCED) != 0) {
+ vm_page_aflag_clear(m, PGA_REFERENCED);
actcount = pmap_ts_referenced(m);
vm_page_activate(m);
vm_page_unlock(m);
@@ -891,7 +889,7 @@ rescan0:
* be updated.
*/
if (m->dirty != VM_PAGE_BITS_ALL &&
- (m->flags & PG_WRITEABLE) != 0) {
+ (m->aflags & PGA_WRITEABLE) != 0) {
/*
* Avoid a race condition: Unless write access is
* removed from the page, another processor could
@@ -938,7 +936,7 @@ rescan0:
* before being freed. This significantly extends
* the thrash point for a heavily loaded machine.
*/
- vm_page_flag_set(m, PG_WINATCFLS);
+ m->flags |= PG_WINATCFLS;
vm_page_requeue(m);
} else if (maxlaunder > 0) {
/*
@@ -1178,7 +1176,7 @@ unlock_and_continue:
*/
actcount = 0;
if (object->ref_count != 0) {
- if (m->flags & PG_REFERENCED) {
+ if (m->aflags & PGA_REFERENCED) {
actcount += 1;
}
actcount += pmap_ts_referenced(m);
@@ -1192,7 +1190,7 @@ unlock_and_continue:
/*
* Since we have "tested" this bit, we need to clear it now.
*/
- vm_page_flag_clear(m, PG_REFERENCED);
+ vm_page_aflag_clear(m, PGA_REFERENCED);
/*
* Only if an object is currently being used, do we use the
@@ -1435,8 +1433,8 @@ vm_pageout_page_stats()
}
actcount = 0;
- if (m->flags & PG_REFERENCED) {
- vm_page_flag_clear(m, PG_REFERENCED);
+ if (m->aflags & PGA_REFERENCED) {
+ vm_page_aflag_clear(m, PGA_REFERENCED);
actcount += 1;
}
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 23ade63..cb652f7 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -1132,7 +1132,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
m = ma[ncount - 1];
KASSERT(m->busy > 0,
("vnode_pager_generic_putpages: page %p is not busy", m));
- KASSERT((m->flags & PG_WRITEABLE) == 0,
+ KASSERT((m->aflags & PGA_WRITEABLE) == 0,
("vnode_pager_generic_putpages: page %p is not read-only", m));
vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
pgoff);