author		kib <kib@FreeBSD.org>	2011-08-09 21:01:36 +0000
committer	kib <kib@FreeBSD.org>	2011-08-09 21:01:36 +0000
commit		f408aa11a3c5eee2273216823e5ccb3bbcb98d4c (patch)
tree		570d750da32cd7ade17317435d427a47d947779a /sys/ia64
parent		bceb19a351c4e3c01b16f29e3c6856629159df2e (diff)
- Move the PG_UNMANAGED flag from m->flags to m->oflags, renaming the flag
  to VPO_UNMANAGED (and also making the flag protected by the vm object
  lock, instead of the vm page queue lock).
- Mark the fake pages with both PG_FICTITIOUS (as it is now) and
  VPO_UNMANAGED. As a consequence, pmap code now can use just VPO_UNMANAGED
  to decide whether the page is unmanaged.

Reviewed by:	alc
Tested by:	pho (x86, previous version), marius (sparc64),
		marcel (arm, ia64, powerpc), ray (mips)
Sponsored by:	The FreeBSD Foundation
Approved by:	re (bz)
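A minimal sketch (not part of the commit message) of the pattern the diff below applies throughout pmap.c: before the change, pmap code had to test two bits in m->flags to recognize an unmanaged page; afterwards, because fake pages also carry VPO_UNMANAGED, a single test of m->oflags (protected by the vm object lock) suffices.

	/* Old idiom: both page flags had to be checked. */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
		/* Page is managed: maintain its pv list, dirty/reference bits. */
	}

	/*
	 * New idiom: fictitious pages are now marked VPO_UNMANAGED as well,
	 * so one test of the object-locked oflags field is enough.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		/* Page is managed. */
	}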
Diffstat (limited to 'sys/ia64')
-rw-r--r--	sys/ia64/ia64/pmap.c	27
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index e10c6cf..f9f6060 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1442,7 +1442,7 @@ pmap_remove_all(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_remove_all: page %p is not managed", m));
vm_page_lock_queues();
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
@@ -1548,8 +1548,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
va &= ~PAGE_MASK;
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
- (m->oflags & VPO_BUSY) != 0,
+ KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
("pmap_enter: page %p is not busy", m));
/*
@@ -1619,7 +1618,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
/*
* Enter on the PV list if part of our managed memory.
*/
- if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
("pmap_enter: managed mapping within the clean submap"));
pmap_insert_entry(pmap, va, m);
@@ -1720,7 +1719,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
boolean_t managed;
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
- (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+ (m->oflags & VPO_UNMANAGED) != 0,
("pmap_enter_quick_locked: managed mapping within the clean submap"));
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1730,7 +1729,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if (!pmap_present(pte)) {
/* Enter on the PV list if the page is managed. */
- if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
if (!pmap_try_insert_pv_entry(pmap, va, m)) {
pmap_free_pte(pte, va);
return;
@@ -1900,7 +1899,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
int loops = 0;
boolean_t rv;
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
rv = FALSE;
vm_page_lock_queues();
@@ -1932,7 +1931,7 @@ pmap_page_wired_mappings(vm_page_t m)
int count;
count = 0;
- if ((m->flags & PG_FICTITIOUS) != 0)
+ if ((m->oflags & VPO_UNMANAGED) != 0)
return (count);
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2010,7 +2009,7 @@ pmap_ts_referenced(vm_page_t m)
pv_entry_t pv;
int count = 0;
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_ts_referenced: page %p is not managed", m));
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2044,7 +2043,7 @@ pmap_is_modified(vm_page_t m)
pv_entry_t pv;
boolean_t rv;
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_modified: page %p is not managed", m));
rv = FALSE;
@@ -2104,7 +2103,7 @@ pmap_is_referenced(vm_page_t m)
pv_entry_t pv;
boolean_t rv;
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_referenced: page %p is not managed", m));
rv = FALSE;
vm_page_lock_queues();
@@ -2133,7 +2132,7 @@ pmap_clear_modify(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -2174,7 +2173,7 @@ pmap_clear_reference(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2203,7 +2202,7 @@ pmap_remove_write(vm_page_t m)
pv_entry_t pv;
vm_prot_t prot;
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_remove_write: page %p is not managed", m));
/*