-rw-r--r--  sys/amd64/amd64/pmap.c       5
-rw-r--r--  sys/i386/i386/pmap.c         5
-rw-r--r--  sys/i386/xen/pmap.c          5
-rw-r--r--  sys/powerpc/aim/mmu_oea.c    5
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c  5
-rw-r--r--  sys/powerpc/booke/pmap.c     5
-rw-r--r--  sys/sparc64/sparc64/pmap.c   5
-rw-r--r--  sys/vm/vm_object.h           2
8 files changed, 14 insertions(+), 23 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 662f926..c1ffe5c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3492,9 +3492,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
 	    va >= kmi.clean_eva,
 	    ("pmap_enter: managed mapping within the clean submap"));
-	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
-	    VM_OBJECT_LOCKED(m->object),
-	    ("pmap_enter: page %p is not busy", m));
+	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
+		VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	pa = VM_PAGE_TO_PHYS(m);
 	newpte = (pt_entry_t)(pa | PG_A | PG_V);
 	if ((access & VM_PROT_WRITE) != 0)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index ec1db58..e4567df 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3456,9 +3456,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
 	    va));
-	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
-	    VM_OBJECT_LOCKED(m->object),
-	    ("pmap_enter: page %p is not busy", m));
+	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
+		VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	mpte = NULL;
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index a8281be..a662eaf 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2666,9 +2666,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
 	    va));
-	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
-	    VM_OBJECT_LOCKED(m->object),
-	    ("pmap_enter: page %p is not busy", m));
+	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
+		VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	mpte = NULL;
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index cd0f97c..555ccd7 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1122,9 +1122,8 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if (pmap_bootstrapped)
 		rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
-	    VM_OBJECT_LOCKED(m->object),
-	    ("moea_enter_locked: page %p is not busy", m));
+	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
+		VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	/* XXX change the pvo head for fake pages */
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 8660e88..450d439 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1184,9 +1184,8 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 		pvo_flags = PVO_MANAGED;
 	}
-	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
-	    VM_OBJECT_LOCKED(m->object),
-	    ("moea64_enter: page %p is not busy", m));
+	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
+		VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	/* XXX change the pvo head for fake pages */
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 435960a..c3c18da 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -1561,9 +1561,8 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 		KASSERT((va <= VM_MAXUSER_ADDRESS),
 		    ("mmu_booke_enter_locked: user pmap, non user va"));
 	}
-	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
-	    VM_OBJECT_LOCKED(m->object),
-	    ("mmu_booke_enter_locked: page %p is not busy", m));
+	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
+		VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 9c6d6aa..9c0ed99 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1494,9 +1494,8 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
-	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
-	    VM_OBJECT_LOCKED(m->object),
-	    ("pmap_enter_locked: page %p is not busy", m));
+	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
+		VM_OBJECT_LOCK_ASSERT(m->object, RA_WLOCKED);
 	PMAP_STATS_INC(pmap_nenter);
 	pa = VM_PAGE_TO_PHYS(m);
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 475ec5b..c1d434a 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -210,8 +210,6 @@ extern struct vm_object kmem_object_store;
 	rw_assert(&(object)->lock, (type))
 #define VM_OBJECT_LOCK_INIT(object, name) \
 	rw_init_flags(&(object)->lock, (name), RW_DUPOK)
-#define VM_OBJECT_LOCKED(object) \
-	rw_wowned(&(object)->lock)
 #define VM_OBJECT_SLEEP(wchan, object, pri, wmesg, timo) \
 	rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
 #define VM_OBJECT_TRYLOCK(object) \