author     alc <alc@FreeBSD.org>  2010-05-16 23:45:10 +0000
committer  alc <alc@FreeBSD.org>  2010-05-16 23:45:10 +0000
commit     f6c07c5b87dfdddf4b4438ac3df38c4c83125281 (patch)
tree       5d19152403395b71d9fe26760d1b05201ce13b02
parent     9090794d6cc9b8867376522131d6ff69824038a7 (diff)
On entry to pmap_enter(), assert that the page is busy.  While I'm
here, make the style of assertion used by pmap_enter() consistent
across all architectures.

On entry to pmap_remove_write(), assert that the page is neither
unmanaged nor fictitious, since we cannot remove write access to
either kind of page.

With the push down of the page queues lock, pmap_remove_write() cannot
condition its behavior on the state of the PG_WRITEABLE flag if the
page is busy.  Assert that the object containing the page is locked.
This allows us to know that the page will neither become busy nor will
PG_WRITEABLE be set on it while pmap_remove_write() is running.

Correct a long-standing bug in vm_page_cowsetup().  We cannot possibly
do copy-on-write-based zero-copy transmit on unmanaged or fictitious
pages, so don't even try.  Previously, the call to pmap_remove_write()
would have failed silently.
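Every pmap_remove_write() implementation touched below gains the same
three guards: an assertion that the page is managed, an assertion that
the page's object is locked, and an early return that now also
considers VPO_BUSY.  The following is a condensed composite of the
per-architecture hunks, with the machine-dependent body elided; it is
a sketch of the shared pattern, not a drop-in replacement for any one
port:

void
pmap_remove_write(vm_page_t m)
{

	/* Unmanaged and fictitious pages have no managed mappings. */
	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->flags & PG_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	/* ... machine-dependent removal of write access ... */
	vm_page_unlock_queues();
}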
-rw-r--r--  sys/amd64/amd64/pmap.c       16
-rw-r--r--  sys/arm/arm/pmap.c           14
-rw-r--r--  sys/i386/i386/pmap.c         16
-rw-r--r--  sys/i386/xen/pmap.c          23
-rw-r--r--  sys/ia64/ia64/pmap.c         18
-rw-r--r--  sys/mips/mips/pmap.c         18
-rw-r--r--  sys/powerpc/aim/mmu_oea.c    14
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c  13
-rw-r--r--  sys/powerpc/booke/pmap.c     13
-rw-r--r--  sys/sparc64/sparc64/pmap.c   13
-rw-r--r--  sys/sun4v/sun4v/pmap.c       14
-rw-r--r--  sys/vm/vm_page.c              4
-rw-r--r--  sys/vm/vm_page.h              4
13 files changed, 150 insertions(+), 30 deletions(-)
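For reference, vm_page_cowsetup() as it reads after the sys/vm/vm_page.c
hunk below (reconstructed from the diff context): the new
unmanaged/fictitious test and the object trylock turn what was
previously a silent pmap_remove_write() failure into an explicit
EBUSY return.

int
vm_page_cowsetup(vm_page_t m)
{

	vm_page_lock_assert(m, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
	    m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
		return (EBUSY);
	m->cow++;
	pmap_remove_write(m);
	VM_OBJECT_UNLOCK(m->object);
	return (0);
}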
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 2f783f7..7bf0a70 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3139,7 +3139,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
va = trunc_page(va);
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
- ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va));
+ ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
+ va));
+ KASSERT((m->oflags & VPO_BUSY) != 0,
+ ("pmap_enter: page %p is not busy", m));
mpte = NULL;
@@ -4240,7 +4243,16 @@ pmap_remove_write(vm_page_t m)
pt_entry_t oldpte, *pte;
vm_offset_t va;
- if ((m->flags & PG_FICTITIOUS) != 0 ||
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 5f7d430..00d499f 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -3318,6 +3318,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int oflags;
vm_paddr_t pa;
+ KASSERT((m->oflags & VPO_BUSY) != 0 || (flags & M_NOWAIT) != 0,
+ ("pmap_enter_locked: page %p is not busy", m));
PMAP_ASSERT_LOCKED(pmap);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (va == vector_page) {
@@ -4527,7 +4529,17 @@ void
pmap_remove_write(vm_page_t m)
{
- if (m->flags & PG_WRITEABLE) {
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) != 0 ||
+ (m->flags & PG_WRITEABLE) != 0) {
vm_page_lock_queues();
pmap_clearbit(m, PVF_WRITE);
vm_page_unlock_queues();
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 4b87922..591aed8 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3268,7 +3268,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
va = trunc_page(va);
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
- ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
+ ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
+ va));
+ KASSERT((m->oflags & VPO_BUSY) != 0,
+ ("pmap_enter: page %p is not busy", m));
mpte = NULL;
@@ -4410,7 +4413,16 @@ pmap_remove_write(vm_page_t m)
pt_entry_t oldpte, *pte;
vm_offset_t va;
- if ((m->flags & PG_FICTITIOUS) != 0 ||
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 42fdff9..ac1a17d 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2682,12 +2682,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
pmap, va, access, xpmap_ptom(VM_PAGE_TO_PHYS(m)), prot, wired);
va = trunc_page(va);
-#ifdef PMAP_DIAGNOSTIC
- if (va > VM_MAX_KERNEL_ADDRESS)
- panic("pmap_enter: toobig");
- if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
- panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
-#endif
+ KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+ KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
+ ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
+ va));
+ KASSERT((m->oflags & VPO_BUSY) != 0,
+ ("pmap_enter: page %p is not busy", m));
mpte = NULL;
@@ -3780,7 +3780,16 @@ pmap_remove_write(vm_page_t m)
pmap_t pmap;
pt_entry_t oldpte, *pte;
- if ((m->flags & PG_FICTITIOUS) != 0 ||
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 0a1a6d9..d2c5613 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1497,10 +1497,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
oldpmap = pmap_switch(pmap);
va &= ~PAGE_MASK;
-#ifdef DIAGNOSTIC
- if (va > VM_MAX_KERNEL_ADDRESS)
- panic("pmap_enter: toobig");
-#endif
+ KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+ KASSERT((m->oflags & VPO_BUSY) != 0,
+ ("pmap_enter: page %p is not busy", m));
/*
* Find (or create) a pte for the given mapping.
@@ -2116,7 +2115,16 @@ pmap_remove_write(vm_page_t m)
pv_entry_t pv;
vm_prot_t prot;
- if ((m->flags & PG_FICTITIOUS) != 0 ||
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 6d0c02b..c11ab07 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -1791,10 +1791,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
return;
va &= ~PAGE_MASK;
-#ifdef PMAP_DIAGNOSTIC
- if (va > VM_MAX_KERNEL_ADDRESS)
- panic("pmap_enter: toobig");
-#endif
+ KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+ KASSERT((m->oflags & VPO_BUSY) != 0,
+ ("pmap_enter: page %p is not busy", m));
mpte = NULL;
@@ -2584,7 +2583,16 @@ pmap_remove_write(vm_page_t m)
vm_offset_t va;
pt_entry_t *pte;
- if ((m->flags & PG_FICTITIOUS) != 0 ||
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return;
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index dc8ce0b..a2f120b 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -876,6 +876,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
struct vm_page m;
m.phys_addr = translations[i].om_pa + off;
+ m.oflags = VPO_BUSY;
PMAP_LOCK(&ofw_pmap);
moea_enter_locked(&ofw_pmap,
translations[i].om_va + off, &m,
@@ -1101,6 +1102,8 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (pmap_bootstrapped)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+ ("moea_enter_locked: page %p is not busy", m));
/* XXX change the pvo head for fake pages */
if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
@@ -1323,7 +1326,16 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
pmap_t pmap;
u_int lo;
- if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("moea_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 5e329c8..8b6453a 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1235,6 +1235,8 @@ moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (pmap_bootstrapped)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+ ("moea64_enter_locked: page %p is not busy", m));
/* XXX change the pvo head for fake pages */
if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
@@ -1519,7 +1521,16 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
pmap_t pmap;
uint64_t lo;
- if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("moea64_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index e46b610..81a337f 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -1557,6 +1557,8 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
KASSERT((va <= VM_MAXUSER_ADDRESS),
("mmu_booke_enter_locked: user pmap, non user va"));
}
+ KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+ ("mmu_booke_enter_locked: page %p is not busy", m));
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1941,7 +1943,16 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
pv_entry_t pv;
pte_t *pte;
- if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("mmu_booke_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index da8714e..1524a1e 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1351,6 +1351,8 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
PMAP_LOCK_ASSERT(pm, MA_OWNED);
+ KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+ ("pmap_enter_locked: page %p is not busy", m));
PMAP_STATS_INC(pmap_nenter);
pa = VM_PAGE_TO_PHYS(m);
@@ -1985,7 +1987,16 @@ pmap_remove_write(vm_page_t m)
struct tte *tp;
u_long data;
- if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
diff --git a/sys/sun4v/sun4v/pmap.c b/sys/sun4v/sun4v/pmap.c
index 34d7ed1..6117275 100644
--- a/sys/sun4v/sun4v/pmap.c
+++ b/sys/sun4v/sun4v/pmap.c
@@ -1061,6 +1061,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_page_t om;
int invlva;
+ KASSERT((m->oflags & VPO_BUSY) != 0,
+ ("pmap_enter: page %p is not busy", m));
if (pmap->pm_context)
DPRINTF("pmap_enter(va=%lx, pa=0x%lx, prot=%x)\n", va,
VM_PAGE_TO_PHYS(m), prot);
@@ -1737,7 +1739,17 @@ void
pmap_remove_write(vm_page_t m)
{
- if ((m->flags & PG_WRITEABLE) == 0)
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+ * another thread while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no page table entries need updating.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
+ (m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
tte_clear_phys_bit(m, VTD_SW_W|VTD_W);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index e70ca3f..1c877d9 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2336,10 +2336,12 @@ vm_page_cowsetup(vm_page_t m)
{
vm_page_lock_assert(m, MA_OWNED);
- if (m->cow == USHRT_MAX - 1)
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+ m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
return (EBUSY);
m->cow++;
pmap_remove_write(m);
+ VM_OBJECT_UNLOCK(m->object);
return (0);
}
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index e9d9544..aebf79e 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -217,8 +217,8 @@ extern struct vpglocks pa_lock[];
* pte mappings, nor can they be removed from their objects via
* the object, and such pages are also not on any PQ queue.
*
- * PG_WRITEABLE is set exclusively by pmap_enter(). When it does so, either
- * the page must be VPO_BUSY or the containing object must be locked.
+ * PG_WRITEABLE is set exclusively by pmap_enter(). When it does so, the page
+ * must be VPO_BUSY.
*/
#define PG_CACHED 0x0001 /* page is cached */
#define PG_FREE 0x0002 /* page is free */