author		kib <kib@FreeBSD.org>	2011-08-09 21:01:36 +0000
committer	kib <kib@FreeBSD.org>	2011-08-09 21:01:36 +0000
commit		f408aa11a3c5eee2273216823e5ccb3bbcb98d4c (patch)
tree		570d750da32cd7ade17317435d427a47d947779a /sys/vm
parent		bceb19a351c4e3c01b16f29e3c6856629159df2e (diff)
- Move the PG_UNMANAGED flag from m->flags to m->oflags, renaming the flag
  to VPO_UNMANAGED (and also making the flag protected by the vm object
  lock, instead of the vm page queue lock).
- Mark the fake pages with both PG_FICTITIOUS (as it is now) and
  VPO_UNMANAGED. As a consequence, pmap code now can use just
  VPO_UNMANAGED to decide whether the page is unmanaged.

Reviewed by:	alc
Tested by:	pho (x86, previous version), marius (sparc64),
		marcel (arm, ia64, powerpc), ray (mips)
Sponsored by:	The FreeBSD Foundation
Approved by:	re (bz)
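For illustration only, a minimal sketch (not part of this commit) of how an
unmanaged-page test changes once the bit lives in m->oflags; the helper name
page_is_unmanaged() is hypothetical and assumes <vm/vm_page.h> is in scope:

static __inline int
page_is_unmanaged(vm_page_t m)
{

	/*
	 * Before this change pmaps had to test m->flags for
	 * PG_FICTITIOUS | PG_UNMANAGED; afterwards the single
	 * object-locked oflags bit is sufficient, because fake
	 * (fictitious) pages are now marked VPO_UNMANAGED as well.
	 */
	return ((m->oflags & VPO_UNMANAGED) != 0);
}

Since VPO_UNMANAGED is covered by the object lock rather than the page queues
lock, such a test can be made while holding the object lock alone.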
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/vm_kern.c	4
-rw-r--r--	sys/vm/vm_object.c	4
-rw-r--r--	sys/vm/vm_page.c	45
-rw-r--r--	sys/vm/vm_page.h	17
4 files changed, 37 insertions, 33 deletions
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 23884af..24c2448 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -210,7 +210,7 @@ kmem_alloc(map, size)
mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
mem->valid = VM_PAGE_BITS_ALL;
- KASSERT((mem->flags & PG_UNMANAGED) != 0,
+ KASSERT((mem->oflags & VPO_UNMANAGED) != 0,
("kmem_alloc: page %p is managed", mem));
}
VM_OBJECT_UNLOCK(kernel_object);
@@ -428,7 +428,7 @@ retry:
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
- KASSERT((m->flags & PG_UNMANAGED) != 0,
+ KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
}
VM_OBJECT_UNLOCK(kmem_object);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index b5788f5..602d99e 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1087,7 +1087,9 @@ shadowlookup:
vm_page_unlock(m);
goto unlock_tobject;
}
- KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ KASSERT((m->flags & PG_FICTITIOUS) == 0,
+ ("vm_object_madvise: page %p is fictitious", m));
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("vm_object_madvise: page %p is not managed", m));
if ((m->oflags & VPO_BUSY) || m->busy) {
if (advise == MADV_WILLNEED) {
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 8e0c8bb..6d55892 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -483,8 +483,8 @@ vm_page_flag_set(vm_page_t m, unsigned short bits)
* VPO_BUSY. Currently, this flag is only set by pmap_enter().
*/
KASSERT((bits & PG_WRITEABLE) == 0 ||
- ((m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) == 0 &&
- (m->oflags & VPO_BUSY) != 0), ("PG_WRITEABLE and !VPO_BUSY"));
+ (m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY,
+ ("PG_WRITEABLE and !VPO_BUSY"));
m->flags |= bits;
}
@@ -636,7 +636,7 @@ vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
/* Fictitious pages don't use "segind". */
m->flags = PG_FICTITIOUS;
/* Fictitious pages don't use "order" or "pool". */
- m->oflags = VPO_BUSY;
+ m->oflags = VPO_BUSY | VPO_UNMANAGED;
m->wire_count = 1;
pmap_page_set_memattr(m, memattr);
return (m);
@@ -896,7 +896,7 @@ vm_page_remove(vm_page_t m)
vm_object_t object;
vm_page_t root;
- if ((m->flags & PG_UNMANAGED) == 0)
+ if ((m->oflags & VPO_UNMANAGED) == 0)
vm_page_lock_assert(m, MA_OWNED);
if ((object = m->object) == NULL)
return;
@@ -1388,14 +1388,14 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
if (req & VM_ALLOC_ZERO)
flags = PG_ZERO;
}
- if (object == NULL || object->type == OBJT_PHYS)
- flags |= PG_UNMANAGED;
m->flags = flags;
mtx_unlock(&vm_page_queue_free_mtx);
- if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
- m->oflags = 0;
+ if (object == NULL || object->type == OBJT_PHYS)
+ m->oflags = VPO_UNMANAGED;
else
- m->oflags = VPO_BUSY;
+ m->oflags = 0;
+ if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
+ m->oflags |= VPO_BUSY;
if (req & VM_ALLOC_WIRED) {
/*
* The page lock is not required for wiring a page until that
@@ -1479,8 +1479,8 @@ vm_page_alloc_init(vm_page_t m)
if (m->flags & PG_ZERO)
vm_page_zero_count--;
/* Don't clear the PG_ZERO flag; we'll need it later. */
- m->flags = PG_UNMANAGED | (m->flags & PG_ZERO);
- m->oflags = 0;
+ m->flags &= PG_ZERO;
+ m->oflags = VPO_UNMANAGED;
/* Unmanaged pages don't use "act_count". */
return (drop);
}
@@ -1670,7 +1670,7 @@ vm_page_activate(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((queue = m->queue) != PQ_ACTIVE) {
- if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
+ if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
m->act_count = ACT_INIT;
vm_page_lock_queues();
@@ -1736,7 +1736,7 @@ void
vm_page_free_toq(vm_page_t m)
{
- if ((m->flags & PG_UNMANAGED) == 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
vm_page_lock_assert(m, MA_OWNED);
KASSERT(!pmap_page_is_mapped(m),
("vm_page_free_toq: freeing mapped page %p", m));
@@ -1754,7 +1754,7 @@ vm_page_free_toq(vm_page_t m)
* callback routine until after we've put the page on the
* appropriate free queue.
*/
- if ((m->flags & PG_UNMANAGED) == 0)
+ if ((m->oflags & VPO_UNMANAGED) == 0)
vm_pageq_remove(m);
vm_page_remove(m);
@@ -1834,7 +1834,7 @@ vm_page_wire(vm_page_t m)
return;
}
if (m->wire_count == 0) {
- if ((m->flags & PG_UNMANAGED) == 0)
+ if ((m->oflags & VPO_UNMANAGED) == 0)
vm_pageq_remove(m);
atomic_add_int(&cnt.v_wire_count, 1);
}
@@ -1862,7 +1862,7 @@ void
vm_page_unwire(vm_page_t m, int activate)
{
- if ((m->flags & PG_UNMANAGED) == 0)
+ if ((m->oflags & VPO_UNMANAGED) == 0)
vm_page_lock_assert(m, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0) {
KASSERT(m->wire_count == 1,
@@ -1873,7 +1873,7 @@ vm_page_unwire(vm_page_t m, int activate)
m->wire_count--;
if (m->wire_count == 0) {
atomic_subtract_int(&cnt.v_wire_count, 1);
- if ((m->flags & PG_UNMANAGED) != 0 ||
+ if ((m->oflags & VPO_UNMANAGED) != 0 ||
m->object == NULL)
return;
vm_page_lock_queues();
@@ -1921,7 +1921,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
*/
if ((queue = m->queue) == PQ_INACTIVE)
return;
- if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
+ if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
vm_page_lock_queues();
vm_page_flag_clear(m, PG_WINATCFLS);
if (queue != PQ_NONE)
@@ -1962,7 +1962,7 @@ vm_page_try_to_cache(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
- (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED))
+ (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
pmap_remove_all(m);
if (m->dirty)
@@ -1985,7 +1985,7 @@ vm_page_try_to_free(vm_page_t m)
if (m->object != NULL)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
- (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED))
+ (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
pmap_remove_all(m);
if (m->dirty)
@@ -2010,7 +2010,7 @@ vm_page_cache(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy ||
+ if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
m->hold_count || m->wire_count)
panic("vm_page_cache: attempting to cache busy page");
pmap_remove_all(m);
@@ -2657,7 +2657,8 @@ vm_page_cowsetup(vm_page_t m)
{
vm_page_lock_assert(m, MA_OWNED);
- if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+ if ((m->flags & PG_FICTITIOUS) != 0 ||
+ (m->oflags & VPO_UNMANAGED) != 0 ||
m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
return (EBUSY);
m->cow++;
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index e852313..1dda1e2 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -157,9 +157,18 @@ struct vm_page {
*
* Access to these page flags is synchronized by the lock on the object
* containing the page (O).
+ *
+ * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
+ * indicates that the page is not under PV management but
+ * otherwise should be treated as a normal page. Pages not
+ * under PV management cannot be paged out via the
+ * object/vm_page_t because there is no knowledge of their pte
+ * mappings, and such pages are also not on any PQ queue.
+ *
*/
#define VPO_BUSY 0x0001 /* page is in transit */
#define VPO_WANTED 0x0002 /* someone is waiting for page */
+#define VPO_UNMANAGED 0x0004 /* No PV management for page */
#define VPO_SWAPINPROG 0x0200 /* swap I/O in progress on page */
#define VPO_NOSYNC 0x0400 /* do not collect for syncer */
@@ -216,13 +225,6 @@ extern struct vpglocks pa_lock[];
/*
* These are the flags defined for vm_page.
*
- * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
- * not under PV management but otherwise should be treated as a
- * normal page. Pages not under PV management cannot be paged out
- * via the object/vm_page_t because there is no knowledge of their
- * pte mappings, nor can they be removed from their objects via
- * the object, and such pages are also not on any PQ queue.
- *
* PG_REFERENCED may be cleared only if the object containing the page is
* locked.
*
@@ -236,7 +238,6 @@ extern struct vpglocks pa_lock[];
#define PG_WRITEABLE 0x0010 /* page is mapped writeable */
#define PG_ZERO 0x0040 /* page is zeroed */
#define PG_REFERENCED 0x0080 /* page has been referenced */
-#define PG_UNMANAGED 0x0800 /* No PV management for page */
#define PG_MARKER 0x1000 /* special queue marker page */
#define PG_SLAB 0x2000 /* object pointer is actually a slab */
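The comment block moved into vm_page.h above notes that VPO_UNMANAGED pages
carry no PV (physical-to-virtual) tracking and are never placed on a paging
queue. As a hypothetical pmap-side fragment (not taken from this commit)
showing how such a flag is typically consumed, assuming <vm/vm_page.h> is
included and example_enter_pv() is an invented name:

static void
example_enter_pv(vm_page_t m)
{

	/* Unmanaged pages get no pv_entry; there is nothing to track. */
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return;
	/*
	 * A real pmap would allocate and link a pv_entry here so the
	 * mapping can later be found by pmap_remove_all() and friends.
	 */
}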