-rw-r--r--  sys/vm/vm_page.c     83
-rw-r--r--  sys/vm/vm_pageout.c  12
2 files changed, 53 insertions(+), 42 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 1ec86af..fbef16a 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -487,13 +487,12 @@ vm_page_flag_set(vm_page_t m, unsigned short bits)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
/*
- * For a managed page, the PG_WRITEABLE flag can be set only if
- * the page is VPO_BUSY. Currently this flag is only set by
- * pmap_enter().
+ * The PG_WRITEABLE flag can only be set if the page is managed and
+ * VPO_BUSY. Currently, this flag is only set by pmap_enter().
*/
KASSERT((bits & PG_WRITEABLE) == 0 ||
- (m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) != 0 ||
- (m->oflags & VPO_BUSY) != 0, ("PG_WRITEABLE and !VPO_BUSY"));
+ ((m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) == 0 &&
+ (m->oflags & VPO_BUSY) != 0), ("PG_WRITEABLE and !VPO_BUSY"));
m->flags |= bits;
}
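The hunk above does more than reword the comment: it tightens the assertion itself. The following stand-alone predicates are a hypothetical illustration, not part of the commit (the helper names old_pg_writeable_ok and new_pg_writeable_ok are invented, and the vm_page.h definitions used by the surrounding code are assumed). The old expression was satisfied by any unmanaged or fictitious page regardless of its busy state, whereas the new one requires a managed page that is also VPO_BUSY.

/*
 * Hypothetical helpers spelling out the two predicates.  The old check
 * passed for any unmanaged or fictitious page; the new one demands a
 * managed page that is also VPO_BUSY.
 */
static __inline int
old_pg_writeable_ok(vm_page_t m)
{
	return ((m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) != 0 ||
	    (m->oflags & VPO_BUSY) != 0);
}

static __inline int
new_pg_writeable_ok(vm_page_t m)
{
	return ((m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) == 0 &&
	    (m->oflags & VPO_BUSY) != 0);
}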
@@ -502,6 +501,10 @@ vm_page_flag_clear(vm_page_t m, unsigned short bits)
{
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ /*
+ * The PG_REFERENCED flag can only be cleared if the object
+ * containing the page is locked.
+ */
KASSERT((bits & PG_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object),
("PG_REFERENCED and !VM_OBJECT_LOCKED"));
m->flags &= ~bits;
@@ -1582,6 +1585,8 @@ vm_page_free_toq(vm_page_t m)
* another map, removing it from paging queues
* as necessary.
*
+ * If the page is fictitious, then its wire count must remain one.
+ *
* The page must be locked.
* This routine may not block.
*/
@@ -1595,8 +1600,12 @@ vm_page_wire(vm_page_t m)
* it is already off the queues).
*/
vm_page_lock_assert(m, MA_OWNED);
- if (m->flags & PG_FICTITIOUS)
+ if ((m->flags & PG_FICTITIOUS) != 0) {
+ KASSERT(m->wire_count == 1,
+ ("vm_page_wire: fictitious page %p's wire count isn't one",
+ m));
return;
+ }
if (m->wire_count == 0) {
if ((m->flags & PG_UNMANAGED) == 0)
vm_pageq_remove(m);
@@ -1607,32 +1616,20 @@ vm_page_wire(vm_page_t m)
}
/*
- * vm_page_unwire:
- *
- * Release one wiring of this page, potentially
- * enabling it to be paged again.
- *
- * Many pages placed on the inactive queue should actually go
- * into the cache, but it is difficult to figure out which. What
- * we do instead, if the inactive target is well met, is to put
- * clean pages at the head of the inactive queue instead of the tail.
- * This will cause them to be moved to the cache more quickly and
- * if not actively re-referenced, freed more quickly. If we just
- * stick these pages at the end of the inactive queue, heavy filesystem
- * meta-data accesses can cause an unnecessary paging load on memory bound
- * processes. This optimization causes one-time-use metadata to be
- * reused more quickly.
- *
- * BUT, if we are in a low-memory situation we have no choice but to
- * put clean pages on the cache queue.
- *
- * A number of routines use vm_page_unwire() to guarantee that the page
- * will go into either the inactive or active queues, and will NEVER
- * be placed in the cache - for example, just after dirtying a page.
- * dirty pages in the cache are not allowed.
+ * vm_page_unwire:
*
- * The page must be locked.
- * This routine may not block.
+ * Release one wiring of the specified page, potentially enabling it to be
+ * paged again. If paging is enabled, then the value of the parameter
+ * "activate" determines to which queue the page is added. If "activate" is
+ * non-zero, then the page is added to the active queue. Otherwise, it is
+ * added to the inactive queue.
+ *
+ * However, unless the page belongs to an object, it is not enqueued because
+ * it cannot be paged out.
+ *
+ * If a page is fictitious, then its wire count must always be one.
+ *
+ * A managed page must be locked.
*/
void
vm_page_unwire(vm_page_t m, int activate)
@@ -1640,13 +1637,17 @@ vm_page_unwire(vm_page_t m, int activate)
if ((m->flags & PG_UNMANAGED) == 0)
vm_page_lock_assert(m, MA_OWNED);
- if (m->flags & PG_FICTITIOUS)
+ if ((m->flags & PG_FICTITIOUS) != 0) {
+ KASSERT(m->wire_count == 1,
+ ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
return;
+ }
if (m->wire_count > 0) {
m->wire_count--;
if (m->wire_count == 0) {
atomic_subtract_int(&cnt.v_wire_count, 1);
- if ((m->flags & PG_UNMANAGED) != 0)
+ if ((m->flags & PG_UNMANAGED) != 0 ||
+ m->object == NULL)
return;
vm_page_lock_queues();
if (activate)
@@ -1657,14 +1658,24 @@ vm_page_unwire(vm_page_t m, int activate)
}
vm_page_unlock_queues();
}
- } else {
- panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
- }
+ } else
+ panic("vm_page_unwire: page %p's wire count is zero", m);
}
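The new comment earlier in this hunk describes the "activate" parameter and the locking rule. A minimal hypothetical caller under those rules might look like the sketch below; the surrounding code is illustrative only and is not taken from the tree.

/*
 * Hypothetical caller sketch: a managed page must be locked across
 * vm_page_wire()/vm_page_unwire(); passing 0 asks for the inactive
 * queue, non-zero for the active queue.  A page with no object is
 * simply left off the queues, as the comment above states.
 */
vm_page_lock(m);
vm_page_wire(m);
/* ... use the page while it cannot be paged out ... */
vm_page_unwire(m, 0);	/* 0 => inactive queue, if the page is enqueued at all */
vm_page_unlock(m);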
/*
* Move the specified page to the inactive queue.
*
+ * Many pages placed on the inactive queue should actually go
+ * into the cache, but it is difficult to figure out which. What
+ * we do instead, if the inactive target is well met, is to put
+ * clean pages at the head of the inactive queue instead of the tail.
+ * This will cause them to be moved to the cache more quickly and
+ * if not actively re-referenced, reclaimed more quickly. If we just
+ * stick these pages at the end of the inactive queue, heavy filesystem
+ * meta-data accesses can cause an unnecessary paging load on memory bound
+ * processes. This optimization causes one-time-use metadata to be
+ * reused more quickly.
+ *
* Normally athead is 0 resulting in LRU operation. athead is set
* to 1 if we want this page to be 'as if it were placed in the cache',
* except without unmapping it from the process address space.
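The "athead" behavior described in the comment above reduces, in essence, to choosing between the head and the tail of the inactive queue. The sketch below is an assumption about that choice, not a quote from _vm_page_deactivate(); the queue and field names (vm_page_queues[PQ_INACTIVE].pl, pageq) follow their usage elsewhere in this era of the tree.

/*
 * Rough sketch of the athead choice: head insertion makes the page the
 * next reclamation candidate, tail insertion gives plain LRU ordering.
 */
if (athead)
	TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
else
	TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);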
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index bff803d..62163de 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -805,7 +805,7 @@ rescan0:
/*
* A held page may be undergoing I/O, so skip it.
*/
- if (m->hold_count || (object = m->object) == NULL) {
+ if (m->hold_count) {
vm_page_unlock(m);
vm_page_requeue(m);
addl_page_shortage++;
@@ -816,6 +816,7 @@ rescan0:
* Don't mess with busy pages, keep in the front of the
* queue, most likely are being paged out.
*/
+ object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
(!vm_pageout_fallback_object_lock(m, &next) ||
m->hold_count != 0)) {
@@ -1127,17 +1128,16 @@ unlock_and_continue:
("vm_pageout_scan: page %p isn't active", m));
next = TAILQ_NEXT(m, pageq);
- object = m->object;
if ((m->flags & PG_MARKER) != 0) {
m = next;
continue;
}
- if (!vm_pageout_page_lock(m, &next) ||
- (object = m->object) == NULL) {
+ if (!vm_pageout_page_lock(m, &next)) {
vm_page_unlock(m);
m = next;
continue;
}
+ object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
VM_OBJECT_UNLOCK(object);
@@ -1397,12 +1397,12 @@ vm_pageout_page_stats()
continue;
}
vm_page_lock_assert(m, MA_NOTOWNED);
- if (!vm_pageout_page_lock(m, &next) ||
- (object = m->object) == NULL) {
+ if (!vm_pageout_page_lock(m, &next)) {
vm_page_unlock(m);
m = next;
continue;
}
+ object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
VM_OBJECT_UNLOCK(object);
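These vm_pageout.c hunks all follow the same pattern: the page lock is taken first, m->object is read only afterwards, and the old NULL test disappears because, with the vm_page.c change above, a page that lacks an object is never placed on a paging queue. A hypothetical way to make that reliance explicit (not part of the commit) would be an assertion in place of the removed test:

/*
 * Hypothetical illustration: the scan now relies on queued pages having
 * a non-NULL object, so the removed NULL test could in principle become
 * an assertion instead.
 */
if (!vm_pageout_page_lock(m, &next)) {
	vm_page_unlock(m);
	m = next;
	continue;
}
KASSERT(m->object != NULL,
    ("page %p on a paging queue has no object", m));
object = m->object;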