author     alc <alc@FreeBSD.org>    2010-06-02 15:46:37 +0000
committer  alc <alc@FreeBSD.org>    2010-06-02 15:46:37 +0000
commit     24ac89cf14c282421af2638eaf20a08c0649ab78 (patch)
tree       7da5c63c04fad0ef79b4758aad12c540642e9863 /sys/vm
parent     b16b48273ab37f4a59084d43d95bb66e8434c813 (diff)
Minimize the use of the page queues lock for synchronizing access to the
page's dirty field. With the exception of one case, access to this field is now synchronized by the object lock.
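
The caller-visible effect, as a hedged sketch (reconstructed from the vnode_pager hunk below, not code taken from the commit): a caller that clears dirty bits on a possibly writeable-mapped page no longer needs the global page queues lock, because the new vm_page_clear_dirty_mask() helper acquires it internally only when the page is busy or has writeable mappings.

/*
 * Sketch only: assumed before/after caller pattern.  The locking macros
 * and functions are the FreeBSD 8/9-era VM API used elsewhere in this
 * diff; "object", "m", "base", and "size" stand in for a hypothetical
 * caller's state.
 */

/* Before this commit: a page that might be writeable-mapped also
 * required the global page queues lock around the dirty update. */
VM_OBJECT_LOCK(object);
vm_page_lock_queues();
vm_page_clear_dirty(m, base, size);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);

/* After this commit: the object lock alone is sufficient; the helper
 * falls back to the page queues lock itself for VPO_BUSY or
 * PG_WRITEABLE pages. */
VM_OBJECT_LOCK(object);
vm_page_clear_dirty(m, base, size);
VM_OBJECT_UNLOCK(object);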
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_page.c      | 57
-rw-r--r--  sys/vm/vnode_pager.c  |  2
2 files changed, 47 insertions, 12 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 1ff371a..b29e30e 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -170,6 +170,7 @@ TUNABLE_INT("vm.boot_pages", &boot_pages);
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
"number of pages allocated for bootstrapping the VM system");
+static void vm_page_clear_dirty_mask(vm_page_t m, int pagebits);
static void vm_page_queue_remove(int queue, vm_page_t m);
static void vm_page_enqueue(int queue, vm_page_t m);
@@ -2073,6 +2074,28 @@ vm_page_set_valid(vm_page_t m, int base, int size)
}
/*
+ * Clear the given bits from the specified page's dirty field.
+ */
+static __inline void
+vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
+{
+
+ /*
+ * If the object is locked and the page is neither VPO_BUSY nor
+ * PG_WRITEABLE, then the page's dirty field cannot possibly be
+ * modified by a concurrent pmap operation.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0)
+ m->dirty &= ~pagebits;
+ else {
+ vm_page_lock_queues();
+ m->dirty &= ~pagebits;
+ vm_page_unlock_queues();
+ }
+}
+
+/*
* vm_page_set_validclean:
*
* Sets portions of a page valid and clean. The arguments are expected
@@ -2087,9 +2110,8 @@ vm_page_set_valid(vm_page_t m, int base, int size)
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
- int pagebits;
- int frag;
- int endoff;
+ u_int oldvalid;
+ int endoff, frag, pagebits;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if (size == 0) /* handle degenerate case */
@@ -2126,6 +2148,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
* clear dirty bits for DEV_BSIZE chunks that are fully within
* the range.
*/
+ oldvalid = m->valid;
pagebits = vm_page_bits(base, size);
m->valid |= pagebits;
#if 0 /* NOT YET */
@@ -2138,21 +2161,35 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
}
pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
- m->dirty &= ~pagebits;
if (base == 0 && size == PAGE_SIZE) {
- pmap_clear_modify(m);
+ /*
+ * The page can only be modified within the pmap if it is
+ * mapped, and it can only be mapped if it was previously
+ * fully valid.
+ */
+ if (oldvalid == VM_PAGE_BITS_ALL)
+ /*
+ * Perform the pmap_clear_modify() first. Otherwise,
+ * a concurrent pmap operation, such as
+ * pmap_protect(), could clear a modification in the
+ * pmap and set the dirty field on the page before
+ * pmap_clear_modify() had begun and after the dirty
+ * field was cleared here.
+ */
+ pmap_clear_modify(m);
+ m->dirty = 0;
m->oflags &= ~VPO_NOSYNC;
- }
+ } else if (oldvalid != VM_PAGE_BITS_ALL)
+ m->dirty &= ~pagebits;
+ else
+ vm_page_clear_dirty_mask(m, pagebits);
}
void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
- if ((m->flags & PG_WRITEABLE) != 0)
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- m->dirty &= ~vm_page_bits(base, size);
+ vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
}
/*
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index c1f8cff..f497d41 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -429,9 +429,7 @@ vnode_pager_setsize(vp, nsize)
* bits. This would prevent bogus_page
* replacement from working properly.
*/
- vm_page_lock_queues();
vm_page_clear_dirty(m, base, PAGE_SIZE - base);
- vm_page_unlock_queues();
} else if ((nsize & PAGE_MASK) &&
__predict_false(object->cache != NULL)) {
vm_page_cache_free(object, OFF_TO_IDX(nsize),
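
The ordering in the vm_page_set_validclean() hunk above is the subtle part of this change; the following commented fragment is a hedged restatement of the rationale given in that hunk, not additional analysis from the commit.

/*
 * Illustration only: the interleaving the new ordering avoids.  If the
 * dirty field were cleared before pmap_clear_modify():
 *
 *   CPU 0: m->dirty = 0;            page declared clean
 *   CPU 1: pmap_protect()           clears a modified bit in the pmap
 *                                   and sets the page's dirty field
 *   CPU 0: pmap_clear_modify(m);    clears the pmap's record, but
 *                                   m->dirty has already been set again
 *
 * Performing pmap_clear_modify() first removes the pre-existing
 * modification record from the pmap before m->dirty is cleared, so a
 * concurrent pmap operation in that window has nothing left to
 * transfer into the dirty field.
 */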