author     alc <alc@FreeBSD.org>  2010-05-02 17:33:46 +0000
committer  alc <alc@FreeBSD.org>  2010-05-02 17:33:46 +0000
commit     f35e97166b133de28351ffee3f6de3407d0272b7 (patch)
tree       800dba3ed92fa905f4e1acbc91746a06e2e47bc9
parent     1b4eb78965d8d551d4997edd6125fd401a212f5b (diff)
It makes no sense for vm_page_sleep_if_busy()'s helper, vm_page_sleep(),
to unconditionally set PG_REFERENCED on a page before sleeping. In many cases, it's perfectly ok for the page to disappear, i.e., be reclaimed by the page daemon, before the caller of vm_page_sleep() is reawakened. Instead, we now explicitly set PG_REFERENCED in those cases where having the page persist until the caller is awakened is clearly desirable. Note, however, that setting PG_REFERENCED on the page is still only a hint, and not a guarantee that the page should persist.
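The same caller-side idiom recurs in every hunk below: test VPO_BUSY directly, set PG_REFERENCED while holding the page queues lock, then call vm_page_sleep() and retry the lookup. As a minimal sketch, assuming a hypothetical helper name and wait message (the calls themselves are the kernel APIs exercised in this diff):

/*
 * Hypothetical helper (not part of this commit): wait until the given
 * page is no longer busy, first hinting to the page daemon that the
 * page should persist across the sleep.
 */
static vm_page_t
example_wait_for_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
retry:
	if ((m = vm_page_lookup(object, pindex)) != NULL &&
	    (m->oflags & VPO_BUSY) != 0) {
		/*
		 * PG_REFERENCED is only a hint; the page may still be
		 * reclaimed while we sleep, hence the retried lookup.
		 */
		vm_page_lock_queues();
		vm_page_flag_set(m, PG_REFERENCED);
		/* Releases the page queues lock before sleeping. */
		vm_page_sleep(m, "exwait");
		goto retry;
	}
	return (m);
}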
-rw-r--r--  sys/fs/tmpfs/tmpfs_vnops.c  30
-rw-r--r--  sys/kern/vfs_bio.c          11
-rw-r--r--  sys/vm/vm_fault.c            6
-rw-r--r--  sys/vm/vm_page.c            25
4 files changed, 59 insertions(+), 13 deletions(-)
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index c17efff..3d3755d 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -516,8 +516,16 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
 lookupvpg:
 	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
 	    vm_page_is_valid(m, offset, tlen)) {
-		if (vm_page_sleep_if_busy(m, FALSE, "tmfsmr"))
+		if ((m->oflags & VPO_BUSY) != 0) {
+			/*
+			 * Reference the page before unlocking and sleeping so
+			 * that the page daemon is less likely to reclaim it.
+			 */
+			vm_page_lock_queues();
+			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_sleep(m, "tmfsmr");
 			goto lookupvpg;
+		}
 		vm_page_busy(m);
 		VM_OBJECT_UNLOCK(vobj);
 		error = uiomove_fromphys(&m, offset, tlen, uio);
@@ -526,8 +534,16 @@ lookupvpg:
 		VM_OBJECT_UNLOCK(vobj);
 		return (error);
 	} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
-		if (vm_page_sleep_if_busy(m, FALSE, "tmfsmr"))
+		if ((m->oflags & VPO_BUSY) != 0) {
+			/*
+			 * Reference the page before unlocking and sleeping so
+			 * that the page daemon is less likely to reclaim it.
+			 */
+			vm_page_lock_queues();
+			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_sleep(m, "tmfsmr");
 			goto lookupvpg;
+		}
 		vm_page_busy(m);
 		VM_OBJECT_UNLOCK(vobj);
 		sched_pin();
@@ -627,8 +643,16 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
 lookupvpg:
 	if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
 	    vm_page_is_valid(vpg, offset, tlen)) {
-		if (vm_page_sleep_if_busy(vpg, FALSE, "tmfsmw"))
+		if ((vpg->oflags & VPO_BUSY) != 0) {
+			/*
+			 * Reference the page before unlocking and sleeping so
+			 * that the page daemon is less likely to reclaim it.
+			 */
+			vm_page_lock_queues();
+			vm_page_flag_set(vpg, PG_REFERENCED);
+			vm_page_sleep(vpg, "tmfsmw");
 			goto lookupvpg;
+		}
 		vm_page_busy(vpg);
 		vm_page_lock_queues();
 		vm_page_undirty(vpg);
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index ea846a5..dbcd6e8 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3024,8 +3024,17 @@ allocbuf(struct buf *bp, int size)
 				 *  vm_fault->getpages->cluster_read->allocbuf
 				 *
 				 */
-				if (vm_page_sleep_if_busy(m, FALSE, "pgtblk"))
+				if ((m->oflags & VPO_BUSY) != 0) {
+					/*
+					 * Reference the page before unlocking
+					 * and sleeping so that the page daemon
+					 * is less likely to reclaim it.
+					 */
+					vm_page_lock_queues();
+					vm_page_flag_set(m, PG_REFERENCED);
+					vm_page_sleep(m, "pgtblk");
 					continue;
+				}
 
 				/*
 				 * We have a good page.
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index d9cf63e..5ee4ab4 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -338,6 +338,12 @@ RetryFault:;
 			 * to pmap it.
 			 */
 			if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) {
+				/*
+				 * Reference the page before unlocking and
+				 * sleeping so that the page daemon is less
+				 * likely to reclaim it.
+				 */
+				vm_page_flag_set(fs.m, PG_REFERENCED);
 				vm_page_unlock_queues();
 				vm_page_unlock(fs.m);
 				VM_OBJECT_UNLOCK(fs.object);
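Note that the vm_fault.c hunk, unlike the others in this commit, sets PG_REFERENCED without first acquiring the page queues lock: at this point in vm_fault() that lock is already held, as the pre-existing vm_page_unlock_queues() call immediately after the added lines shows.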
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index fb0950e..e73586a 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -599,7 +599,7 @@ vm_page_free_zero(vm_page_t m)
 /*
  *	vm_page_sleep:
  *
- *	Sleep and release the page queues lock.
+ *	Sleep and release the page and page queues locks.
  *
  *	The object containing the given page must be locked.
  */
@@ -608,13 +608,10 @@ vm_page_sleep(vm_page_t m, const char *msg)
 {
 
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
-	if (!mtx_owned(vm_page_lockptr(m)))
-		vm_page_lock(m);
-	if (!mtx_owned(&vm_page_queue_mtx))
-		vm_page_lock_queues();
-	vm_page_flag_set(m, PG_REFERENCED);
-	vm_page_unlock_queues();
-	vm_page_unlock(m);
+	if (mtx_owned(&vm_page_queue_mtx))
+		vm_page_unlock_queues();
+	if (mtx_owned(vm_page_lockptr(m)))
+		vm_page_unlock(m);
 
 	/*
 	 * It's possible that while we sleep, the page will get
@@ -1896,7 +1893,17 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
-		if (vm_page_sleep_if_busy(m, TRUE, "pgrbwt")) {
+		if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
+			if ((allocflags & VM_ALLOC_RETRY) != 0) {
+				/*
+				 * Reference the page before unlocking and
+				 * sleeping so that the page daemon is less
+				 * likely to reclaim it.
+				 */
+				vm_page_lock_queues();
+				vm_page_flag_set(m, PG_REFERENCED);
+			}
+			vm_page_sleep(m, "pgrbwt");
 			if ((allocflags & VM_ALLOC_RETRY) == 0)
 				return (NULL);
 			goto retrylookup;
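The vm_page_grab() hunk above is the one place where the new hint is conditional: PG_REFERENCED is set only when VM_ALLOC_RETRY was passed, because only then does the function retry the lookup after sleeping; without the flag it returns NULL as soon as it wakes, so the page has no reason to persist. A hypothetical caller, for illustration only (the helper name and flag combination are assumptions, not taken from this commit):

/*
 * With VM_ALLOC_RETRY, vm_page_grab() now sets PG_REFERENCED before
 * sleeping on a busy page, then retries until it can return the page
 * busied.
 */
static vm_page_t
example_grab(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK(object);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	VM_OBJECT_UNLOCK(object);
	return (m);	/* Busied; release with vm_page_wakeup(). */
}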