author    markj <markj@FreeBSD.org>    2017-05-23 07:27:30 +0000
committer markj <markj@FreeBSD.org>    2017-05-23 07:27:30 +0000
commit    b1d352b152cd31bddbbb83670b7f45a213ca52cb (patch)
tree      d6aa7d62302443402a45530a50b241fd0f5128b9    /sys/vm/swap_pager.c
parent    a6749049ae872846ba11cc9c7c14e8f8d61425f1 (diff)
MFC r308474, r308691, r309203, r309365, r309703, r309898, r310720,
r308489, r308706: Add PQ_LAUNDRY and remove PG_CACHED pages.
Diffstat (limited to 'sys/vm/swap_pager.c')
-rw-r--r--    sys/vm/swap_pager.c | 33
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 30f6d97..0167117 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1126,7 +1126,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
if (shift != 0) {
for (i = 1; i <= shift; i++) {
p = vm_page_alloc(object, m[0]->pindex - i,
- VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
+ VM_ALLOC_NORMAL);
if (p == NULL) {
/* Shift allocated pages to the left. */
for (j = 0; j < i - 1; j++)
@@ -1144,8 +1144,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
if (rahead != NULL) {
for (i = 0; i < *rahead; i++) {
p = vm_page_alloc(object,
- m[reqcount - 1]->pindex + i + 1,
- VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
+ m[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
if (p == NULL)
break;
bp->b_pages[shift + reqcount + i] = p;
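The two hunks above drop VM_ALLOC_IFNOTCACHED: with PG_CACHED pages removed,
vm_page_alloc() can no longer hand back a cached page, so plain VM_ALLOC_NORMAL
suffices. A minimal sketch of the resulting readahead loop (simplified from the
hunk above; the trailing *rahead update is assumed from context, not shown in
this excerpt):

    /*
     * Sketch of the post-change readahead loop.  Allocation is
     * best-effort: the loop stops at the first failure and the
     * caller settles for however many pages it got.
     */
    for (i = 0; i < *rahead; i++) {
            p = vm_page_alloc(object, m[reqcount - 1]->pindex + i + 1,
                VM_ALLOC_NORMAL);
            if (p == NULL)
                    break;
            bp->b_pages[shift + reqcount + i] = p;
    }
    *rahead = i;    /* assumed follow-up: trim to what was allocated */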
@@ -1549,17 +1548,18 @@ swp_pager_async_iodone(struct buf *bp)
* For write success, clear the dirty
* status, then finish the I/O ( which decrements the
* busy count and possibly wakes waiter's up ).
+ * A page is only written to swap after a period of
+ * inactivity. Therefore, we do not expect it to be
+ * reused.
*/
KASSERT(!pmap_page_is_write_mapped(m),
("swp_pager_async_iodone: page %p is not write"
" protected", m));
vm_page_undirty(m);
+ vm_page_lock(m);
+ vm_page_deactivate_noreuse(m);
+ vm_page_unlock(m);
vm_page_sunbusy(m);
- if (vm_page_count_severe()) {
- vm_page_lock(m);
- vm_page_try_to_cache(m);
- vm_page_unlock(m);
- }
}
}
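Put together, the write-completion path now reads roughly as follows (an
annotated recap of the hunk above, not additional committed code):

    /*
     * Write success: the swap copy is now valid, so clear the dirty
     * bit, then move the page toward reclamation.  Because the page
     * sat idle long enough to be paged out, it is unlikely to be
     * reused soon, hence deactivate_noreuse rather than a plain
     * deactivate or the old conditional vm_page_try_to_cache().
     */
    vm_page_undirty(m);
    vm_page_lock(m);
    vm_page_deactivate_noreuse(m);
    vm_page_unlock(m);
    vm_page_sunbusy(m);     /* drop shared busy; may wake waiters */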
@@ -1635,12 +1635,15 @@ swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
/*
* SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
*
- * This routine dissociates the page at the given index within a
- * swap block from its backing store, paging it in if necessary.
- * If the page is paged in, it is placed in the inactive queue,
- * since it had its backing store ripped out from under it.
- * We also attempt to swap in all other pages in the swap block,
- * we only guarantee that the one at the specified index is
+ * This routine dissociates the page at the given index within an object
+ * from its backing store, paging it in if it does not reside in memory.
+ * If the page is paged in, it is marked dirty and placed in the laundry
+ * queue. The page is marked dirty because it no longer has backing
+ * store. It is placed in the laundry queue because it has not been
+ * accessed recently. Otherwise, it would already reside in memory.
+ *
+ * We also attempt to swap in all other pages in the swap block.
+ * However, we only guarantee that the one at the specified index is
* paged in.
*
* XXX - The code to page the whole block in doesn't work, so we
@@ -1669,7 +1672,7 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
vm_object_pip_wakeup(object);
vm_page_dirty(m);
vm_page_lock(m);
- vm_page_deactivate(m);
+ vm_page_launder(m);
vm_page_unlock(m);
vm_page_xunbusy(m);
vm_pager_page_unswapped(m);
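The dirty-and-launder sequence above is the general idiom for a page whose
backing store is being torn out from under it, e.g. during swapoff. A minimal
sketch, assuming the page KPI visible in this diff:

    /*
     * The page has lost its swap copy, so mark it dirty (there is no
     * clean backing copy to fall back on) and queue it for laundering,
     * so the laundry thread writes it out before it can be reclaimed.
     */
    vm_page_dirty(m);
    vm_page_lock(m);
    vm_page_launder(m);     /* enqueue on PQ_LAUNDRY */
    vm_page_unlock(m);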