summary refs log tree commit diff stats
path: root/sys/vm
diff options
context:
space:
mode:
author: kib <kib@FreeBSD.org> 2015-12-07 11:21:49 +0000
committer: kib <kib@FreeBSD.org> 2015-12-07 11:21:49 +0000
commit: b290ccba9249b008c010b3c00cf343420431b6bc (patch)
tree: 44b6ed20feca4ef85ab154cfdc8292bb27946836 /sys/vm
parent: 91f228157cb0934be1fa5a2393a2ef39a2fdff49 (diff)
download: FreeBSD-src-b290ccba9249b008c010b3c00cf343420431b6bc.zip
download: FreeBSD-src-b290ccba9249b008c010b3c00cf343420431b6bc.tar.gz
MFC r290920:
Raise OOM when pagedaemon is unable to produce a free page in several back-to-back passes.
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_page.h1
-rw-r--r--sys/vm/vm_pageout.c43
2 files changed, 30 insertions, 14 deletions
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index accf517..7ecb6c7 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -227,6 +227,7 @@ struct vm_domain {
long vmd_segs; /* bitmask of the segments */
boolean_t vmd_oom;
int vmd_pass; /* local pagedaemon pass */
+ int vmd_oom_seq;
int vmd_last_active_scan;
struct vm_page vmd_marker; /* marker for pagedaemon private use */
};
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 871977f..156d11a 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -121,7 +121,8 @@ static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(struct vm_domain *vmd, int pass);
-static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);
+static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
+ int starting_page_shortage);
SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
NULL);
@@ -158,6 +159,7 @@ int vm_pages_needed; /* Event on which pageout daemon sleeps */
int vm_pageout_deficit; /* Estimated number of pages deficit */
int vm_pageout_pages_needed; /* flag saying that the pageout daemon needs pages */
int vm_pageout_wakeup_thresh;
+static int vm_pageout_oom_seq = 12;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout; /* XXX */
@@ -217,6 +219,10 @@ static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
+SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
+ CTLFLAG_RW, &vm_pageout_oom_seq, 0,
+ "back-to-back calls to oom detector to start OOM");
+
#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
@@ -941,7 +947,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
long min_scan;
int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
int vnodes_skipped = 0;
- int maxlaunder, scan_tick, scanned;
+ int maxlaunder, scan_tick, scanned, starting_page_shortage;
int lockmode;
boolean_t queues_locked;
@@ -981,6 +987,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
page_shortage = vm_paging_target() + deficit;
} else
page_shortage = deficit = 0;
+ starting_page_shortage = page_shortage;
/*
* maxlaunder limits the number of dirty pages we flush per scan.
@@ -1358,6 +1365,12 @@ relock_queues:
(void)speedup_syncer();
/*
+ * If the inactive queue scan fails repeatedly to meet its
+ * target, kill the largest process.
+ */
+ vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
+
+ /*
* Compute the number of pages we want to try to move from the
* active queue to the inactive queue.
*/
@@ -1469,15 +1482,6 @@ relock_queues:
}
}
#endif
-
- /*
- * If we are critically low on one of RAM or swap and low on
- * the other, kill the largest process. However, we avoid
- * doing this on the first pass in order to give ourselves a
- * chance to flush out dirty vnode-backed pages and to allow
- * active pages to be moved to the inactive queue and reclaimed.
- */
- vm_pageout_mightbe_oom(vmd, pass);
}
static int vm_pageout_oom_vote;
@@ -1488,12 +1492,17 @@ static int vm_pageout_oom_vote;
* failed to reach free target is premature.
*/
static void
-vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass)
+vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
+ int starting_page_shortage)
{
int old_vote;
- if (pass <= 1 || !((swap_pager_avail < 64 && vm_page_count_min()) ||
- (swap_pager_full && vm_paging_target() > 0))) {
+ if (starting_page_shortage <= 0 || starting_page_shortage !=
+ page_shortage)
+ vmd->vmd_oom_seq = 0;
+ else
+ vmd->vmd_oom_seq++;
+ if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
if (vmd->vmd_oom) {
vmd->vmd_oom = FALSE;
atomic_subtract_int(&vm_pageout_oom_vote, 1);
@@ -1501,6 +1510,12 @@ vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass)
return;
}
+ /*
+ * Do not follow the call sequence until OOM condition is
+ * cleared.
+ */
+ vmd->vmd_oom_seq = 0;
+
if (vmd->vmd_oom)
return;
OpenPOWER on IntegriCloud