author		kib <kib@FreeBSD.org>	2008-09-29 19:45:12 +0000
committer	kib <kib@FreeBSD.org>	2008-09-29 19:45:12 +0000
commit		de9e8917480edc80682a8b4355dfd83e315ea796 (patch)
tree		18104c10e6cb4ca4d2a3d1e545baf694cdb57694 /sys/vm
parent		7e5e6d6f2777c86fc9d9242280c9293aac141d21 (diff)
Move the code for handling an out-of-memory condition from vm_pageout_scan()
into the separate function vm_pageout_oom(). Supply a parameter to
vm_pageout_oom() describing the reason for the call. Call vm_pageout_oom()
from swp_pager_meta_build() when the swap zone is exhausted.

Reviewed by:	alc
Tested by:	pho, jhb
MFC after:	2 weeks
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/swap_pager.c	  9
-rw-r--r--	sys/vm/vm_pageout.c	145
-rw-r--r--	sys/vm/vm_pageout.h	  4
3 files changed, 87 insertions(+), 71 deletions(-)
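The effect of the new shortage parameter is easiest to see in how a victim's
size is computed: swap usage always counts, but resident pages are added only
for VM_OOM_MEM (see the vm_pageout.c hunk below). The following standalone C
sketch mirrors just that ranking step; fake_proc, oom_size, and the sample
figures are invented for illustration and are not the kernel code.

#include <stdio.h>
#include <stddef.h>

#define VM_OOM_MEM	1	/* genuine shortage of free pages */
#define VM_OOM_SWAPZ	2	/* swap metadata zone exhausted */

/* Hypothetical stand-in for a process's vmspace accounting. */
struct fake_proc {
	const char *name;
	size_t swap_pages;	/* cf. vmspace_swap_count() */
	size_t resident_pages;	/* cf. vmspace_resident_count() */
};

/* Size used to rank OOM victims, mirroring the vm_pageout_oom() loop. */
static size_t
oom_size(const struct fake_proc *p, int shortage)
{
	size_t size = p->swap_pages;

	/* Resident pages count only for a true memory shortage. */
	if (shortage == VM_OOM_MEM)
		size += p->resident_pages;
	return (size);
}

int
main(void)
{
	struct fake_proc procs[2] = {
		{ "heavy-swap", 4096,  128 },
		{ "heavy-rss",   256, 8192 },
	};
	int shortages[2] = { VM_OOM_MEM, VM_OOM_SWAPZ };

	for (int s = 0; s < 2; s++) {
		const struct fake_proc *bigproc = NULL;
		size_t bigsize = 0;

		/* Keep the largest process seen so far, as the kernel does. */
		for (int i = 0; i < 2; i++) {
			size_t size = oom_size(&procs[i], shortages[s]);
			if (size > bigsize) {
				bigproc = &procs[i];
				bigsize = size;
			}
		}
		printf("%s shortage -> would kill %s\n",
		    shortages[s] == VM_OOM_MEM ? "VM_OOM_MEM" : "VM_OOM_SWAPZ",
		    bigproc->name);
	}
	return (0);
}

With these sample figures the two reason codes pick different victims:
VM_OOM_MEM selects the process with the large resident set, while
VM_OOM_SWAPZ selects the heavy swap consumer.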
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 74138c7..19fb3a8 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1711,9 +1711,12 @@ retry:
if (swap == NULL) {
mtx_unlock(&swhash_mtx);
VM_OBJECT_UNLOCK(object);
- if (uma_zone_exhausted(swap_zone))
- panic("swap zone exhausted, increase kern.maxswzone\n");
- VM_WAIT;
+ if (uma_zone_exhausted(swap_zone)) {
+ printf("swap zone exhausted, increase kern.maxswzone\n");
+ vm_pageout_oom(VM_OOM_SWAPZ);
+ pause("swzonex", 10);
+ } else
+ VM_WAIT;
VM_OBJECT_LOCK(object);
goto retry;
}
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 49abe42..380a920 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -681,9 +681,6 @@ vm_pageout_scan(int pass)
struct vm_page marker;
int page_shortage, maxscan, pcount;
int addl_page_shortage, addl_page_shortage_init;
- struct proc *p, *bigproc;
- struct thread *td;
- vm_offset_t size, bigsize;
vm_object_t object;
int actcount;
int vnodes_skipped = 0;
@@ -1174,7 +1171,22 @@ unlock_and_continue:
* doing this on the first pass in order to give ourselves a
* chance to flush out dirty vnode-backed pages and to allow
* active pages to be moved to the inactive queue and reclaimed.
- *
+ */
+ if (pass != 0 &&
+ ((swap_pager_avail < 64 && vm_page_count_min()) ||
+ (swap_pager_full && vm_paging_target() > 0)))
+ vm_pageout_oom(VM_OOM_MEM);
+}
+
+
+void
+vm_pageout_oom(int shortage)
+{
+ struct proc *p, *bigproc;
+ vm_offset_t size, bigsize;
+ struct thread *td;
+
+ /*
* We keep the process bigproc locked once we find it to keep anyone
* from messing with it; however, there is a possibility of
* deadlock if process B is bigproc and one of its child processes
@@ -1182,75 +1194,72 @@ unlock_and_continue:
* lock while walking this list. To avoid this, we don't block on
* the process lock but just skip a process if it is already locked.
*/
- if (pass != 0 &&
- ((swap_pager_avail < 64 && vm_page_count_min()) ||
- (swap_pager_full && vm_paging_target() > 0))) {
- bigproc = NULL;
- bigsize = 0;
- sx_slock(&allproc_lock);
- FOREACH_PROC_IN_SYSTEM(p) {
- int breakout;
+ bigproc = NULL;
+ bigsize = 0;
+ sx_slock(&allproc_lock);
+ FOREACH_PROC_IN_SYSTEM(p) {
+ int breakout;
- if (PROC_TRYLOCK(p) == 0)
- continue;
- /*
- * If this is a system or protected process, skip it.
- */
- if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
- (p->p_flag & P_PROTECTED) ||
- ((p->p_pid < 48) && (swap_pager_avail != 0))) {
- PROC_UNLOCK(p);
- continue;
- }
- /*
- * If the process is in a non-running type state,
- * don't touch it. Check all the threads individually.
- */
- breakout = 0;
- FOREACH_THREAD_IN_PROC(p, td) {
- thread_lock(td);
- if (!TD_ON_RUNQ(td) &&
- !TD_IS_RUNNING(td) &&
- !TD_IS_SLEEPING(td)) {
- thread_unlock(td);
- breakout = 1;
- break;
- }
+ if (PROC_TRYLOCK(p) == 0)
+ continue;
+ /*
+ * If this is a system or protected process, skip it.
+ */
+ if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
+ (p->p_flag & P_PROTECTED) ||
+ ((p->p_pid < 48) && (swap_pager_avail != 0))) {
+ PROC_UNLOCK(p);
+ continue;
+ }
+ /*
+ * If the process is in a non-running type state,
+ * don't touch it. Check all the threads individually.
+ */
+ breakout = 0;
+ FOREACH_THREAD_IN_PROC(p, td) {
+ thread_lock(td);
+ if (!TD_ON_RUNQ(td) &&
+ !TD_IS_RUNNING(td) &&
+ !TD_IS_SLEEPING(td)) {
thread_unlock(td);
+ breakout = 1;
+ break;
}
- if (breakout) {
- PROC_UNLOCK(p);
- continue;
- }
- /*
- * get the process size
- */
- if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
- PROC_UNLOCK(p);
- continue;
- }
- size = vmspace_swap_count(p->p_vmspace);
- vm_map_unlock_read(&p->p_vmspace->vm_map);
- size += vmspace_resident_count(p->p_vmspace);
- /*
- * if this process is bigger than the biggest one,
- * remember it.
- */
- if (size > bigsize) {
- if (bigproc != NULL)
- PROC_UNLOCK(bigproc);
- bigproc = p;
- bigsize = size;
- } else
- PROC_UNLOCK(p);
+ thread_unlock(td);
}
- sx_sunlock(&allproc_lock);
- if (bigproc != NULL) {
- killproc(bigproc, "out of swap space");
- sched_nice(bigproc, PRIO_MIN);
- PROC_UNLOCK(bigproc);
- wakeup(&cnt.v_free_count);
+ if (breakout) {
+ PROC_UNLOCK(p);
+ continue;
+ }
+ /*
+ * get the process size
+ */
+ if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
+ PROC_UNLOCK(p);
+ continue;
}
+ size = vmspace_swap_count(p->p_vmspace);
+ vm_map_unlock_read(&p->p_vmspace->vm_map);
+ if (shortage == VM_OOM_MEM)
+ size += vmspace_resident_count(p->p_vmspace);
+ /*
+ * if this process is bigger than the biggest one,
+ * remember it.
+ */
+ if (size > bigsize) {
+ if (bigproc != NULL)
+ PROC_UNLOCK(bigproc);
+ bigproc = p;
+ bigsize = size;
+ } else
+ PROC_UNLOCK(p);
+ }
+ sx_sunlock(&allproc_lock);
+ if (bigproc != NULL) {
+ killproc(bigproc, "out of swap space");
+ sched_nice(bigproc, PRIO_MIN);
+ PROC_UNLOCK(bigproc);
+ wakeup(&cnt.v_free_count);
}
}
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index 1586ac4..15ca570 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -83,6 +83,9 @@ extern int vm_pageout_page_count;
#define VM_SWAP_NORMAL 1
#define VM_SWAP_IDLE 2
+#define VM_OOM_MEM 1
+#define VM_OOM_SWAPZ 2
+
/*
* Exported routines.
*/
@@ -100,5 +103,6 @@ extern void vm_waitpfault(void);
#ifdef _KERNEL
boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
int vm_pageout_flush(vm_page_t *, int, int);
+void vm_pageout_oom(int shortage);
#endif
#endif /* _VM_VM_PAGEOUT_H_ */
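Taken together, and as the hunks above show, the newly exported routine ends
up with two callers; schematically:

/* vm_pageout_scan(): genuine page shortage, possibly out of swap. */
vm_pageout_oom(VM_OOM_MEM);

/* swp_pager_meta_build(): swap metadata zone exhausted. */
vm_pageout_oom(VM_OOM_SWAPZ);
pause("swzonex", 10);	/* back off briefly before retrying the allocation */

The VM_OOM_SWAPZ path skips resident pages when sizing victims, since it is
swap usage, not resident memory, that pins entries in the swap zone.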