summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--sys/vm/vm_map.c35
-rw-r--r--sys/vm/vm_map.h1
-rw-r--r--sys/vm/vm_pageout.c11
3 files changed, 44 insertions, 3 deletions
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 428c194..d040819 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -226,6 +226,41 @@ vmspace_free(vm)
}
/*
+ * vmspace_swap_count() - count the approximate swap usage in pages for a
+ * vmspace.
+ *
+ * Swap usage is determined by taking the proportional swap used by
+ * VM objects backing the VM map. To make up for fractional losses,
+ * if the VM object has any swap use at all the associated map entries
+ * count for at least 1 swap page.
+ */
+int
+vmspace_swap_count(struct vmspace *vmspace)
+{
+ vm_map_t map = &vmspace->vm_map;
+ vm_map_entry_t cur;
+ int count = 0;
+
+ for (cur = map->header.next; cur != &map->header; cur = cur->next) {
+ vm_object_t object;
+
+ if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
+ (object = cur->object.vm_object) != NULL &&
+ object->type == OBJT_SWAP
+ ) {
+ int n = (cur->end - cur->start) / PAGE_SIZE;
+
+ if (object->un_pager.swp.swp_bcount) {
+ count += object->un_pager.swp.swp_bcount * SWAP_META_PAGES * n /
+ object->size + 1;
+ }
+ }
+ }
+ return(count);
+}
+
+
+/*
* vm_map_create:
*
* Creates and returns a new empty VM map with
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 241a80c..5ea3ccf 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -365,6 +365,7 @@ int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
+int vmspace_swap_count __P((struct vmspace *vmspace));
#endif
#endif /* _VM_MAP_ */
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index c1e9fec..dd96cb2 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1139,8 +1139,8 @@ rescan0:
}
/*
- * make sure that we have swap space -- if we are low on memory and
- * swap -- then kill the biggest process.
+ * If we are out of swap and were not able to reach our paging
+ * target, kill the largest process.
*
* We keep the process bigproc locked once we find it to keep anyone
* from messing with it; however, there is a possibility of
@@ -1149,7 +1149,11 @@ rescan0:
* lock while walking this list. To avoid this, we don't block on
* the process lock but just skip a process if it is already locked.
*/
+ if ((vm_swap_size < 64 && vm_page_count_min()) ||
+ (swap_pager_full && vm_paging_target() > 0)) {
+#if 0
if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
+#endif
mtx_unlock(&vm_mtx);
bigproc = NULL;
bigsize = 0;
@@ -1184,7 +1188,8 @@ rescan0:
/*
* get the process size
*/
- size = vmspace_resident_count(p->p_vmspace);
+ size = vmspace_resident_count(p->p_vmspace) +
+ vmspace_swap_count(p->p_vmspace);
/*
* if this process is bigger than the biggest one
* remember it.
OpenPOWER on IntegriCloud