summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/vm_extern.h	|   2
-rw-r--r--	sys/vm/vm_glue.c	| 138
2 files changed, 95 insertions, 45 deletions
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 53f7694..65b6c8e 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -80,9 +80,7 @@ int vm_fault_quick(caddr_t v, int prot);
struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
void vm_imgact_unmap_page(struct sf_buf *sf);
void vm_thread_dispose(struct thread *td);
-void vm_thread_dispose_altkstack(struct thread *td);
int vm_thread_new(struct thread *td, int pages);
-int vm_thread_new_altkstack(struct thread *td, int pages);
void vm_thread_swapin(struct thread *td);
void vm_thread_swapout(struct thread *td);
#endif /* _KERNEL */
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 9e43a3f..234cde9 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -77,6 +77,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sx.h>
#include <sys/sysctl.h>
+#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>
@@ -308,6 +309,20 @@ vm_imgact_unmap_page(struct sf_buf *sf)
vm_page_unlock_queues();
}
+/*
+ * Cache of freed default-size kernel stacks.  The link lives inside the
+ * freed stack's own memory, so no separate allocation is needed.
+ */
+struct kstack_cache_entry {
+ vm_object_t ksobj; /* backing VM object of the cached stack */
+ struct kstack_cache_entry *next_ks_entry; /* singly-linked free list */
+};
+
+static struct kstack_cache_entry *kstack_cache; /* free-list head */
+static int kstack_cache_size = 128; /* stacks are cached only while kstacks <= this */
+static int kstacks; /* total kernel stacks currently allocated */
+static struct mtx kstack_cache_mtx; /* protects kstack_cache */
+SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
+ "");
+SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
+ "");
+
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
@@ -323,6 +338,7 @@ vm_thread_new(struct thread *td, int pages)
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m, ma[KSTACK_MAX_PAGES];
+ struct kstack_cache_entry *ks_ce;
int i;
/* Bounds check */
@@ -330,6 +346,22 @@ vm_thread_new(struct thread *td, int pages)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
+
+ if (pages == KSTACK_PAGES) {
+ mtx_lock(&kstack_cache_mtx);
+ if (kstack_cache != NULL) {
+ ks_ce = kstack_cache;
+ kstack_cache = ks_ce->next_ks_entry;
+ mtx_unlock(&kstack_cache_mtx);
+
+ td->td_kstack_obj = ks_ce->ksobj;
+ td->td_kstack = (vm_offset_t)ks_ce;
+ td->td_kstack_pages = KSTACK_PAGES;
+ return (1);
+ }
+ mtx_unlock(&kstack_cache_mtx);
+ }
+
/*
* Allocate an object for the kstack.
*/
@@ -345,7 +377,8 @@ vm_thread_new(struct thread *td, int pages)
vm_object_deallocate(ksobj);
return (0);
}
-
+
+ atomic_add_int(&kstacks, 1);
if (KSTACK_GUARD_PAGES != 0) {
pmap_qremove(ks, KSTACK_GUARD_PAGES);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
@@ -376,20 +409,13 @@ vm_thread_new(struct thread *td, int pages)
return (1);
}
-/*
- * Dispose of a thread's kernel stack.
- */
-void
-vm_thread_dispose(struct thread *td)
+/*
+ * Really free a kernel stack: unmap and free its pages, drop the
+ * backing object, and release the KVA (including the guard pages).
+ */
+static void
+vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
- vm_object_t ksobj;
- vm_offset_t ks;
 vm_page_t m;
- int i, pages;
+ int i;
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
+ /* This stack is gone for good; drop it from the global count. */
+ atomic_add_int(&kstacks, -1);
 pmap_qremove(ks, pages);
 VM_OBJECT_LOCK(ksobj);
 for (i = 0; i < pages; i++) {
@@ -405,9 +431,66 @@ vm_thread_dispose(struct thread *td)
 vm_object_deallocate(ksobj);
 kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+}
+
+/*
+ * Dispose of a thread's kernel stack.  Default-size stacks are pushed
+ * onto the kstack cache for reuse by a later vm_thread_new() (while the
+ * total stack count is at or below kstack_cache_size); others are freed.
+ */
+void
+vm_thread_dispose(struct thread *td)
+{
+ vm_object_t ksobj;
+ vm_offset_t ks;
+ struct kstack_cache_entry *ks_ce;
+ int pages;
+
+ pages = td->td_kstack_pages;
+ ksobj = td->td_kstack_obj;
+ ks = td->td_kstack;
+ if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
+ /* The freed stack's own memory holds the free-list link. */
+ ks_ce = (struct kstack_cache_entry *)ks;
+ ks_ce->ksobj = ksobj;
+ mtx_lock(&kstack_cache_mtx);
+ /*
+ * LIFO push: link to the current head, not to ourselves —
+ * a self-link would truncate the cache to one entry and
+ * make the lowmem drain loop walk freed memory.
+ */
+ ks_ce->next_ks_entry = kstack_cache;
+ kstack_cache = ks_ce;
+ mtx_unlock(&kstack_cache_mtx);
+ return;
+ }
+ vm_thread_stack_dispose(ksobj, ks, pages);
 td->td_kstack = 0;
+ td->td_kstack_pages = 0;
 }
+/*
+ * vm_lowmem event handler: detach the whole kstack cache under the
+ * mutex, then free every cached stack outside the lock.
+ */
+static void
+vm_thread_stack_lowmem(void *nulll)
+{
+ struct kstack_cache_entry *ks_ce, *ks_ce1;
+
+ mtx_lock(&kstack_cache_mtx);
+ ks_ce = kstack_cache;
+ kstack_cache = NULL;
+ mtx_unlock(&kstack_cache_mtx);
+
+ while (ks_ce != NULL) {
+ /* Advance first: dispose frees the memory holding the link. */
+ ks_ce1 = ks_ce;
+ ks_ce = ks_ce->next_ks_entry;
+
+ vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
+ KSTACK_PAGES);
+ }
+}
+
+/* Register the lowmem hook that drains the kstack cache under memory pressure. */
+static void
+kstack_cache_init(void *nulll)
+{
+
+ EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
+ EVENTHANDLER_PRI_ANY);
+}
+
+/* Mutex is set up via SYSINIT; the event handler is registered at SI_SUB_KTHREAD_INIT. */
+MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
+SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
+
/*
* Allow a thread's kernel stack to be paged out.
*/
@@ -468,37 +551,6 @@ vm_thread_swapin(struct thread *td)
}
/*
- * Set up a variable-sized alternate kstack.
- */
-int
-vm_thread_new_altkstack(struct thread *td, int pages)
-{
-
- td->td_altkstack = td->td_kstack;
- td->td_altkstack_obj = td->td_kstack_obj;
- td->td_altkstack_pages = td->td_kstack_pages;
-
- return (vm_thread_new(td, pages));
-}
-
-/*
- * Restore the original kstack.
- */
-void
-vm_thread_dispose_altkstack(struct thread *td)
-{
-
- vm_thread_dispose(td);
-
- td->td_kstack = td->td_altkstack;
- td->td_kstack_obj = td->td_altkstack_obj;
- td->td_kstack_pages = td->td_altkstack_pages;
- td->td_altkstack = 0;
- td->td_altkstack_obj = NULL;
- td->td_altkstack_pages = 0;
-}
-
-/*
* Implement fork's actions on an address space.
* Here we arrange for the address space to be copied or referenced,
* allocate a user struct (pcb and kernel stack), then call the
OpenPOWER on IntegriCloud