summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_glue.c
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2003-06-14 23:23:55 +0000
committeralc <alc@FreeBSD.org>2003-06-14 23:23:55 +0000
commit83f108b04dd3280195b5f0cf6bc2af10630e5f75 (patch)
tree5b005cceb283a32d0b4cb30493fc1a788fe8a46b /sys/vm/vm_glue.c
parentcf4a22224f432bfbaaa9bceff6eee597501588ef (diff)
downloadFreeBSD-src-83f108b04dd3280195b5f0cf6bc2af10630e5f75.zip
FreeBSD-src-83f108b04dd3280195b5f0cf6bc2af10630e5f75.tar.gz
Migrate the thread stack management functions from the machine-dependent
to the machine-independent parts of the VM. At the same time, this introduces vm object locking for the non-i386 platforms. Two details: 1. KSTACK_GUARD has been removed in favor of KSTACK_GUARD_PAGES. The different machine-dependent implementations used various combinations of KSTACK_GUARD and KSTACK_GUARD_PAGES. To disable the guard page, set KSTACK_GUARD_PAGES to 0. 2. Remove the (unnecessary) clearing of PG_ZERO in vm_thread_new. In 5.x, (but not 4.x,) PG_ZERO can only be set if VM_ALLOC_ZERO is passed to vm_page_alloc() or vm_page_grab().
Diffstat (limited to 'sys/vm/vm_glue.c')
-rw-r--r--sys/vm/vm_glue.c176
1 files changed, 172 insertions, 4 deletions
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 2145c0c..583de20 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -387,6 +387,174 @@ retry:
}
#endif
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
+/*
+ * Create the kernel stack (including pcb for i386) for a new thread.
+ * This routine directly affects the fork performance of a process and
+ * the creation performance of a thread.
+ */
+void
+vm_thread_new(struct thread *td, int pages)
+{
+ vm_object_t ksobj;
+ vm_offset_t ks;
+ vm_page_t m, ma[KSTACK_MAX_PAGES];
+ int i;
+
+ /* Bounds check: fall back to the default size, clamp to the maximum. */
+ if (pages <= 1)
+ pages = KSTACK_PAGES;
+ else if (pages > KSTACK_MAX_PAGES)
+ pages = KSTACK_MAX_PAGES;
+ /*
+ * Allocate an object for the kstack.
+ */
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
+ td->td_kstack_obj = ksobj;
+ /*
+ * Get a kernel virtual address for this thread's kstack,
+ * sized to include the guard region.
+ */
+ ks = kmem_alloc_nofault(kernel_map,
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ if (ks == 0)
+ panic("vm_thread_new: kstack allocation failed");
+ if (KSTACK_GUARD_PAGES != 0) {
+ /* Leave the guard region unmapped so a stack overflow faults. */
+ pmap_qremove(ks, KSTACK_GUARD_PAGES);
+ ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
+ }
+ td->td_kstack = ks;
+ /*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+ /*
+ * For the length of the stack, link in a real page of ram for each
+ * page of stack.
+ */
+ VM_OBJECT_LOCK(ksobj);
+ for (i = 0; i < pages; i++) {
+ /*
+ * Get a kernel stack page; VM_ALLOC_WIRED keeps it resident.
+ */
+ m = vm_page_grab(ksobj, i,
+ VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
+ ma[i] = m;
+ vm_page_lock_queues();
+ vm_page_wakeup(m);
+ m->valid = VM_PAGE_BITS_ALL;
+ vm_page_unlock_queues();
+ }
+ VM_OBJECT_UNLOCK(ksobj);
+ /* Map the pages into the kernel address space at the kstack VA. */
+ pmap_qenter(ks, ma, pages);
+}
+
+/*
+ * Dispose of a thread's kernel stack: unmap it, free the backing pages
+ * and their object, and release the kernel VA range (guard included).
+ */
+void
+vm_thread_dispose(struct thread *td)
+{
+ vm_object_t ksobj;
+ vm_offset_t ks;
+ vm_page_t m;
+ int i, pages;
+
+ pages = td->td_kstack_pages;
+ ksobj = td->td_kstack_obj;
+ ks = td->td_kstack;
+ pmap_qremove(ks, pages);
+ VM_OBJECT_LOCK(ksobj);
+ for (i = 0; i < pages; i++) {
+ m = vm_page_lookup(ksobj, i);
+ if (m == NULL)
+ panic("vm_thread_dispose: kstack already missing?");
+ vm_page_lock_queues();
+ vm_page_busy(m);
+ /* Drop the wiring taken in vm_thread_new() before freeing. */
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ }
+ VM_OBJECT_UNLOCK(ksobj);
+ vm_object_deallocate(ksobj);
+ /* td_kstack points past the guard region; free from the true base. */
+ kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+}
+
+/*
+ * Allow a thread's kernel stack to be paged out.
+ */
+void
+vm_thread_swapout(struct thread *td)
+{
+ vm_object_t ksobj;
+ vm_page_t m;
+ int i, pages;
+
+#ifdef __alpha
+ /*
+ * Make sure we aren't fpcurthread.
+ */
+ alpha_fpstate_save(td, 1);
+#endif
+ pages = td->td_kstack_pages;
+ ksobj = td->td_kstack_obj;
+ /* Remove the kernel mappings; the pages remain in the object. */
+ pmap_qremove(td->td_kstack, pages);
+ VM_OBJECT_LOCK(ksobj);
+ for (i = 0; i < pages; i++) {
+ m = vm_page_lookup(ksobj, i);
+ if (m == NULL)
+ panic("vm_thread_swapout: kstack already missing?");
+ /* Mark dirty so the contents are preserved, then unwire. */
+ vm_page_lock_queues();
+ vm_page_dirty(m);
+ vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
+ }
+ VM_OBJECT_UNLOCK(ksobj);
+}
+
+/*
+ * Bring the kernel stack for a specified thread back in.
+ */
+void
+vm_thread_swapin(struct thread *td)
+{
+ vm_object_t ksobj;
+ vm_page_t m, ma[KSTACK_MAX_PAGES];
+ int i, pages, rv;
+
+ pages = td->td_kstack_pages;
+ ksobj = td->td_kstack_obj;
+ VM_OBJECT_LOCK(ksobj);
+ for (i = 0; i < pages; i++) {
+ m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+ if (m->valid != VM_PAGE_BITS_ALL) {
+ /* Page is not fully valid; fetch it from the pager. */
+ rv = vm_pager_get_pages(ksobj, &m, 1, 0);
+ if (rv != VM_PAGER_OK)
+ panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
+ /* Re-lookup in case the pager replaced the page. */
+ m = vm_page_lookup(ksobj, i);
+ m->valid = VM_PAGE_BITS_ALL;
+ }
+ ma[i] = m;
+ vm_page_lock_queues();
+ /* Re-wire the page so it stays resident while mapped. */
+ vm_page_wire(m);
+ vm_page_wakeup(m);
+ vm_page_unlock_queues();
+ }
+ VM_OBJECT_UNLOCK(ksobj);
+ pmap_qenter(td->td_kstack, ma, pages);
+#ifdef __alpha
+ /*
+ * The pcb may be at a different physical address now so cache the
+ * new address.
+ */
+ td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb);
+#endif
+}
+
/*
* Set up a variable-sized alternate kstack.
*/
@@ -398,7 +566,7 @@ vm_thread_new_altkstack(struct thread *td, int pages)
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack_pages = td->td_kstack_pages;
- pmap_new_thread(td, pages);
+ vm_thread_new(td, pages);
}
/*
@@ -408,7 +576,7 @@ void
vm_thread_dispose_altkstack(struct thread *td)
{
- pmap_dispose_thread(td);
+ vm_thread_dispose(td);
td->td_kstack = td->td_altkstack;
td->td_kstack_obj = td->td_altkstack_obj;
@@ -572,7 +740,7 @@ faultin(p)
vm_proc_swapin(p);
FOREACH_THREAD_IN_PROC(p, td)
- pmap_swapin_thread(td);
+ vm_thread_swapin(td);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
@@ -927,7 +1095,7 @@ swapout(p)
vm_proc_swapout(p);
FOREACH_THREAD_IN_PROC(p, td)
- pmap_swapout_thread(td);
+ vm_thread_swapout(td);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
OpenPOWER on IntegriCloud