-rw-r--r--  sys/alpha/alpha/interrupt.c |   2
-rw-r--r--  sys/alpha/alpha/pmap.c      | 203
-rw-r--r--  sys/alpha/include/param.h   |   2
-rw-r--r--  sys/amd64/amd64/pmap.c      | 176
-rw-r--r--  sys/amd64/include/param.h   |   2
-rw-r--r--  sys/i386/i386/pmap.c        | 184
-rw-r--r--  sys/i386/include/param.h    |   3
-rw-r--r--  sys/ia64/ia64/pmap.c        |  47
-rw-r--r--  sys/ia64/include/param.h    |   1
-rw-r--r--  sys/kern/kern_thread.c      |   5
-rw-r--r--  sys/powerpc/include/param.h |   2
-rw-r--r--  sys/powerpc/powerpc/pmap.c  | 153
-rw-r--r--  sys/sparc64/include/param.h |   3
-rw-r--r--  sys/sparc64/sparc64/pmap.c  | 168
-rw-r--r--  sys/vm/pmap.h               |   4
-rw-r--r--  sys/vm/vm_extern.h          |   6
-rw-r--r--  sys/vm/vm_glue.c            | 176
17 files changed, 187 insertions, 950 deletions
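
In outline: the four per-architecture kernel-stack routines (pmap_new_thread(), pmap_dispose_thread(), pmap_swapout_thread(), pmap_swapin_thread()) are deleted from every pmap.c and replaced by a single machine-independent implementation in sys/vm/vm_glue.c. At the same time the boolean KSTACK_GUARD compile option becomes the integer KSTACK_GUARD_PAGES, so a guard is disabled by defining the count as 0 rather than by leaving a macro undefined. The new interface, as declared in the sys/vm/vm_extern.h hunk below:

    void vm_thread_new(struct thread *td, int pages);   /* create the kstack */
    void vm_thread_dispose(struct thread *td);          /* free it at thread exit */
    void vm_thread_swapout(struct thread *td);          /* let it be paged out */
    void vm_thread_swapin(struct thread *td);           /* bring it back in */
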
diff --git a/sys/alpha/alpha/interrupt.c b/sys/alpha/alpha/interrupt.c
index d266f14..e45f451 100644
--- a/sys/alpha/alpha/interrupt.c
+++ b/sys/alpha/alpha/interrupt.c
@@ -106,7 +106,7 @@ interrupt(a0, a1, a2, framep)
intr_restore(s);
#endif
atomic_add_int(&td->td_intr_nesting_level, 1);
-#ifndef KSTACK_GUARD
+#if KSTACK_GUARD_PAGES == 0
#ifndef SMP
{
if ((caddr_t) framep < (caddr_t) td->td_pcb + 1024) {
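
With the option now a count, code that tested for its absence compares against zero instead, hence #ifndef KSTACK_GUARD becoming #if KSTACK_GUARD_PAGES == 0 above. The guarded block is Alpha's software fallback: when no guard page exists, the interrupt path checks that the frame has not descended into the pcb at the base of the kernel stack; with a guard page, an overflow faults on the unmapped page and the check is unnecessary. A sketch of the fallback (the hunk shows only the test; the panic in the body is assumed):

    #if KSTACK_GUARD_PAGES == 0
            if ((caddr_t) framep < (caddr_t) td->td_pcb + 1024)
                    panic("possible kernel stack overflow");
    #endif
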
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 0823885..55d46fb 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -932,209 +932,6 @@ retry:
return m;
}
-#ifndef KSTACK_MAX_PAGES
-#define KSTACK_MAX_PAGES 32
-#endif
-
-/*
- * Create the kernel stack for a new thread.
- * This routine directly affects the fork perf for a process and thread.
- */
-void
-pmap_new_thread(struct thread *td, int pages)
-{
- int i;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- pt_entry_t *ptek, oldpte;
-
- /* Bounds check */
- if (pages <= 1)
- pages = KSTACK_PAGES;
- else if (pages > KSTACK_MAX_PAGES)
- pages = KSTACK_MAX_PAGES;
-
- /*
- * allocate object for the kstack
- */
- ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
- td->td_kstack_obj = ksobj;
-
-#ifdef KSTACK_GUARD
- /* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
- if (ks == NULL)
- panic("pmap_new_thread: kstack allocation failed");
-
- /* Set the first page to be the unmapped guard page. */
- ptek = vtopte(ks);
- oldpte = *ptek;
- *ptek = 0;
- if (oldpte)
- pmap_invalidate_page(kernel_pmap, ks);
- /* move to the next page, which is where the real stack starts. */
- ks += PAGE_SIZE;
- td->td_kstack = ks;
- ptek++;
-#else
- /* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
- if (ks == NULL)
- panic("pmap_new_thread: kstack allocation failed");
- td->td_kstack = ks;
- ptek = vtopte(ks);
-#endif
- /*
- * Knowing the number of pages allocated is useful when you
- * want to deallocate them.
- */
- td->td_kstack_pages = pages;
-
- /*
- * For the length of the stack, link in a real page of ram for each
- * page of stack.
- */
- for (i = 0; i < pages; i++) {
- /*
- * Get a kernel stack page
- */
- m = vm_page_grab(ksobj, i,
- VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
-
- /*
- * Enter the page into the kernel address space.
- */
- oldpte = ptek[i];
- ptek[i] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m))
- | PG_ASM | PG_KRE | PG_KWE | PG_V;
- if (oldpte)
- pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE);
-
- vm_page_lock_queues();
- vm_page_wakeup(m);
- vm_page_flag_clear(m, PG_ZERO);
- m->valid = VM_PAGE_BITS_ALL;
- vm_page_unlock_queues();
- }
-}
-
-/*
- * Dispose the kernel stack for a thread that has exited.
- * This routine directly impacts the exit perf of a thread.
- */
-void
-pmap_dispose_thread(td)
- struct thread *td;
-{
- int i;
- int pages;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- pt_entry_t *ptek;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- ptek = vtopte(ks);
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_dispose_thread: kstack already missing?");
- ptek[i] = 0;
- pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE);
- vm_page_lock_queues();
- vm_page_busy(m);
- vm_page_unwire(m, 0);
- vm_page_free(m);
- vm_page_unlock_queues();
- }
-
- /*
- * Free the space that this stack was mapped to in the kernel
- * address map.
- */
-#ifdef KSTACK_GUARD
- kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
-#else
- kmem_free(kernel_map, ks, pages * PAGE_SIZE);
-#endif
- vm_object_deallocate(ksobj);
-}
-
-/*
- * Allow the kernel stack for a thread to be prejudicially paged out.
- */
-void
-pmap_swapout_thread(td)
- struct thread *td;
-{
- int i;
- int pages;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- /*
- * Make sure we aren't fpcurthread.
- */
- alpha_fpstate_save(td, 1);
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_swapout_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_dirty(m);
- vm_page_unwire(m, 0);
- vm_page_unlock_queues();
- pmap_kremove(ks + i * PAGE_SIZE);
- }
-}
-
-/*
- * Bring the kernel stack for a specified thread back in.
- */
-void
-pmap_swapin_thread(td)
- struct thread *td;
-{
- int i, rv;
- int pages;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
- if (m->valid != VM_PAGE_BITS_ALL) {
- rv = vm_pager_get_pages(ksobj, &m, 1, 0);
- if (rv != VM_PAGER_OK)
- panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
- m = vm_page_lookup(ksobj, i);
- m->valid = VM_PAGE_BITS_ALL;
- }
- vm_page_lock_queues();
- vm_page_wire(m);
- vm_page_wakeup(m);
- vm_page_unlock_queues();
- }
-
- /*
- * The pcb may be at a different physical address now so cache the
- * new address.
- */
- td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb);
-}
-
/***************************************************
* Page table page management routines.....
***************************************************/
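
The layout that both the removed code above and its replacement (vm_thread_new() in the sys/vm/vm_glue.c hunk near the end of this diff) establish: the stack's KVA range is over-allocated by KSTACK_GUARD_PAGES, the low page(s) are left unmapped, and td_kstack points just above them, so running off the bottom of the stack faults in the guard region instead of silently corrupting whatever is mapped below. Condensed from vm_thread_new():

    ks = kmem_alloc_nofault(kernel_map,
        (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
    if (ks == 0)
            panic("vm_thread_new: kstack allocation failed");
    if (KSTACK_GUARD_PAGES != 0) {
            pmap_qremove(ks, KSTACK_GUARD_PAGES);  /* guard stays unmapped */
            ks += KSTACK_GUARD_PAGES * PAGE_SIZE;  /* stack begins above it */
    }
    td->td_kstack = ks;
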
diff --git a/sys/alpha/include/param.h b/sys/alpha/include/param.h
index d75b932..6095714 100644
--- a/sys/alpha/include/param.h
+++ b/sys/alpha/include/param.h
@@ -118,9 +118,9 @@
#define SINCR 1 /* increment of stack/NBPG */
#define KSTACK_PAGES 2 /* pages of kstack (with pcb) */
+#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* pages of u-area */
-#define KSTACK_GUARD /* compile in kstack guard page */
/*
* Mach derived conversion macros
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 38a5f5b..3e26cfa 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -883,182 +883,6 @@ retry:
return m;
}
-#ifndef KSTACK_MAX_PAGES
-#define KSTACK_MAX_PAGES 32
-#endif
-
-/*
- * Create the kernel stack (including pcb for amd64) for a new thread.
- * This routine directly affects the fork perf for a process and
- * create performance for a thread.
- */
-void
-pmap_new_thread(struct thread *td, int pages)
-{
- int i;
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_page_t m;
- vm_offset_t ks;
-
- /* Bounds check */
- if (pages <= 1)
- pages = KSTACK_PAGES;
- else if (pages > KSTACK_MAX_PAGES)
- pages = KSTACK_MAX_PAGES;
-
- /*
- * allocate object for the kstack
- */
- ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
- td->td_kstack_obj = ksobj;
-
- /* get a kernel virtual address for the kstack for this thread */
-#ifdef KSTACK_GUARD
- ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
- if (ks == 0)
- panic("pmap_new_thread: kstack allocation failed");
- if (*vtopte(ks) != 0)
- pmap_qremove(ks, 1);
- ks += PAGE_SIZE;
- td->td_kstack = ks;
-#else
- /* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
- if (ks == 0)
- panic("pmap_new_thread: kstack allocation failed");
- td->td_kstack = ks;
-#endif
- /*
- * Knowing the number of pages allocated is useful when you
- * want to deallocate them.
- */
- td->td_kstack_pages = pages;
-
- /*
- * For the length of the stack, link in a real page of ram for each
- * page of stack.
- */
- for (i = 0; i < pages; i++) {
- /*
- * Get a kernel stack page
- */
- m = vm_page_grab(ksobj, i,
- VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
- ma[i] = m;
-
- vm_page_lock_queues();
- vm_page_wakeup(m);
- vm_page_flag_clear(m, PG_ZERO);
- m->valid = VM_PAGE_BITS_ALL;
- vm_page_unlock_queues();
- }
- pmap_qenter(ks, ma, pages);
-}
-
-/*
- * Dispose the kernel stack for a thread that has exited.
- * This routine directly impacts the exit perf of a process and thread.
- */
-void
-pmap_dispose_thread(td)
- struct thread *td;
-{
- int i;
- int pages;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- pmap_qremove(ks, pages);
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_dispose_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_busy(m);
- vm_page_unwire(m, 0);
- vm_page_free(m);
- vm_page_unlock_queues();
- }
- /*
- * Free the space that this stack was mapped to in the kernel
- * address map.
- */
-#ifdef KSTACK_GUARD
- kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
-#else
- kmem_free(kernel_map, ks, pages * PAGE_SIZE);
-#endif
- vm_object_deallocate(ksobj);
-}
-
-/*
- * Allow the Kernel stack for a thread to be prejudicially paged out.
- */
-void
-pmap_swapout_thread(td)
- struct thread *td;
-{
- int i;
- int pages;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- pmap_qremove(ks, pages);
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_swapout_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_dirty(m);
- vm_page_unwire(m, 0);
- vm_page_unlock_queues();
- }
-}
-
-/*
- * Bring the kernel stack for a specified thread back in.
- */
-void
-pmap_swapin_thread(td)
- struct thread *td;
-{
- int i, rv;
- int pages;
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- if (m->valid != VM_PAGE_BITS_ALL) {
- rv = vm_pager_get_pages(ksobj, &m, 1, 0);
- if (rv != VM_PAGER_OK)
- panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
- m = vm_page_lookup(ksobj, i);
- m->valid = VM_PAGE_BITS_ALL;
- }
- ma[i] = m;
- vm_page_lock_queues();
- vm_page_wire(m);
- vm_page_wakeup(m);
- vm_page_unlock_queues();
- }
- pmap_qenter(ks, ma, pages);
-}
-
/***************************************************
* Page table page management routines.....
***************************************************/
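
One per-architecture difference absorbed here: the Alpha code wrote each stack PTE by hand and invalidated per page, while amd64 (and i386 below) collect the stack pages in ma[] and install the whole run with a single pmap_qenter(). The machine-independent version adopts the batched style; condensed (the real loop also wakes each page and marks it fully valid):

    vm_page_t ma[KSTACK_MAX_PAGES];

    for (i = 0; i < pages; i++)
            ma[i] = vm_page_grab(ksobj, i,
                VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
    pmap_qenter(ks, ma, pages);    /* map the whole run in one call */
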
diff --git a/sys/amd64/include/param.h b/sys/amd64/include/param.h
index c1dccb9..6a86a62 100644
--- a/sys/amd64/include/param.h
+++ b/sys/amd64/include/param.h
@@ -118,9 +118,9 @@
#define IOPAGES 2 /* pages of i/o permission bitmap */
#define KSTACK_PAGES 4 /* pages of kstack (with pcb) */
+#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* holds struct user WITHOUT PCB (see def.) */
-#define KSTACK_GUARD 1 /* compile in the kstack guard page */
/*
* Ceiling on amount of swblock kva space, can be changed via
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index c9a910e..c9dc0af 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -982,190 +982,6 @@ retry:
return m;
}
-#ifndef KSTACK_MAX_PAGES
-#define KSTACK_MAX_PAGES 32
-#endif
-
-/*
- * Create the kernel stack (including pcb for i386) for a new thread.
- * This routine directly affects the fork perf for a process and
- * create performance for a thread.
- */
-void
-pmap_new_thread(struct thread *td, int pages)
-{
- int i;
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_page_t m;
- vm_offset_t ks;
-
- /* Bounds check */
- if (pages <= 1)
- pages = KSTACK_PAGES;
- else if (pages > KSTACK_MAX_PAGES)
- pages = KSTACK_MAX_PAGES;
-
- /*
- * allocate object for the kstack
- */
- ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
- td->td_kstack_obj = ksobj;
-
- /* get a kernel virtual address for the kstack for this thread */
-#ifdef KSTACK_GUARD
- ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
- if (ks == 0)
- panic("pmap_new_thread: kstack allocation failed");
- if (*vtopte(ks) != 0)
- pmap_qremove(ks, 1);
- ks += PAGE_SIZE;
- td->td_kstack = ks;
-#else
- /* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
- if (ks == 0)
- panic("pmap_new_thread: kstack allocation failed");
- td->td_kstack = ks;
-#endif
- /*
- * Knowing the number of pages allocated is useful when you
- * want to deallocate them.
- */
- td->td_kstack_pages = pages;
-
- /*
- * For the length of the stack, link in a real page of ram for each
- * page of stack.
- */
- VM_OBJECT_LOCK(ksobj);
- for (i = 0; i < pages; i++) {
- /*
- * Get a kernel stack page
- */
- m = vm_page_grab(ksobj, i,
- VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
- ma[i] = m;
-
- vm_page_lock_queues();
- vm_page_wakeup(m);
- vm_page_flag_clear(m, PG_ZERO);
- m->valid = VM_PAGE_BITS_ALL;
- vm_page_unlock_queues();
- }
- VM_OBJECT_UNLOCK(ksobj);
- pmap_qenter(ks, ma, pages);
-}
-
-/*
- * Dispose the kernel stack for a thread that has exited.
- * This routine directly impacts the exit perf of a process and thread.
- */
-void
-pmap_dispose_thread(td)
- struct thread *td;
-{
- int i;
- int pages;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- pmap_qremove(ks, pages);
- VM_OBJECT_LOCK(ksobj);
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_dispose_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_busy(m);
- vm_page_unwire(m, 0);
- vm_page_free(m);
- vm_page_unlock_queues();
- }
- VM_OBJECT_UNLOCK(ksobj);
- /*
- * Free the space that this stack was mapped to in the kernel
- * address map.
- */
-#ifdef KSTACK_GUARD
- kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
-#else
- kmem_free(kernel_map, ks, pages * PAGE_SIZE);
-#endif
- vm_object_deallocate(ksobj);
-}
-
-/*
- * Allow the Kernel stack for a thread to be prejudicially paged out.
- */
-void
-pmap_swapout_thread(td)
- struct thread *td;
-{
- int i;
- int pages;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- pmap_qremove(ks, pages);
- VM_OBJECT_LOCK(ksobj);
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_swapout_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_dirty(m);
- vm_page_unwire(m, 0);
- vm_page_unlock_queues();
- }
- VM_OBJECT_UNLOCK(ksobj);
-}
-
-/*
- * Bring the kernel stack for a specified thread back in.
- */
-void
-pmap_swapin_thread(td)
- struct thread *td;
-{
- int i, rv;
- int pages;
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- VM_OBJECT_LOCK(ksobj);
- for (i = 0; i < pages; i++) {
- m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- if (m->valid != VM_PAGE_BITS_ALL) {
- rv = vm_pager_get_pages(ksobj, &m, 1, 0);
- if (rv != VM_PAGER_OK)
- panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
- m = vm_page_lookup(ksobj, i);
- m->valid = VM_PAGE_BITS_ALL;
- }
- ma[i] = m;
- vm_page_lock_queues();
- vm_page_wire(m);
- vm_page_wakeup(m);
- vm_page_unlock_queues();
- }
- VM_OBJECT_UNLOCK(ksobj);
- pmap_qenter(ks, ma, pages);
-}
-
/***************************************************
* Page table page management routines.....
***************************************************/
diff --git a/sys/i386/include/param.h b/sys/i386/include/param.h
index c3b9c59..e76c52a 100644
--- a/sys/i386/include/param.h
+++ b/sys/i386/include/param.h
@@ -106,10 +106,9 @@
#ifndef KSTACK_PAGES
#define KSTACK_PAGES 2 /* Includes pcb! */
#endif
+#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* holds struct user WITHOUT PCB (see def.) */
-#define KSTACK_GUARD /* compile in the kstack guard page */
-
/*
* Ceiling on amount of swblock kva space, can be changed via
* the kern.maxswzone /boot/loader.conf variable.
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index dd6ba1d..77effaa 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -765,53 +765,6 @@ pmap_track_modified(vm_offset_t va)
return 0;
}
-/*
- * Create the KSTACK for a new thread.
- * This routine directly affects the fork perf for a process/thread.
- */
-void
-pmap_new_thread(struct thread *td, int pages)
-{
-
- /* Bounds check */
- if (pages <= 1)
- pages = KSTACK_PAGES;
- else if (pages > KSTACK_MAX_PAGES)
- pages = KSTACK_MAX_PAGES;
- td->td_kstack = (vm_offset_t)malloc(pages * PAGE_SIZE, M_PMAP,
- M_WAITOK);
- td->td_kstack_pages = pages;
-}
-
-/*
- * Dispose the KSTACK for a thread that has exited.
- * This routine directly impacts the exit perf of a process/thread.
- */
-void
-pmap_dispose_thread(struct thread *td)
-{
-
- free((void*)td->td_kstack, M_PMAP);
- td->td_kstack = 0;
- td->td_kstack_pages = 0;
-}
-
-/*
- * Allow the KSTACK for a thread to be prejudicially paged out.
- */
-void
-pmap_swapout_thread(struct thread *td)
-{
-}
-
-/*
- * Bring the KSTACK for a specified thread back in.
- */
-void
-pmap_swapin_thread(struct thread *td)
-{
-}
-
/***************************************************
* Page table page management routines.....
***************************************************/
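
ia64 is the outlier: its removed routines drew the kstack from malloc(9), memory that is already mapped and never paged, so its swapout/swapin hooks were empty and its param.h (next hunk) sets KSTACK_GUARD_PAGES to 0. Making the guard a count lets one copy of the source serve both cases; the guard logic becomes an ordinary if on a compile-time constant, which the compiler can fold away where the count is 0, presumably the point of the conversion:

    #define KSTACK_GUARD_PAGES 0           /* ia64 */
    ...
    if (KSTACK_GUARD_PAGES != 0) {         /* constant-folded out on ia64 */
            pmap_qremove(ks, KSTACK_GUARD_PAGES);
            ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
    }
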
diff --git a/sys/ia64/include/param.h b/sys/ia64/include/param.h
index 2548fa6..1b3bf0c 100644
--- a/sys/ia64/include/param.h
+++ b/sys/ia64/include/param.h
@@ -135,6 +135,7 @@
#define SINCR 1 /* increment of stack/NBPG */
#define KSTACK_PAGES 4 /* pages of kernel stack */
+#define KSTACK_GUARD_PAGES 0 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* pages of u-area */
/*
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index bb4e3f0..32c755a 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ucontext.h>
#include <vm/vm.h>
+#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
@@ -185,7 +186,7 @@ thread_init(void *mem, int size)
td = (struct thread *)mem;
mtx_lock(&Giant);
- pmap_new_thread(td, 0);
+ vm_thread_new(td, 0);
mtx_unlock(&Giant);
cpu_thread_setup(td);
td->td_sched = (struct td_sched *)&td[1];
@@ -200,7 +201,7 @@ thread_fini(void *mem, int size)
struct thread *td;
td = (struct thread *)mem;
- pmap_dispose_thread(td);
+ vm_thread_dispose(td);
}
/*
diff --git a/sys/powerpc/include/param.h b/sys/powerpc/include/param.h
index 7a16fe7..cf4d4a4 100644
--- a/sys/powerpc/include/param.h
+++ b/sys/powerpc/include/param.h
@@ -99,7 +99,7 @@
#ifndef KSTACK_UPAGES
#define KSTACK_PAGES 4 /* includes pcb */
-#define KSTACK_GUARD_PAGES 1
+#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#endif
#define USPACE (KSTACK_PAGES * PAGE_SIZE) /* total size of pcb */
#define UAREA_PAGES 1 /* holds struct user WITHOUT PCB */
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
index 4808b4d..ef84865 100644
--- a/sys/powerpc/powerpc/pmap.c
+++ b/sys/powerpc/powerpc/pmap.c
@@ -1556,159 +1556,6 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
-#ifndef KSTACK_MAX_PAGES
-#define KSTACK_MAX_PAGES 32
-#endif
-
-/*
- * Create the kernel stack and pcb for a new thread.
- * This routine directly affects the fork perf for a process and
- * create performance for a thread.
- */
-void
-pmap_new_thread(struct thread *td, int pages)
-{
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- u_int i;
-
- /* Bounds check */
- if (pages <= 1)
- pages = KSTACK_PAGES;
- else if (pages > KSTACK_MAX_PAGES)
- pages = KSTACK_MAX_PAGES;
-
- /*
- * Allocate object for the kstack.
- */
- ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
- td->td_kstack_obj = ksobj;
-
- /*
- * Get a kernel virtual address for the kstack for this thread.
- */
- ks = kmem_alloc_nofault(kernel_map,
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
- if (ks == 0)
- panic("pmap_new_thread: kstack allocation failed");
- TLBIE(ks);
- ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
- td->td_kstack = ks;
-
- /*
- * Knowing the number of pages allocated is useful when you
- * want to deallocate them.
- */
- td->td_kstack_pages = pages;
-
- for (i = 0; i < pages; i++) {
- /*
- * Get a kernel stack page.
- */
- m = vm_page_grab(ksobj, i,
- VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
- ma[i] = m;
-
- vm_page_lock_queues();
- vm_page_wakeup(m);
- vm_page_flag_clear(m, PG_ZERO);
- m->valid = VM_PAGE_BITS_ALL;
- vm_page_unlock_queues();
- }
-
- /*
- * Enter the page into the kernel address space
- */
- pmap_qenter(ks, ma, pages);
-}
-
-void
-pmap_dispose_thread(struct thread *td)
-{
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- int i;
- int pages;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages ; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_dispose_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_busy(m);
- vm_page_unwire(m, 0);
- vm_page_free(m);
- vm_page_unlock_queues();
- }
- pmap_qremove(ks, pages);
- kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
- vm_object_deallocate(ksobj);
-}
-
-void
-pmap_swapin_thread(struct thread *td)
-{
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- int rv;
- int i;
- int pages;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- if (m->valid != VM_PAGE_BITS_ALL) {
- rv = vm_pager_get_pages(ksobj, &m, 1, 0);
- if (rv != VM_PAGER_OK)
- panic("pmap_swapin_thread: cannot get kstack");
- m = vm_page_lookup(ksobj, i);
- m->valid = VM_PAGE_BITS_ALL;
- }
- ma[i] = m;
- vm_page_lock_queues();
- vm_page_wire(m);
- vm_page_wakeup(m);
- vm_page_unlock_queues();
- }
- pmap_qenter(ks, ma, pages);
-}
-
-
-void
-pmap_swapout_thread(struct thread *td)
-{
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- int i;
- int pages;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = (vm_offset_t)td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_swapout_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_dirty(m);
- vm_page_unwire(m, 0);
- vm_page_unlock_queues();
- }
- pmap_qremove(ks, pages);
-}
-
/*
* Allocate a physical page of memory directly from the phys_avail map.
* Can only be called from pmap_bootstrap before avail start and end are
diff --git a/sys/sparc64/include/param.h b/sys/sparc64/include/param.h
index 94a6ef8..7ee6360 100644
--- a/sys/sparc64/include/param.h
+++ b/sys/sparc64/include/param.h
@@ -105,11 +105,10 @@
#define PAGE_MASK_MAX PAGE_MASK_4M
#define KSTACK_PAGES 4 /* pages of kernel stack (with pcb) */
+#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* pages of user area */
#define PCPU_PAGES 1
-#define KSTACK_GUARD /* compile in kstack guard page */
-#define KSTACK_GUARD_PAGES 1
/*
* Mach derived conversion macros
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 4e30859..3801f12 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -924,174 +924,6 @@ pmap_qremove(vm_offset_t sva, int count)
tlb_range_demap(kernel_pmap, sva, va);
}
-#ifndef KSTACK_MAX_PAGES
-#define KSTACK_MAX_PAGES 32
-#endif
-
-/*
- * Create the kernel stack and pcb for a new thread.
- * This routine directly affects the fork perf for a process and
- * create performance for a thread.
- */
-void
-pmap_new_thread(struct thread *td, int pages)
-{
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- u_int i;
-
- PMAP_STATS_INC(pmap_nnew_thread);
- /* Bounds check */
- if (pages <= 1)
- pages = KSTACK_PAGES;
- else if (pages > KSTACK_MAX_PAGES)
- pages = KSTACK_MAX_PAGES;
-
- /*
- * Allocate object for the kstack,
- */
- ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
- td->td_kstack_obj = ksobj;
-
- /*
- * Get a kernel virtual address for the kstack for this thread.
- */
- ks = kmem_alloc_nofault(kernel_map,
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
- if (ks == 0)
- panic("pmap_new_thread: kstack allocation failed");
- if (KSTACK_GUARD_PAGES != 0) {
- tlb_page_demap(kernel_pmap, ks);
- ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
- }
- td->td_kstack = ks;
-
- /*
- * Knowing the number of pages allocated is useful when you
- * want to deallocate them.
- */
- td->td_kstack_pages = pages;
-
- for (i = 0; i < pages; i++) {
- /*
- * Get a kernel stack page.
- */
- m = vm_page_grab(ksobj, i,
- VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
- ma[i] = m;
- if (DCACHE_COLOR(ks + (i * PAGE_SIZE)) !=
- DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
- PMAP_STATS_INC(pmap_nnew_thread_oc);
-
- vm_page_lock_queues();
- vm_page_wakeup(m);
- vm_page_flag_clear(m, PG_ZERO);
- m->valid = VM_PAGE_BITS_ALL;
- vm_page_unlock_queues();
- }
-
- /*
- * Enter the page into the kernel address space.
- */
- pmap_qenter(ks, ma, pages);
-}
-
-/*
- * Dispose the kernel stack for a thread that has exited.
- * This routine directly impacts the exit perf of a process and thread.
- */
-void
-pmap_dispose_thread(struct thread *td)
-{
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- int pages;
- int i;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages ; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_dispose_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_busy(m);
- vm_page_unwire(m, 0);
- vm_page_free(m);
- vm_page_unlock_queues();
- }
- pmap_qremove(ks, pages);
- kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
- vm_object_deallocate(ksobj);
-}
-
-/*
- * Allow the kernel stack for a thread to be prejudicially paged out.
- */
-void
-pmap_swapout_thread(struct thread *td)
-{
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- int pages;
- int i;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = (vm_offset_t)td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_swapout_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_dirty(m);
- vm_page_unwire(m, 0);
- vm_page_unlock_queues();
- }
- pmap_qremove(ks, pages);
-}
-
-/*
- * Bring the kernel stack for a specified thread back in.
- */
-void
-pmap_swapin_thread(struct thread *td)
-{
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- int rv;
- int i;
- int pages;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- if (m->valid != VM_PAGE_BITS_ALL) {
- rv = vm_pager_get_pages(ksobj, &m, 1, 0);
- if (rv != VM_PAGER_OK)
- panic("pmap_swapin_thread: cannot get kstack");
- m = vm_page_lookup(ksobj, i);
- m->valid = VM_PAGE_BITS_ALL;
- }
- ma[i] = m;
- vm_page_lock_queues();
- vm_page_wire(m);
- vm_page_wakeup(m);
- vm_page_unlock_queues();
- }
- pmap_qenter(ks, ma, pages);
-}
-
/*
* Initialize the pmap associated with process 0.
*/
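
Two machine-dependent details from the removed code are worth noting against the MI replacement below: the alpha hooks (saving FP state before swapout, re-caching the pcb's physical address after swapin) survive in vm_glue.c under #ifdef __alpha, while sparc64's cache-color accounting does not. The removed sparc64 loop counted stack pages whose kernel virtual color mismatched their physical color:

    if (DCACHE_COLOR(ks + (i * PAGE_SIZE)) !=
        DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
            PMAP_STATS_INC(pmap_nnew_thread_oc);   /* off-color stack page */
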
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 36ca187..4f93704 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -129,10 +129,6 @@ void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
-void pmap_new_thread(struct thread *td, int pages);
-void pmap_dispose_thread(struct thread *td);
-void pmap_swapout_thread(struct thread *td);
-void pmap_swapin_thread(struct thread *td);
void pmap_activate(struct thread *td);
vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size);
void *pmap_kenter_temporary(vm_offset_t pa, int i);
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 4718eff..25a861a 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -96,7 +96,11 @@ void vm_object_print(/* db_expr_t */ long, boolean_t, /* db_expr_t */ long,
int vm_fault_quick(caddr_t v, int prot);
void vm_proc_new(struct proc *p);
void vm_proc_dispose(struct proc *p);
-void vm_thread_new_altkstack(struct thread *td, int pages);
+void vm_thread_dispose(struct thread *td);
void vm_thread_dispose_altkstack(struct thread *td);
+void vm_thread_new(struct thread *td, int pages);
+void vm_thread_new_altkstack(struct thread *td, int pages);
+void vm_thread_swapin(struct thread *td);
+void vm_thread_swapout(struct thread *td);
#endif /* _KERNEL */
#endif /* !_VM_EXTERN_H_ */
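
The swapout/swapin pair below is what lets the swapper reclaim the kernel stacks of swapped-out processes: swapout unmaps the stack, dirties each page (its contents must reach swap) and unwires it so the page daemon may take it; swapin re-grabs each page, pulls any non-resident one back through the pager, rewires it, and remaps the run. The skeleton, condensed from vm_thread_swapout()/vm_thread_swapin():

    /* out */
    pmap_qremove(td->td_kstack, pages);
    vm_page_dirty(m);                      /* per page */
    vm_page_unwire(m, 0);

    /* in */
    m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
    if (m->valid != VM_PAGE_BITS_ALL)      /* not resident: read it back */
            rv = vm_pager_get_pages(ksobj, &m, 1, 0);
    vm_page_wire(m);
    pmap_qenter(td->td_kstack, ma, pages);
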
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 2145c0c..583de20 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -387,6 +387,174 @@ retry:
}
#endif
+#ifndef KSTACK_MAX_PAGES
+#define KSTACK_MAX_PAGES 32
+#endif
+
+/*
+ * Create the kernel stack (including pcb for i386) for a new thread.
+ * This routine directly affects the fork perf for a process and
+ * create performance for a thread.
+ */
+void
+vm_thread_new(struct thread *td, int pages)
+{
+ vm_object_t ksobj;
+ vm_offset_t ks;
+ vm_page_t m, ma[KSTACK_MAX_PAGES];
+ int i;
+
+ /* Bounds check */
+ if (pages <= 1)
+ pages = KSTACK_PAGES;
+ else if (pages > KSTACK_MAX_PAGES)
+ pages = KSTACK_MAX_PAGES;
+ /*
+ * Allocate an object for the kstack.
+ */
+ ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
+ td->td_kstack_obj = ksobj;
+ /*
+ * Get a kernel virtual address for this thread's kstack.
+ */
+ ks = kmem_alloc_nofault(kernel_map,
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ if (ks == 0)
+ panic("vm_thread_new: kstack allocation failed");
+ if (KSTACK_GUARD_PAGES != 0) {
+ pmap_qremove(ks, KSTACK_GUARD_PAGES);
+ ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
+ }
+ td->td_kstack = ks;
+ /*
+ * Knowing the number of pages allocated is useful when you
+ * want to deallocate them.
+ */
+ td->td_kstack_pages = pages;
+ /*
+ * For the length of the stack, link in a real page of ram for each
+ * page of stack.
+ */
+ VM_OBJECT_LOCK(ksobj);
+ for (i = 0; i < pages; i++) {
+ /*
+ * Get a kernel stack page.
+ */
+ m = vm_page_grab(ksobj, i,
+ VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
+ ma[i] = m;
+ vm_page_lock_queues();
+ vm_page_wakeup(m);
+ m->valid = VM_PAGE_BITS_ALL;
+ vm_page_unlock_queues();
+ }
+ VM_OBJECT_UNLOCK(ksobj);
+ pmap_qenter(ks, ma, pages);
+}
+
+/*
+ * Dispose of a thread's kernel stack.
+ */
+void
+vm_thread_dispose(struct thread *td)
+{
+ vm_object_t ksobj;
+ vm_offset_t ks;
+ vm_page_t m;
+ int i, pages;
+
+ pages = td->td_kstack_pages;
+ ksobj = td->td_kstack_obj;
+ ks = td->td_kstack;
+ pmap_qremove(ks, pages);
+ VM_OBJECT_LOCK(ksobj);
+ for (i = 0; i < pages; i++) {
+ m = vm_page_lookup(ksobj, i);
+ if (m == NULL)
+ panic("vm_thread_dispose: kstack already missing?");
+ vm_page_lock_queues();
+ vm_page_busy(m);
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ }
+ VM_OBJECT_UNLOCK(ksobj);
+ vm_object_deallocate(ksobj);
+ kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+}
+
+/*
+ * Allow a thread's kernel stack to be paged out.
+ */
+void
+vm_thread_swapout(struct thread *td)
+{
+ vm_object_t ksobj;
+ vm_page_t m;
+ int i, pages;
+
+#ifdef __alpha
+ /*
+ * Make sure we aren't fpcurthread.
+ */
+ alpha_fpstate_save(td, 1);
+#endif
+ pages = td->td_kstack_pages;
+ ksobj = td->td_kstack_obj;
+ pmap_qremove(td->td_kstack, pages);
+ VM_OBJECT_LOCK(ksobj);
+ for (i = 0; i < pages; i++) {
+ m = vm_page_lookup(ksobj, i);
+ if (m == NULL)
+ panic("vm_thread_swapout: kstack already missing?");
+ vm_page_lock_queues();
+ vm_page_dirty(m);
+ vm_page_unwire(m, 0);
+ vm_page_unlock_queues();
+ }
+ VM_OBJECT_UNLOCK(ksobj);
+}
+
+/*
+ * Bring the kernel stack for a specified thread back in.
+ */
+void
+vm_thread_swapin(struct thread *td)
+{
+ vm_object_t ksobj;
+ vm_page_t m, ma[KSTACK_MAX_PAGES];
+ int i, pages, rv;
+
+ pages = td->td_kstack_pages;
+ ksobj = td->td_kstack_obj;
+ VM_OBJECT_LOCK(ksobj);
+ for (i = 0; i < pages; i++) {
+ m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+ if (m->valid != VM_PAGE_BITS_ALL) {
+ rv = vm_pager_get_pages(ksobj, &m, 1, 0);
+ if (rv != VM_PAGER_OK)
+ panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
+ m = vm_page_lookup(ksobj, i);
+ m->valid = VM_PAGE_BITS_ALL;
+ }
+ ma[i] = m;
+ vm_page_lock_queues();
+ vm_page_wire(m);
+ vm_page_wakeup(m);
+ vm_page_unlock_queues();
+ }
+ VM_OBJECT_UNLOCK(ksobj);
+ pmap_qenter(td->td_kstack, ma, pages);
+#ifdef __alpha
+ /*
+ * The pcb may be at a different physical address now so cache the
+ * new address.
+ */
+ td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb);
+#endif
+}
+
/*
* Set up a variable-sized alternate kstack.
*/
@@ -398,7 +566,7 @@ vm_thread_new_altkstack(struct thread *td, int pages)
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack_pages = td->td_kstack_pages;
- pmap_new_thread(td, pages);
+ vm_thread_new(td, pages);
}
/*
@@ -408,7 +576,7 @@ void
vm_thread_dispose_altkstack(struct thread *td)
{
- pmap_dispose_thread(td);
+ vm_thread_dispose(td);
td->td_kstack = td->td_altkstack;
td->td_kstack_obj = td->td_altkstack_obj;
@@ -572,7 +740,7 @@ faultin(p)
vm_proc_swapin(p);
FOREACH_THREAD_IN_PROC(p, td)
- pmap_swapin_thread(td);
+ vm_thread_swapin(td);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
@@ -927,7 +1095,7 @@ swapout(p)
vm_proc_swapout(p);
FOREACH_THREAD_IN_PROC(p, td)
- pmap_swapout_thread(td);
+ vm_thread_swapout(td);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);