summaryrefslogtreecommitdiffstats
path: root/sys/amd64
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2003-06-14 23:23:55 +0000
committeralc <alc@FreeBSD.org>2003-06-14 23:23:55 +0000
commit83f108b04dd3280195b5f0cf6bc2af10630e5f75 (patch)
tree5b005cceb283a32d0b4cb30493fc1a788fe8a46b /sys/amd64
parentcf4a22224f432bfbaaa9bceff6eee597501588ef (diff)
downloadFreeBSD-src-83f108b04dd3280195b5f0cf6bc2af10630e5f75.zip
FreeBSD-src-83f108b04dd3280195b5f0cf6bc2af10630e5f75.tar.gz
Migrate the thread stack management functions from the machine-dependent
to the machine-independent parts of the VM. At the same time, this introduces vm object locking for the non-i386 platforms. Two details: 1. KSTACK_GUARD has been removed in favor of KSTACK_GUARD_PAGES. The different machine-dependent implementations used various combinations of KSTACK_GUARD and KSTACK_GUARD_PAGES. To disable the guard page, set KSTACK_GUARD_PAGES to 0. 2. Remove the (unnecessary) clearing of PG_ZERO in vm_thread_new. In 5.x, (but not 4.x,) PG_ZERO can only be set if VM_ALLOC_ZERO is passed to vm_page_alloc() or vm_page_grab().
Diffstat (limited to 'sys/amd64')
-rw-r--r--sys/amd64/amd64/pmap.c176
-rw-r--r--sys/amd64/include/param.h2
2 files changed, 1 insertion, 177 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 38a5f5b..3e26cfa 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -883,182 +883,6 @@ retry:
return m;
}
-#ifndef KSTACK_MAX_PAGES
-#define KSTACK_MAX_PAGES 32
-#endif
-
-/*
- * Create the kernel stack (including pcb for amd64) for a new thread.
- * This routine directly affects the fork perf for a process and
- * create performance for a thread.
- */
-void
-pmap_new_thread(struct thread *td, int pages)
-{
- int i;
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_page_t m;
- vm_offset_t ks;
-
- /* Bounds check */
- if (pages <= 1)
- pages = KSTACK_PAGES;
- else if (pages > KSTACK_MAX_PAGES)
- pages = KSTACK_MAX_PAGES;
-
- /*
- * allocate object for the kstack
- */
- ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
- td->td_kstack_obj = ksobj;
-
- /* get a kernel virtual address for the kstack for this thread */
-#ifdef KSTACK_GUARD
- ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
- if (ks == 0)
- panic("pmap_new_thread: kstack allocation failed");
- if (*vtopte(ks) != 0)
- pmap_qremove(ks, 1);
- ks += PAGE_SIZE;
- td->td_kstack = ks;
-#else
- /* get a kernel virtual address for the kstack for this thread */
- ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
- if (ks == 0)
- panic("pmap_new_thread: kstack allocation failed");
- td->td_kstack = ks;
-#endif
- /*
- * Knowing the number of pages allocated is useful when you
- * want to deallocate them.
- */
- td->td_kstack_pages = pages;
-
- /*
- * For the length of the stack, link in a real page of ram for each
- * page of stack.
- */
- for (i = 0; i < pages; i++) {
- /*
- * Get a kernel stack page
- */
- m = vm_page_grab(ksobj, i,
- VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
- ma[i] = m;
-
- vm_page_lock_queues();
- vm_page_wakeup(m);
- vm_page_flag_clear(m, PG_ZERO);
- m->valid = VM_PAGE_BITS_ALL;
- vm_page_unlock_queues();
- }
- pmap_qenter(ks, ma, pages);
-}
-
-/*
- * Dispose the kernel stack for a thread that has exited.
- * This routine directly impacts the exit perf of a process and thread.
- */
-void
-pmap_dispose_thread(td)
- struct thread *td;
-{
- int i;
- int pages;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- pmap_qremove(ks, pages);
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_dispose_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_busy(m);
- vm_page_unwire(m, 0);
- vm_page_free(m);
- vm_page_unlock_queues();
- }
- /*
- * Free the space that this stack was mapped to in the kernel
- * address map.
- */
-#ifdef KSTACK_GUARD
- kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
-#else
- kmem_free(kernel_map, ks, pages * PAGE_SIZE);
-#endif
- vm_object_deallocate(ksobj);
-}
-
-/*
- * Allow the Kernel stack for a thread to be prejudicially paged out.
- */
-void
-pmap_swapout_thread(td)
- struct thread *td;
-{
- int i;
- int pages;
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- pmap_qremove(ks, pages);
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_swapout_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_dirty(m);
- vm_page_unwire(m, 0);
- vm_page_unlock_queues();
- }
-}
-
-/*
- * Bring the kernel stack for a specified thread back in.
- */
-void
-pmap_swapin_thread(td)
- struct thread *td;
-{
- int i, rv;
- int pages;
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- if (m->valid != VM_PAGE_BITS_ALL) {
- rv = vm_pager_get_pages(ksobj, &m, 1, 0);
- if (rv != VM_PAGER_OK)
- panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
- m = vm_page_lookup(ksobj, i);
- m->valid = VM_PAGE_BITS_ALL;
- }
- ma[i] = m;
- vm_page_lock_queues();
- vm_page_wire(m);
- vm_page_wakeup(m);
- vm_page_unlock_queues();
- }
- pmap_qenter(ks, ma, pages);
-}
-
/***************************************************
* Page table page management routines.....
***************************************************/
diff --git a/sys/amd64/include/param.h b/sys/amd64/include/param.h
index c1dccb9..6a86a62 100644
--- a/sys/amd64/include/param.h
+++ b/sys/amd64/include/param.h
@@ -118,9 +118,9 @@
#define IOPAGES 2 /* pages of i/o permission bitmap */
#define KSTACK_PAGES 4 /* pages of kstack (with pcb) */
+#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* holds struct user WITHOUT PCB (see def.) */
-#define KSTACK_GUARD 1 /* compile in the kstack guard page */
/*
* Ceiling on amount of swblock kva space, can be changed via
OpenPOWER on IntegriCloud