summary refs log tree commit diff stats
path: root/sys/sparc64
diff options
context:
space:
mode:
author alc <alc@FreeBSD.org> 2003-06-14 23:23:55 +0000
committer alc <alc@FreeBSD.org> 2003-06-14 23:23:55 +0000
commit 83f108b04dd3280195b5f0cf6bc2af10630e5f75 (patch)
tree 5b005cceb283a32d0b4cb30493fc1a788fe8a46b /sys/sparc64
parent cf4a22224f432bfbaaa9bceff6eee597501588ef (diff)
downloadFreeBSD-src-83f108b04dd3280195b5f0cf6bc2af10630e5f75.zip
FreeBSD-src-83f108b04dd3280195b5f0cf6bc2af10630e5f75.tar.gz
Migrate the thread stack management functions from the machine-dependent
to the machine-independent parts of the VM. At the same time, this introduces vm object locking for the non-i386 platforms. Two details: 1. KSTACK_GUARD has been removed in favor of KSTACK_GUARD_PAGES. The different machine-dependent implementations used various combinations of KSTACK_GUARD and KSTACK_GUARD_PAGES. To disable the guard page, set KSTACK_GUARD_PAGES to 0. 2. Remove the (unnecessary) clearing of PG_ZERO in vm_thread_new. In 5.x, (but not 4.x,) PG_ZERO can only be set if VM_ALLOC_ZERO is passed to vm_page_alloc() or vm_page_grab().
Diffstat (limited to 'sys/sparc64')
-rw-r--r--sys/sparc64/include/param.h3
-rw-r--r--sys/sparc64/sparc64/pmap.c168
2 files changed, 1 insertion, 170 deletions
diff --git a/sys/sparc64/include/param.h b/sys/sparc64/include/param.h
index 94a6ef8..7ee6360 100644
--- a/sys/sparc64/include/param.h
+++ b/sys/sparc64/include/param.h
@@ -105,11 +105,10 @@
#define PAGE_MASK_MAX PAGE_MASK_4M
#define KSTACK_PAGES 4 /* pages of kernel stack (with pcb) */
+#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* pages of user area */
#define PCPU_PAGES 1
-#define KSTACK_GUARD /* compile in kstack guard page */
-#define KSTACK_GUARD_PAGES 1
/*
* Mach derived conversion macros
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 4e30859..3801f12 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -924,174 +924,6 @@ pmap_qremove(vm_offset_t sva, int count)
tlb_range_demap(kernel_pmap, sva, va);
}
-#ifndef KSTACK_MAX_PAGES
-#define KSTACK_MAX_PAGES 32
-#endif
-
-/*
- * Create the kernel stack and pcb for a new thread.
- * This routine directly affects the fork perf for a process and
- * create performance for a thread.
- */
-void
-pmap_new_thread(struct thread *td, int pages)
-{
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- u_int i;
-
- PMAP_STATS_INC(pmap_nnew_thread);
- /* Bounds check */
- if (pages <= 1)
- pages = KSTACK_PAGES;
- else if (pages > KSTACK_MAX_PAGES)
- pages = KSTACK_MAX_PAGES;
-
- /*
- * Allocate object for the kstack,
- */
- ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
- td->td_kstack_obj = ksobj;
-
- /*
- * Get a kernel virtual address for the kstack for this thread.
- */
- ks = kmem_alloc_nofault(kernel_map,
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
- if (ks == 0)
- panic("pmap_new_thread: kstack allocation failed");
- if (KSTACK_GUARD_PAGES != 0) {
- tlb_page_demap(kernel_pmap, ks);
- ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
- }
- td->td_kstack = ks;
-
- /*
- * Knowing the number of pages allocated is useful when you
- * want to deallocate them.
- */
- td->td_kstack_pages = pages;
-
- for (i = 0; i < pages; i++) {
- /*
- * Get a kernel stack page.
- */
- m = vm_page_grab(ksobj, i,
- VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
- ma[i] = m;
- if (DCACHE_COLOR(ks + (i * PAGE_SIZE)) !=
- DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
- PMAP_STATS_INC(pmap_nnew_thread_oc);
-
- vm_page_lock_queues();
- vm_page_wakeup(m);
- vm_page_flag_clear(m, PG_ZERO);
- m->valid = VM_PAGE_BITS_ALL;
- vm_page_unlock_queues();
- }
-
- /*
- * Enter the page into the kernel address space.
- */
- pmap_qenter(ks, ma, pages);
-}
-
-/*
- * Dispose the kernel stack for a thread that has exited.
- * This routine directly impacts the exit perf of a process and thread.
- */
-void
-pmap_dispose_thread(struct thread *td)
-{
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- int pages;
- int i;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages ; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_dispose_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_busy(m);
- vm_page_unwire(m, 0);
- vm_page_free(m);
- vm_page_unlock_queues();
- }
- pmap_qremove(ks, pages);
- kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
- vm_object_deallocate(ksobj);
-}
-
-/*
- * Allow the kernel stack for a thread to be prejudicially paged out.
- */
-void
-pmap_swapout_thread(struct thread *td)
-{
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- int pages;
- int i;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = (vm_offset_t)td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_lookup(ksobj, i);
- if (m == NULL)
- panic("pmap_swapout_thread: kstack already missing?");
- vm_page_lock_queues();
- vm_page_dirty(m);
- vm_page_unwire(m, 0);
- vm_page_unlock_queues();
- }
- pmap_qremove(ks, pages);
-}
-
-/*
- * Bring the kernel stack for a specified thread back in.
- */
-void
-pmap_swapin_thread(struct thread *td)
-{
- vm_page_t ma[KSTACK_MAX_PAGES];
- vm_object_t ksobj;
- vm_offset_t ks;
- vm_page_t m;
- int rv;
- int i;
- int pages;
-
- pages = td->td_kstack_pages;
- ksobj = td->td_kstack_obj;
- ks = td->td_kstack;
- for (i = 0; i < pages; i++) {
- m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- if (m->valid != VM_PAGE_BITS_ALL) {
- rv = vm_pager_get_pages(ksobj, &m, 1, 0);
- if (rv != VM_PAGER_OK)
- panic("pmap_swapin_thread: cannot get kstack");
- m = vm_page_lookup(ksobj, i);
- m->valid = VM_PAGE_BITS_ALL;
- }
- ma[i] = m;
- vm_page_lock_queues();
- vm_page_wire(m);
- vm_page_wakeup(m);
- vm_page_unlock_queues();
- }
- pmap_qenter(ks, ma, pages);
-}
-
/*
* Initialize the pmap associated with process 0.
*/
OpenPOWER on IntegriCloud