diff options
author | alc <alc@FreeBSD.org> | 2003-06-14 23:23:55 +0000 |
---|---|---|
committer | alc <alc@FreeBSD.org> | 2003-06-14 23:23:55 +0000 |
commit | 83f108b04dd3280195b5f0cf6bc2af10630e5f75 (patch) | |
tree | 5b005cceb283a32d0b4cb30493fc1a788fe8a46b /sys/i386 | |
parent | cf4a22224f432bfbaaa9bceff6eee597501588ef (diff) | |
download | FreeBSD-src-83f108b04dd3280195b5f0cf6bc2af10630e5f75.zip FreeBSD-src-83f108b04dd3280195b5f0cf6bc2af10630e5f75.tar.gz |
Migrate the thread stack management functions from the machine-dependent
to the machine-independent parts of the VM. At the same time, this
introduces vm object locking for the non-i386 platforms.
Two details:
1. KSTACK_GUARD has been removed in favor of KSTACK_GUARD_PAGES. The
different machine-dependent implementations used various combinations
of KSTACK_GUARD and KSTACK_GUARD_PAGES. To disable the guard page, set
KSTACK_GUARD_PAGES to 0.
2. Remove the (unnecessary) clearing of PG_ZERO in vm_thread_new. In
5.x, (but not 4.x,) PG_ZERO can only be set if VM_ALLOC_ZERO is passed
to vm_page_alloc() or vm_page_grab().
Diffstat (limited to 'sys/i386')
-rw-r--r-- | sys/i386/i386/pmap.c | 184 | ||||
-rw-r--r-- | sys/i386/include/param.h | 3 |
2 files changed, 1 insertions, 186 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c index c9a910e..c9dc0af 100644 --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -982,190 +982,6 @@ retry: return m; } -#ifndef KSTACK_MAX_PAGES -#define KSTACK_MAX_PAGES 32 -#endif - -/* - * Create the kernel stack (including pcb for i386) for a new thread. - * This routine directly affects the fork perf for a process and - * create performance for a thread. - */ -void -pmap_new_thread(struct thread *td, int pages) -{ - int i; - vm_page_t ma[KSTACK_MAX_PAGES]; - vm_object_t ksobj; - vm_page_t m; - vm_offset_t ks; - - /* Bounds check */ - if (pages <= 1) - pages = KSTACK_PAGES; - else if (pages > KSTACK_MAX_PAGES) - pages = KSTACK_MAX_PAGES; - - /* - * allocate object for the kstack - */ - ksobj = vm_object_allocate(OBJT_DEFAULT, pages); - td->td_kstack_obj = ksobj; - - /* get a kernel virtual address for the kstack for this thread */ -#ifdef KSTACK_GUARD - ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE); - if (ks == 0) - panic("pmap_new_thread: kstack allocation failed"); - if (*vtopte(ks) != 0) - pmap_qremove(ks, 1); - ks += PAGE_SIZE; - td->td_kstack = ks; -#else - /* get a kernel virtual address for the kstack for this thread */ - ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE); - if (ks == 0) - panic("pmap_new_thread: kstack allocation failed"); - td->td_kstack = ks; -#endif - /* - * Knowing the number of pages allocated is useful when you - * want to deallocate them. - */ - td->td_kstack_pages = pages; - - /* - * For the length of the stack, link in a real page of ram for each - * page of stack. 
- */ - VM_OBJECT_LOCK(ksobj); - for (i = 0; i < pages; i++) { - /* - * Get a kernel stack page - */ - m = vm_page_grab(ksobj, i, - VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); - ma[i] = m; - - vm_page_lock_queues(); - vm_page_wakeup(m); - vm_page_flag_clear(m, PG_ZERO); - m->valid = VM_PAGE_BITS_ALL; - vm_page_unlock_queues(); - } - VM_OBJECT_UNLOCK(ksobj); - pmap_qenter(ks, ma, pages); -} - -/* - * Dispose the kernel stack for a thread that has exited. - * This routine directly impacts the exit perf of a process and thread. - */ -void -pmap_dispose_thread(td) - struct thread *td; -{ - int i; - int pages; - vm_object_t ksobj; - vm_offset_t ks; - vm_page_t m; - - pages = td->td_kstack_pages; - ksobj = td->td_kstack_obj; - ks = td->td_kstack; - pmap_qremove(ks, pages); - VM_OBJECT_LOCK(ksobj); - for (i = 0; i < pages; i++) { - m = vm_page_lookup(ksobj, i); - if (m == NULL) - panic("pmap_dispose_thread: kstack already missing?"); - vm_page_lock_queues(); - vm_page_busy(m); - vm_page_unwire(m, 0); - vm_page_free(m); - vm_page_unlock_queues(); - } - VM_OBJECT_UNLOCK(ksobj); - /* - * Free the space that this stack was mapped to in the kernel - * address map. - */ -#ifdef KSTACK_GUARD - kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE); -#else - kmem_free(kernel_map, ks, pages * PAGE_SIZE); -#endif - vm_object_deallocate(ksobj); -} - -/* - * Allow the Kernel stack for a thread to be prejudicially paged out. 
- */ -void -pmap_swapout_thread(td) - struct thread *td; -{ - int i; - int pages; - vm_object_t ksobj; - vm_offset_t ks; - vm_page_t m; - - pages = td->td_kstack_pages; - ksobj = td->td_kstack_obj; - ks = td->td_kstack; - pmap_qremove(ks, pages); - VM_OBJECT_LOCK(ksobj); - for (i = 0; i < pages; i++) { - m = vm_page_lookup(ksobj, i); - if (m == NULL) - panic("pmap_swapout_thread: kstack already missing?"); - vm_page_lock_queues(); - vm_page_dirty(m); - vm_page_unwire(m, 0); - vm_page_unlock_queues(); - } - VM_OBJECT_UNLOCK(ksobj); -} - -/* - * Bring the kernel stack for a specified thread back in. - */ -void -pmap_swapin_thread(td) - struct thread *td; -{ - int i, rv; - int pages; - vm_page_t ma[KSTACK_MAX_PAGES]; - vm_object_t ksobj; - vm_offset_t ks; - vm_page_t m; - - pages = td->td_kstack_pages; - ksobj = td->td_kstack_obj; - ks = td->td_kstack; - VM_OBJECT_LOCK(ksobj); - for (i = 0; i < pages; i++) { - m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); - if (m->valid != VM_PAGE_BITS_ALL) { - rv = vm_pager_get_pages(ksobj, &m, 1, 0); - if (rv != VM_PAGER_OK) - panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid); - m = vm_page_lookup(ksobj, i); - m->valid = VM_PAGE_BITS_ALL; - } - ma[i] = m; - vm_page_lock_queues(); - vm_page_wire(m); - vm_page_wakeup(m); - vm_page_unlock_queues(); - } - VM_OBJECT_UNLOCK(ksobj); - pmap_qenter(ks, ma, pages); -} - /*************************************************** * Page table page management routines..... ***************************************************/ diff --git a/sys/i386/include/param.h b/sys/i386/include/param.h index c3b9c59..e76c52a 100644 --- a/sys/i386/include/param.h +++ b/sys/i386/include/param.h @@ -106,10 +106,9 @@ #ifndef KSTACK_PAGES #define KSTACK_PAGES 2 /* Includes pcb! */ #endif +#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */ #define UAREA_PAGES 1 /* holds struct user WITHOUT PCB (see def.) 
*/ -#define KSTACK_GUARD /* compile in the kstack guard page */ - /* * Ceiling on amount of swblock kva space, can be changed via * the kern.maxswzone /boot/loader.conf variable. |