diff options
Diffstat (limited to 'sys/amd64')
-rw-r--r-- | sys/amd64/amd64/pmap.c | 170 |
1 file changed, 0 insertions, 170 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index 7b673e6..9e3c6c3 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -820,176 +820,6 @@ retry: } /* - * Create the Uarea stack for a new process. - * This routine directly affects the fork perf for a process. - */ -void -pmap_new_proc(struct proc *p) -{ -#ifdef I386_CPU - int updateneeded = 0; -#endif - int i; - vm_object_t upobj; - vm_offset_t up; - vm_page_t m; - pt_entry_t *ptek, oldpte; - - /* - * allocate object for the upage - */ - upobj = p->p_upages_obj; - if (upobj == NULL) { - upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES); - p->p_upages_obj = upobj; - } - - /* get a kernel virtual address for the U area for this thread */ - up = (vm_offset_t)p->p_uarea; - if (up == 0) { - up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE); - if (up == 0) - panic("pmap_new_proc: upage allocation failed"); - p->p_uarea = (struct user *)up; - } - - ptek = vtopte(up); - - for (i = 0; i < UAREA_PAGES; i++) { - /* - * Get a kernel page for the uarea - */ - m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); - - /* - * Wire the page - */ - m->wire_count++; - cnt.v_wire_count++; - - /* - * Enter the page into the kernel address space. - */ - oldpte = ptek[i]; - ptek[i] = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag; - if (oldpte) { -#ifdef I386_CPU - updateneeded = 1; -#else - invlpg(up + i * PAGE_SIZE); -#endif - } - - vm_page_wakeup(m); - vm_page_flag_clear(m, PG_ZERO); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); - m->valid = VM_PAGE_BITS_ALL; - } -#ifdef I386_CPU - if (updateneeded) - invltlb(); -#endif -} - -/* - * Dispose the U-Area for a process that has exited. - * This routine directly impacts the exit perf of a process. 
- */ -void -pmap_dispose_proc(p) - struct proc *p; -{ - int i; - vm_object_t upobj; - vm_offset_t up; - vm_page_t m; - pt_entry_t *ptek; - - upobj = p->p_upages_obj; - up = (vm_offset_t)p->p_uarea; - ptek = vtopte(up); - for (i = 0; i < UAREA_PAGES; i++) { - m = vm_page_lookup(upobj, i); - if (m == NULL) - panic("pmap_dispose_proc: upage already missing?"); - vm_page_busy(m); - ptek[i] = 0; -#ifndef I386_CPU - invlpg(up + i * PAGE_SIZE); -#endif - vm_page_unwire(m, 0); - vm_page_free(m); - } -#ifdef I386_CPU - invltlb(); -#endif - - /* - * If the process got swapped out some of its UPAGES might have gotten - * swapped. Just get rid of the object to clean up the swap use - * proactively. NOTE! might block waiting for paging I/O to complete. - */ - if (upobj->type == OBJT_SWAP) { - p->p_upages_obj = NULL; - vm_object_deallocate(upobj); - } -} - -/* - * Allow the U_AREA for a process to be prejudicially paged out. - */ -void -pmap_swapout_proc(p) - struct proc *p; -{ - int i; - vm_object_t upobj; - vm_offset_t up; - vm_page_t m; - - upobj = p->p_upages_obj; - up = (vm_offset_t)p->p_uarea; - for (i = 0; i < UAREA_PAGES; i++) { - m = vm_page_lookup(upobj, i); - if (m == NULL) - panic("pmap_swapout_proc: upage already missing?"); - vm_page_dirty(m); - vm_page_unwire(m, 0); - pmap_kremove(up + i * PAGE_SIZE); - } -} - -/* - * Bring the U-Area for a specified process back in. 
- */ -void -pmap_swapin_proc(p) - struct proc *p; -{ - int i, rv; - vm_object_t upobj; - vm_offset_t up; - vm_page_t m; - - upobj = p->p_upages_obj; - up = (vm_offset_t)p->p_uarea; - for (i = 0; i < UAREA_PAGES; i++) { - m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); - pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); - if (m->valid != VM_PAGE_BITS_ALL) { - rv = vm_pager_get_pages(upobj, &m, 1, 0); - if (rv != VM_PAGER_OK) - panic("pmap_swapin_proc: cannot get upage for proc: %d\n", p->p_pid); - m = vm_page_lookup(upobj, i); - m->valid = VM_PAGE_BITS_ALL; - } - vm_page_wire(m); - vm_page_wakeup(m); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); - } -} - -/* * Create the kernel stack (including pcb for i386) for a new thread. * This routine directly affects the fork perf for a process and * create performance for a thread. |