author		kib <kib@FreeBSD.org>	2007-11-05 11:36:16 +0000
committer	kib <kib@FreeBSD.org>	2007-11-05 11:36:16 +0000
commit		9ae733819b7cdf0eef51ba1b18d39feb136a9dbf (patch)
tree		cccec36134d2cab5ce1eabb67dcaab5981e9beb0 /sys/vm
parent		f3f033b9b90ea07350fbe7483af3d9636cb31d1d (diff)
Fix the panic("vm_thread_new: kstack allocation failed") and the silent
NULL pointer dereference in the i386 and sparc64 pmap_pinit() when
kmem_alloc_nofault() fails to allocate address space. Both functions now
return an error instead of panicking or dereferencing NULL.

As a consequence, vmspace_exec() and vmspace_unshare() return an errno
int. A struct vmspace argument was added to vm_forkproc() to avoid having
to deal with a failed allocation after most of the fork1() work is
already done.

The kernel stack for the thread is now set up in thread_alloc(), which
itself may return NULL. Also, allocation of the first process thread is
performed in fork1() so that a stack allocation failure can be handled
properly. proc_linkup() is split into proc_linkup(), called from fork1(),
and proc_linkup0(), which is used to set up the kernel process (formerly
known as the swapper).

In collaboration with:	Peter Holm
Reviewed by:	jhb
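The core of the change is an allocate-early pattern in fork1(): everything
that can fail (the child vmspace, the thread and its kstack) is obtained
before any irreversible fork work. The following is a hedged, condensed
sketch of that flow, not the verbatim change; fork1()'s locking, the other
resource allocations, and the real error labels are omitted.

	/*
	 * Sketch of the fork1() allocation order after this commit.
	 * Resources that can fail are acquired up front, so failure
	 * unwinds cleanly instead of panicking mid-fork.
	 */
	struct vmspace *vm2 = NULL;
	struct thread *td2;
	int error;

	if ((flags & RFPROC) != 0 && (flags & RFMEM) == 0) {
		vm2 = vmspace_fork(p1->p_vmspace);	/* may now return NULL */
		if (vm2 == NULL) {
			error = ENOMEM;
			goto fail;
		}
	}
	td2 = thread_alloc();		/* kstack set up here; may fail */
	if (td2 == NULL) {
		error = ENOMEM;
		goto fail;
	}
	/* ... irreversible fork work happens only past this point ... */
	error = vm_forkproc(td, p2, td2, vm2, flags);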
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/pmap.h		|  2
-rw-r--r--	sys/vm/vm_extern.h	| 10
-rw-r--r--	sys/vm/vm_glue.c	| 33
-rw-r--r--	sys/vm/vm_map.c		| 29
4 files changed, 51 insertions(+), 23 deletions(-)
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 44c6a82..3d9045f 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -114,7 +114,7 @@ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
void pmap_page_init(vm_page_t m);
-void pmap_pinit(pmap_t);
+int pmap_pinit(pmap_t);
void pmap_pinit0(pmap_t);
void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void pmap_qenter(vm_offset_t, vm_page_t *, int);
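With pmap_pinit() returning int, an architecture's implementation can
report an address-space allocation failure instead of dereferencing a NULL
mapping. A hedged sketch of the i386-style pattern follows; the real i386
and sparc64 bodies differ, but the convention is 0 on failure and 1 on
success, matching the vmspace_alloc() check further below.

	int
	pmap_pinit(pmap_t pmap)
	{
		vm_offset_t va;

		/* Allocating KVA for the page directory can fail. */
		va = kmem_alloc_nofault(kernel_map, NBPTD);
		if (va == 0)
			return (0);	/* caller unwinds; no panic */
		/* ... allocate and enter page directory pages at va ... */
		return (1);
	}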
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 31f6e23..27580bd 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -70,14 +70,14 @@ int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t);
void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
-void vm_forkproc(struct thread *, struct proc *, struct thread *, int);
+int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
void vm_set_page_size(void);
struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
struct vmspace *vmspace_fork(struct vmspace *);
-void vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
-void vmspace_unshare(struct proc *);
+int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
+int vmspace_unshare(struct proc *);
void vmspace_exit(struct thread *);
struct vmspace *vmspace_acquire_ref(struct proc *);
void vmspace_free(struct vmspace *);
@@ -92,8 +92,8 @@ struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
void vm_imgact_unmap_page(struct sf_buf *sf);
void vm_thread_dispose(struct thread *td);
void vm_thread_dispose_altkstack(struct thread *td);
-void vm_thread_new(struct thread *td, int pages);
-void vm_thread_new_altkstack(struct thread *td, int pages);
+int vm_thread_new(struct thread *td, int pages);
+int vm_thread_new_altkstack(struct thread *td, int pages);
void vm_thread_swapin(struct thread *td);
void vm_thread_swapout(struct thread *td);
#endif /* _KERNEL */
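The non-void prototypes push failure handling out to callers. For example,
the commit moves kstack setup into thread_alloc(); here is a hedged sketch
of how such a caller checks vm_thread_new() (simplified; the real
kern_thread.c routine also performs machine-dependent setup):

	struct thread *
	thread_alloc(void)
	{
		struct thread *td;

		td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
		KASSERT(td->td_kstack == 0,
		    ("thread_alloc got thread with kstack"));
		if (!vm_thread_new(td, 0)) {
			/* Kstack allocation failed: recycle the thread. */
			uma_zfree(thread_zone, td);
			return (NULL);
		}
		/* ... machine-dependent thread setup ... */
		return (td);
	}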
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index caa8aa0..258b886 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -321,7 +321,7 @@ vm_imgact_unmap_page(struct sf_buf *sf)
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
-void
+int
vm_thread_new(struct thread *td, int pages)
{
vm_object_t ksobj;
@@ -338,18 +338,22 @@ vm_thread_new(struct thread *td, int pages)
* Allocate an object for the kstack.
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
- td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for this thread's kstack.
*/
ks = kmem_alloc_nofault(kernel_map,
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
- if (ks == 0)
- panic("vm_thread_new: kstack allocation failed");
+ if (ks == 0) {
+ printf("vm_thread_new: kstack allocation failed\n");
+ vm_object_deallocate(ksobj);
+ return (0);
+ }
+
if (KSTACK_GUARD_PAGES != 0) {
pmap_qremove(ks, KSTACK_GUARD_PAGES);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
}
+ td->td_kstack_obj = ksobj;
td->td_kstack = ks;
/*
* Knowing the number of pages allocated is useful when you
@@ -372,6 +376,7 @@ vm_thread_new(struct thread *td, int pages)
}
VM_OBJECT_UNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
+ return (1);
}
/*
@@ -403,6 +408,7 @@ vm_thread_dispose(struct thread *td)
vm_object_deallocate(ksobj);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ td->td_kstack = 0;
}
/*
@@ -468,7 +474,7 @@ vm_thread_swapin(struct thread *td)
/*
* Set up a variable-sized alternate kstack.
*/
-void
+int
vm_thread_new_altkstack(struct thread *td, int pages)
{
@@ -476,7 +482,7 @@ vm_thread_new_altkstack(struct thread *td, int pages)
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack_pages = td->td_kstack_pages;
- vm_thread_new(td, pages);
+ return (vm_thread_new(td, pages));
}
/*
@@ -504,14 +510,16 @@ vm_thread_dispose_altkstack(struct thread *td)
* ready to run. The new process is set up so that it returns directly
* to user mode to avoid stack copying and relocation problems.
*/
-void
-vm_forkproc(td, p2, td2, flags)
+int
+vm_forkproc(td, p2, td2, vm2, flags)
struct thread *td;
struct proc *p2;
struct thread *td2;
+ struct vmspace *vm2;
int flags;
{
struct proc *p1 = td->td_proc;
+ int error;
if ((flags & RFPROC) == 0) {
/*
@@ -521,11 +529,13 @@ vm_forkproc(td, p2, td2, flags)
*/
if ((flags & RFMEM) == 0) {
if (p1->p_vmspace->vm_refcnt > 1) {
- vmspace_unshare(p1);
+ error = vmspace_unshare(p1);
+ if (error)
+ return (error);
}
}
cpu_fork(td, p2, td2, flags);
- return;
+ return (0);
}
if (flags & RFMEM) {
@@ -538,7 +548,7 @@ vm_forkproc(td, p2, td2, flags)
}
if ((flags & RFMEM) == 0) {
- p2->p_vmspace = vmspace_fork(p1->p_vmspace);
+ p2->p_vmspace = vm2;
if (p1->p_vmspace->vm_shm)
shmfork(p1, p2);
}
@@ -548,6 +558,7 @@ vm_forkproc(td, p2, td2, flags)
* and make the child ready to run.
*/
cpu_fork(td, p2, td2, flags);
+ return (0);
}
/*
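A note on the guard-page arithmetic in the vm_thread_new() and
vm_thread_dispose() hunks above: td_kstack points past the unmapped guard
region, so teardown subtracts it back before freeing. An illustrative
layout, assuming KSTACK_GUARD_PAGES = 1 (the constant is
machine-dependent):

	/*
	 * kmem_alloc_nofault() returns `base` covering
	 * (pages + KSTACK_GUARD_PAGES) pages:
	 *
	 *   base                base + GUARD*PAGE_SIZE
	 *   |-- guard, unmapped --|------ `pages` mapped kstack pages ----|
	 *                          ^
	 *                          td_kstack
	 *
	 * vm_thread_dispose() therefore frees starting at
	 * td_kstack - KSTACK_GUARD_PAGES * PAGE_SIZE, for
	 * (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE bytes, matching the
	 * hunk above.
	 */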
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 4b31fb3..96e7411 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -197,7 +197,6 @@ vmspace_zfini(void *mem, int size)
struct vmspace *vm;
vm = (struct vmspace *)mem;
- pmap_release(vmspace_pmap(vm));
vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}
@@ -208,8 +207,8 @@ vmspace_zinit(void *mem, int size, int flags)
vm = (struct vmspace *)mem;
+ vm->vm_map.pmap = NULL;
(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
- pmap_pinit(vmspace_pmap(vm));
return (0);
}
@@ -272,6 +271,10 @@ vmspace_alloc(min, max)
struct vmspace *vm;
vm = uma_zalloc(vmspace_zone, M_WAITOK);
+ if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) {
+ uma_zfree(vmspace_zone, vm);
+ return (NULL);
+ }
CTR1(KTR_VM, "vmspace_alloc: %p", vm);
_vm_map_init(&vm->vm_map, min, max);
vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */
@@ -321,6 +324,12 @@ vmspace_dofree(struct vmspace *vm)
(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
vm->vm_map.max_offset);
+ /*
+ * XXX Comment out the pmap_release call for now. The
+ * vmspace_zone is marked as UMA_ZONE_NOFREE, and bugs cause
+ * pmap.resident_count to be != 0 on exit sometimes.
+ */
+/* pmap_release(vmspace_pmap(vm)); */
uma_zfree(vmspace_zone, vm);
}
@@ -2584,6 +2593,8 @@ vmspace_fork(struct vmspace *vm1)
vm_map_lock(old_map);
vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
+ if (vm2 == NULL)
+ goto unlock_and_return;
vm2->vm_taddr = vm1->vm_taddr;
vm2->vm_daddr = vm1->vm_daddr;
vm2->vm_maxsaddr = vm1->vm_maxsaddr;
@@ -2675,7 +2686,7 @@ vmspace_fork(struct vmspace *vm1)
}
old_entry = old_entry->next;
}
-
+unlock_and_return:
vm_map_unlock(old_map);
return (vm2);
@@ -3003,13 +3014,15 @@ Retry:
* Unshare the specified VM space for exec. If other processes are
* mapped to it, then create a new one. The new vmspace is null.
*/
-void
+int
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
newvmspace = vmspace_alloc(minuser, maxuser);
+ if (newvmspace == NULL)
+ return (ENOMEM);
newvmspace->vm_swrss = oldvmspace->vm_swrss;
/*
* This code is written like this for prototype purposes. The
@@ -3024,27 +3037,31 @@ vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
if (p == curthread->td_proc) /* XXXKSE ? */
pmap_activate(curthread);
vmspace_free(oldvmspace);
+ return (0);
}
/*
* Unshare the specified VM space for forcing COW. This
* is called by rfork, for the (RFMEM|RFPROC) == 0 case.
*/
-void
+int
vmspace_unshare(struct proc *p)
{
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
if (oldvmspace->vm_refcnt == 1)
- return;
+ return (0);
newvmspace = vmspace_fork(oldvmspace);
+ if (newvmspace == NULL)
+ return (ENOMEM);
PROC_VMSPACE_LOCK(p);
p->p_vmspace = newvmspace;
PROC_VMSPACE_UNLOCK(p);
if (p == curthread->td_proc) /* XXXKSE ? */
pmap_activate(curthread);
vmspace_free(oldvmspace);
+ return (0);
}
/*
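Finally, callers of the now-fallible vmspace_exec() and vmspace_unshare()
must propagate ENOMEM. A hedged sketch of the exec-side call (field names
taken from struct sysentvec; the real exec_new_vmspace() differs in
detail):

	int error;

	/* Replacing the address space can now fail instead of panicking. */
	error = vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
	if (error)
		return (error);
	map = &p->p_vmspace->vm_map;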