diff options
author | kib <kib@FreeBSD.org> | 2007-11-05 11:36:16 +0000 |
---|---|---|
committer | kib <kib@FreeBSD.org> | 2007-11-05 11:36:16 +0000 |
commit | 9ae733819b7cdf0eef51ba1b18d39feb136a9dbf (patch) | |
tree | cccec36134d2cab5ce1eabb67dcaab5981e9beb0 /sys/kern | |
parent | f3f033b9b90ea07350fbe7483af3d9636cb31d1d (diff) | |
download | FreeBSD-src-9ae733819b7cdf0eef51ba1b18d39feb136a9dbf.zip FreeBSD-src-9ae733819b7cdf0eef51ba1b18d39feb136a9dbf.tar.gz |
Fix for the panic("vm_thread_new: kstack allocation failed") and
silent NULL pointer dereference in the i386 and sparc64 pmap_pinit()
when the kmem_alloc_nofault() failed to allocate address space. Both
functions now return an error instead of panicking or dereferencing NULL.
As a consequence, vmspace_exec() and vmspace_unshare() return the errno
int. struct vmspace arg was added to vm_forkproc() to avoid dealing
with failed allocation when most of the fork1() job is already done.
The kernel stack for the thread is now set up in the thread_alloc(),
that itself may return NULL. Also, allocation of the first process
thread is performed in the fork1() to properly deal with stack
allocation failure. proc_linkup() is separated into proc_linkup()
called from fork1(), and proc_linkup0(), that is used to set up the
kernel process (was known as swapper).
In collaboration with: Peter Holm
Reviewed by: jhb
Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/imgact_aout.c | 4 | ||||
-rw-r--r-- | sys/kern/imgact_elf.c | 4 | ||||
-rw-r--r-- | sys/kern/imgact_gzip.c | 6 | ||||
-rw-r--r-- | sys/kern/kern_exec.c | 4 | ||||
-rw-r--r-- | sys/kern/kern_fork.c | 41 | ||||
-rw-r--r-- | sys/kern/kern_kse.c | 43 | ||||
-rw-r--r-- | sys/kern/kern_proc.c | 28 | ||||
-rw-r--r-- | sys/kern/kern_sig.c | 2 | ||||
-rw-r--r-- | sys/kern/kern_thr.c | 2 | ||||
-rw-r--r-- | sys/kern/kern_thread.c | 27 |
10 files changed, 116 insertions, 45 deletions
diff --git a/sys/kern/imgact_aout.c b/sys/kern/imgact_aout.c index 7f202d9..45b39c2 100644 --- a/sys/kern/imgact_aout.c +++ b/sys/kern/imgact_aout.c @@ -198,9 +198,11 @@ exec_aout_imgact(imgp) /* * Destroy old process VM and create a new one (with a new stack) */ - exec_new_vmspace(imgp, &aout_sysvec); + error = exec_new_vmspace(imgp, &aout_sysvec); vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); + if (error) + return (error); /* * The vm space can be changed by exec_new_vmspace diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index b992f2b..619be4c 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -666,10 +666,12 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) */ VOP_UNLOCK(imgp->vp, 0, td); - exec_new_vmspace(imgp, sv); + error = exec_new_vmspace(imgp, sv); imgp->proc->p_sysent = sv; vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); + if (error) + return (error); vmspace = imgp->proc->p_vmspace; diff --git a/sys/kern/imgact_gzip.c b/sys/kern/imgact_gzip.c index 60a0f6c..3cd1dab 100644 --- a/sys/kern/imgact_gzip.c +++ b/sys/kern/imgact_gzip.c @@ -239,9 +239,13 @@ do_aout_hdr(struct imgact_gzip * gz) /* * Destroy old process VM and create a new one (with a new stack) */ - exec_new_vmspace(gz->ip, &aout_sysvec); + error = exec_new_vmspace(gz->ip, &aout_sysvec); vn_lock(gz->ip->vp, LK_EXCLUSIVE | LK_RETRY, td); + if (error) { + gz->where = __LINE__; + return (error); + } vmspace = gz->ip->proc->p_vmspace; diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index d2798db..80ef703 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -914,7 +914,9 @@ exec_new_vmspace(imgp, sv) pmap_remove_pages(vmspace_pmap(vmspace)); vm_map_remove(map, vm_map_min(map), vm_map_max(map)); } else { - vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser); + error = vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser); + if (error) + return (error); vmspace = p->p_vmspace; map = &vmspace->vm_map; } diff --git a/sys/kern/kern_fork.c 
b/sys/kern/kern_fork.c index 917de57..ab7ca8b 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -195,6 +195,7 @@ fork1(td, flags, pages, procp) struct filedesc_to_leader *fdtol; struct thread *td2; struct sigacts *newsigacts; + struct vmspace *vm2; int error; /* Can't copy and clear. */ @@ -218,7 +219,9 @@ fork1(td, flags, pages, procp) PROC_UNLOCK(p1); } - vm_forkproc(td, NULL, NULL, flags); + error = vm_forkproc(td, NULL, NULL, NULL, flags); + if (error) + goto norfproc_fail; /* * Close all file descriptors. @@ -236,6 +239,7 @@ fork1(td, flags, pages, procp) if (flags & RFFDG) fdunshare(p1, td); +norfproc_fail: if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) && (flags & (RFCFDG | RFFDG))) { PROC_LOCK(p1); @@ -243,7 +247,7 @@ fork1(td, flags, pages, procp) PROC_UNLOCK(p1); } *procp = NULL; - return (0); + return (error); } /* @@ -254,6 +258,32 @@ fork1(td, flags, pages, procp) /* Allocate new proc. */ newproc = uma_zalloc(proc_zone, M_WAITOK); + if (TAILQ_EMPTY(&newproc->p_threads)) { + td2 = thread_alloc(); + if (td2 == NULL) { + error = ENOMEM; + goto fail1; + } + proc_linkup(newproc, td2); + sched_newproc(newproc, td2); + } else + td2 = FIRST_THREAD_IN_PROC(newproc); + + /* Allocate and switch to an alternate kstack if specified. */ + if (pages != 0) { + if (!vm_thread_new_altkstack(td2, pages)) { + error = ENOMEM; + goto fail1; + } + } + if ((flags & RFMEM) == 0) { + vm2 = vmspace_fork(p1->p_vmspace); + if (vm2 == NULL) { + error = ENOMEM; + goto fail1; + } + } else + vm2 = NULL; #ifdef MAC mac_proc_init(newproc); #endif @@ -380,7 +410,6 @@ again: lastpid = trypid; p2 = newproc; - td2 = FIRST_THREAD_IN_PROC(newproc); p2->p_state = PRS_NEW; /* protect against others */ p2->p_pid = trypid; /* @@ -456,9 +485,6 @@ again: * Start by zeroing the section of proc that is zero-initialized, * then copy the section that is copied directly from the parent. */ - /* Allocate and switch to an alternate kstack if specified. 
*/ - if (pages != 0) - vm_thread_new_altkstack(td2, pages); PROC_LOCK(p2); PROC_LOCK(p1); @@ -630,7 +656,7 @@ again: * Finish creating the child process. It will return via a different * execution path later. (ie: directly into user mode) */ - vm_forkproc(td, p2, td2, flags); + vm_forkproc(td, p2, td2, vm2, flags); if (flags == (RFFDG | RFPROC)) { PCPU_INC(cnt.v_forks); @@ -713,6 +739,7 @@ fail: #ifdef MAC mac_proc_destroy(newproc); #endif +fail1: uma_zfree(proc_zone, newproc); pause("fork", hz / 2); return (error); diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c index 4174bde..b044605 100644 --- a/sys/kern/kern_kse.c +++ b/sys/kern/kern_kse.c @@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$"); #include <sys/sched.h> #include <sys/signalvar.h> #include <sys/sleepqueue.h> +#include <sys/syslog.h> #include <sys/kse.h> #include <sys/ktr.h> #include <vm/uma.h> @@ -64,7 +65,7 @@ TAILQ_HEAD(, kse_upcall) zombie_upcalls = TAILQ_HEAD_INITIALIZER(zombie_upcalls); static int thread_update_usr_ticks(struct thread *td); -static void thread_alloc_spare(struct thread *td); +static int thread_alloc_spare(struct thread *td); static struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku); static struct kse_upcall *upcall_alloc(void); @@ -648,6 +649,16 @@ kse_create(struct thread *td, struct kse_create_args *uap) PROC_UNLOCK(p); } + /* + * For the first call this may not have been set. + * Of course nor may it actually be needed. + * thread_schedule_upcall() will look for it. + */ + if (td->td_standin == NULL) { + if (!thread_alloc_spare(td)) + return (ENOMEM); + } + /* * Even bound LWPs get a mailbox and an upcall to hold it. * XXX This should change. @@ -657,13 +668,6 @@ kse_create(struct thread *td, struct kse_create_args *uap) newku->ku_func = mbx.km_func; bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t)); - /* - * For the first call this may not have been set. - * Of course nor may it actually be needed. - * thread_schedule_upcall() will look for it. 
- */ - if (td->td_standin == NULL) - thread_alloc_spare(td); PROC_LOCK(p); PROC_SLOCK(p); /* @@ -989,20 +993,23 @@ error: * XXX BUG.. we need to get the cr ref after the thread has * checked and chenged its own, not 6 months before... */ -void +int thread_alloc_spare(struct thread *td) { struct thread *spare; if (td->td_standin) - return; + return (1); spare = thread_alloc(); + if (spare == NULL) + return (0); td->td_standin = spare; bzero(&spare->td_startzero, __rangeof(struct thread, td_startzero, td_endzero)); spare->td_proc = td->td_proc; spare->td_ucred = crhold(td->td_ucred); spare->td_flags = TDF_INMEM; + return (1); } /* @@ -1170,8 +1177,18 @@ thread_user_enter(struct thread *td) KASSERT(ku->ku_owner == td, ("wrong owner")); KASSERT(!TD_CAN_UNBIND(td), ("can unbind")); - if (td->td_standin == NULL) - thread_alloc_spare(td); + if (td->td_standin == NULL) { + if (!thread_alloc_spare(td)) { + PROC_LOCK(p); + if (kern_logsigexit) + log(LOG_INFO, + "pid %d (%s), uid %d: thread_alloc_spare failed\n", + p->p_pid, p->p_comm, + td->td_ucred ? td->td_ucred->cr_uid : -1); + sigexit(td, SIGSEGV); /* XXX ? */ + /* panic("thread_user_enter: thread_alloc_spare failed"); */ + } + } ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags); tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); if ((tmbx == NULL) || (tmbx == (void *)-1L) || @@ -1385,7 +1402,7 @@ out: * for when we re-enter the kernel. */ if (td->td_standin == NULL) - thread_alloc_spare(td); + thread_alloc_spare(td); /* XXX care of failure ? 
*/ } ku->ku_mflags = 0; diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index f75112f..71e2b10 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -145,20 +145,21 @@ proc_dtor(void *mem, int size, void *arg) /* INVARIANTS checks go here */ p = (struct proc *)mem; td = FIRST_THREAD_IN_PROC(p); + if (td != NULL) { #ifdef INVARIANTS - KASSERT((p->p_numthreads == 1), - ("bad number of threads in exiting process")); - KASSERT((td != NULL), ("proc_dtor: bad thread pointer")); - KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr")); + KASSERT((p->p_numthreads == 1), + ("bad number of threads in exiting process")); + KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr")); #endif - /* Dispose of an alternate kstack, if it exists. - * XXX What if there are more than one thread in the proc? - * The first thread in the proc is special and not - * freed, so you gotta do this here. - */ - if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0)) - vm_thread_dispose_altkstack(td); + /* Dispose of an alternate kstack, if it exists. + * XXX What if there are more than one thread in the proc? + * The first thread in the proc is special and not + * freed, so you gotta do this here. + */ + if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0)) + vm_thread_dispose_altkstack(td); + } if (p->p_ksi != NULL) KASSERT(! 
KSI_ONQ(p->p_ksi), ("SIGCHLD queue")); } @@ -170,17 +171,14 @@ static int proc_init(void *mem, int size, int flags) { struct proc *p; - struct thread *td; p = (struct proc *)mem; p->p_sched = (struct p_sched *)&p[1]; - td = thread_alloc(); bzero(&p->p_mtx, sizeof(struct mtx)); mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE); + TAILQ_INIT(&p->p_threads); /* all threads in proc */ p->p_stats = pstats_alloc(); - proc_linkup(p, td); - sched_newproc(p, td); return (0); } diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 3b213a6..239c420 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -102,7 +102,7 @@ static uma_zone_t ksiginfo_zone = NULL; struct filterops sig_filtops = { 0, filt_sigattach, filt_sigdetach, filt_signal }; -static int kern_logsigexit = 1; +int kern_logsigexit = 1; SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW, &kern_logsigexit, 0, "Log processes quitting on abnormal signals to syslog(3)"); diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c index de45d0a..6bf0ded 100644 --- a/sys/kern/kern_thr.c +++ b/sys/kern/kern_thr.c @@ -175,6 +175,8 @@ create_thread(struct thread *td, mcontext_t *ctx, /* Initialize our td */ newtd = thread_alloc(); + if (newtd == NULL) + return (ENOMEM); /* * Try the copyout as soon as we allocate the td so we don't diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 2a28823..cde764f 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -181,13 +181,12 @@ thread_init(void *mem, int size, int flags) td = (struct thread *)mem; - vm_thread_new(td, 0); - cpu_thread_setup(td); td->td_sleepqueue = sleepq_alloc(); td->td_turnstile = turnstile_alloc(); td->td_sched = (struct td_sched *)&td[1]; sched_newthread(td); umtx_thread_init(td); + td->td_kstack = 0; return (0); } @@ -203,7 +202,6 @@ thread_fini(void *mem, int size) turnstile_free(td->td_turnstile); sleepq_free(td->td_sleepqueue); 
umtx_thread_fini(td); - vm_thread_dispose(td); } /* @@ -215,10 +213,16 @@ thread_fini(void *mem, int size) * proc_init() */ void +proc_linkup0(struct proc *p, struct thread *td) +{ + TAILQ_INIT(&p->p_threads); /* all threads in proc */ + proc_linkup(p, td); +} + +void proc_linkup(struct proc *p, struct thread *td) { - TAILQ_INIT(&p->p_threads); /* all threads in proc */ #ifdef KSE TAILQ_INIT(&p->p_upcalls); /* upcall list */ #endif @@ -310,9 +314,18 @@ thread_reap(void) struct thread * thread_alloc(void) { + struct thread *td; thread_reap(); /* check if any zombies to get */ - return (uma_zalloc(thread_zone, M_WAITOK)); + + td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK); + KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); + if (!vm_thread_new(td, 0)) { + uma_zfree(thread_zone, td); + return (NULL); + } + cpu_thread_setup(td); + return (td); } @@ -324,6 +337,10 @@ thread_free(struct thread *td) { cpu_thread_clean(td); + if (td->td_altkstack != 0) + vm_thread_dispose_altkstack(td); + if (td->td_kstack != 0) + vm_thread_dispose(td); uma_zfree(thread_zone, td); } |