author    davidxu <davidxu@FreeBSD.org>  2005-11-08 09:09:26 +0000
committer davidxu <davidxu@FreeBSD.org>  2005-11-08 09:09:26 +0000
commit    37bb48367998c0be2f8c72d721837dad4be5ae7b (patch)
tree      13693dc8f2ef07d2346275020e67f9f5c512abc9 /sys/kern
parent    9349a52f4187301776450d07f77fbf8af30d7c87 (diff)
Add support for queueing SIGCHLD, as other UNIX systems do.

For each child process whose status has changed, one SIGCHLD instance is
queued; if the signal is still pending and the process changes status again,
the signal information is updated to reflect the latest process status. If
wait() returns because the status of a child process is available, the pending
SIGCHLD signal associated with that child process is discarded; any other
pending SIGCHLD signals remain pending.

The signal information is allocated when the proc structure is allocated, so
the signal can still be sent to the parent even if the process signal queue is
full or there is a memory shortage.

A boot-time tunable, kern.sigqueue.queue_sigchild, controls this behavior;
setting it to zero disables SIGCHLD queueing. The tunable will be removed once
the feature has proven stable enough.

Tested on: i386 (SMP and UP)
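As an illustration only (not part of this commit), the following minimal
userland sketch shows how the queued SIGCHLD information could be observed
through an SA_SIGINFO handler, assuming the feature has been enabled with
kern.sigqueue.queue_sigchild=1 at boot time; the forked children and their
exit statuses are made up for the example.

/*
 * Illustrative sketch only -- not part of this commit.  With SIGCHLD
 * queueing enabled, each child whose status changes should be reported
 * through its own siginfo (si_pid, si_status, si_code).
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
chld_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* printf() is not async-signal-safe; used here for brevity only. */
	printf("SIGCHLD: pid=%d status=%d code=%d\n",
	    (int)si->si_pid, si->si_status, si->si_code);
}

int
main(void)
{
	struct sigaction sa;
	int i;

	sa.sa_sigaction = chld_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);

	for (i = 0; i < 3; i++) {
		if (fork() == 0)
			_exit(i);	/* each child exits with a distinct status */
	}

	/*
	 * wait() discards the pending SIGCHLD for the child it reaps;
	 * SIGCHLD instances for other children remain pending.
	 */
	while (wait(NULL) > 0)
		;
	return (0);
}

Without queueing (the tunable set to zero), several child exits can collapse
into a single pending SIGCHLD, so a single handler invocation may have to reap
more than one child.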
Diffstat (limited to 'sys/kern')
 -rw-r--r--  sys/kern/kern_exit.c    | 28
 -rw-r--r--  sys/kern/kern_proc.c    |  4
 -rw-r--r--  sys/kern/kern_sig.c     | 97
 -rw-r--r--  sys/kern/kern_thread.c  | 15
 4 files changed, 131 insertions(+), 13 deletions(-)
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 6107a00..191bb7c 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -173,6 +173,11 @@ retry:
}
p->p_flag |= P_WEXIT;
+
+ PROC_LOCK(p->p_pptr);
+ sigqueue_take(p->p_ksi);
+ PROC_UNLOCK(p->p_pptr);
+
PROC_UNLOCK(p);
/* Are we a task leader? */
@@ -480,8 +485,12 @@ retry:
if (p->p_pptr == initproc)
psignal(p->p_pptr, SIGCHLD);
- else if (p->p_sigparent != 0)
- psignal(p->p_pptr, p->p_sigparent);
+ else if (p->p_sigparent != 0) {
+ if (p->p_sigparent == SIGCHLD)
+ childproc_exited(p);
+ else /* LINUX thread */
+ psignal(p->p_pptr, p->p_sigparent);
+ }
PROC_UNLOCK(p->p_pptr);
/*
@@ -659,6 +668,10 @@ loop:
calcru(p, &rusage->ru_utime, &rusage->ru_stime);
}
+ PROC_LOCK(q);
+ sigqueue_take(p->p_ksi);
+ PROC_UNLOCK(q);
+
/*
* If we got the child via a ptrace 'attach',
* we need to give it back to the old parent.
@@ -669,7 +682,7 @@ loop:
p->p_oppid = 0;
proc_reparent(p, t);
PROC_UNLOCK(p);
- psignal(t, SIGCHLD);
+ tdsignal(t, NULL, SIGCHLD, p->p_ksi);
wakeup(t);
PROC_UNLOCK(t);
sx_xunlock(&proctree_lock);
@@ -751,6 +764,11 @@ loop:
if (status)
*status = W_STOPCODE(p->p_xstat);
PROC_UNLOCK(p);
+
+ PROC_LOCK(q);
+ sigqueue_take(p->p_ksi);
+ PROC_UNLOCK(q);
+
return (0);
}
mtx_unlock_spin(&sched_lock);
@@ -760,6 +778,10 @@ loop:
p->p_flag &= ~P_CONTINUED;
PROC_UNLOCK(p);
+ PROC_LOCK(q);
+ sigqueue_take(p->p_ksi);
+ PROC_UNLOCK(q);
+
if (status)
*status = SIGCONT;
return (0);
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 5caef58..43f1c0e 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -165,6 +165,8 @@ proc_dtor(void *mem, int size, void *arg)
*/
if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
vm_thread_dispose_altkstack(td);
+ if (p->p_ksi != NULL)
+ KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
}
/*
@@ -204,6 +206,8 @@ proc_fini(void *mem, int size)
ksegrp_free(FIRST_KSEGRP_IN_PROC(p));
thread_free(FIRST_THREAD_IN_PROC(p));
mtx_destroy(&p->p_mtx);
+ if (p->p_ksi != NULL)
+ ksiginfo_free(p->p_ksi);
#else
panic("proc reclaimed");
#endif
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index e422df4..8efd21d 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -214,10 +214,15 @@ sigqueue_start(void)
}
ksiginfo_t *
-ksiginfo_alloc(void)
+ksiginfo_alloc(int wait)
{
+ int flags;
+
+ flags = M_ZERO;
+ if (! wait)
+ flags |= M_NOWAIT;
if (ksiginfo_zone != NULL)
- return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, M_NOWAIT | M_ZERO));
+ return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
return (NULL);
}
@@ -291,7 +296,7 @@ sigqueue_take(ksiginfo_t *ksi)
struct proc *p;
sigqueue_t *sq;
- if ((sq = ksi->ksi_sigq) == NULL)
+ if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
return;
p = sq->sq_proc;
@@ -331,10 +336,10 @@ sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
if (__predict_false(ksiginfo_zone == NULL))
goto out_set_bit;
- if (p != NULL && p->p_pendingcnt > max_pending_per_proc) {
+ if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
signal_overflow++;
ret = EAGAIN;
- } else if ((ksi = ksiginfo_alloc()) == NULL) {
+ } else if ((ksi = ksiginfo_alloc(0)) == NULL) {
signal_alloc_fail++;
ret = EAGAIN;
} else {
@@ -2106,7 +2111,12 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
return (ret);
}
sigqueue_delete_proc(p, SIGCONT);
- p->p_flag &= ~P_CONTINUED;
+ if (p->p_flag & P_CONTINUED) {
+ p->p_flag &= ~P_CONTINUED;
+ PROC_LOCK(p->p_pptr);
+ sigqueue_take(p->p_ksi);
+ PROC_UNLOCK(p->p_pptr);
+ }
}
ret = sigqueue_add(sigqueue, sig, ksi);
@@ -2174,7 +2184,10 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* Otherwise, process goes back to sleep state.
*/
p->p_flag &= ~P_STOPPED_SIG;
- p->p_flag |= P_CONTINUED;
+ if (p->p_numthreads == p->p_suspcount) {
+ p->p_flag |= P_CONTINUED;
+ childproc_continued(p);
+ }
if (action == SIG_DFL) {
sigqueue_delete(sigqueue, sig);
} else if (action == SIG_CATCH) {
@@ -2249,12 +2262,19 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
(td0->td_flags & TDF_SINTR) &&
!TD_IS_SUSPENDED(td0)) {
thread_suspend_one(td0);
- } else if (td != td0) {
+ } else {
td0->td_flags |= TDF_ASTPENDING;
}
}
- thread_stopped(p);
if (p->p_numthreads == p->p_suspcount) {
+ /*
+ * only thread sending signal to another
+ * process can reach here, if thread is sending
+ * signal to its process, because thread does
+ * not suspend itself here, p_numthreads
+ * should never be equal to p_suspcount.
+ */
+ thread_stopped(p);
mtx_unlock_spin(&sched_lock);
sigqueue_delete_proc(p, p->p_xstat);
} else
@@ -2646,7 +2666,9 @@ thread_stopped(struct proc *p)
mtx_lock(&ps->ps_mtx);
if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
mtx_unlock(&ps->ps_mtx);
- psignal(p->p_pptr, SIGCHLD);
+ childproc_stopped(p,
+ (p->p_flag & P_TRACED) ?
+ CLD_TRAPPED : CLD_STOPPED);
} else
mtx_unlock(&ps->ps_mtx);
PROC_UNLOCK(p->p_pptr);
@@ -2826,6 +2848,61 @@ sigexit(td, sig)
/* NOTREACHED */
}
+/*
+ * Send queued SIGCHLD to parent when child process is stopped
+ * or exited.
+ */
+void
+childproc_stopped(struct proc *p, int reason)
+{
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
+
+ if (p->p_ksi != NULL) {
+ p->p_ksi->ksi_signo = SIGCHLD;
+ p->p_ksi->ksi_code = reason;
+ p->p_ksi->ksi_status = p->p_xstat;
+ p->p_ksi->ksi_pid = p->p_pid;
+ p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
+ if (KSI_ONQ(p->p_ksi))
+ return;
+ }
+ tdsignal(p->p_pptr, NULL, SIGCHLD, p->p_ksi);
+}
+
+void
+childproc_continued(struct proc *p)
+{
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ PROC_LOCK_ASSERT(p->p_pptr, MA_NOTOWNED);
+
+ PROC_LOCK(p->p_pptr);
+ if (p->p_ksi != NULL) {
+ p->p_ksi->ksi_signo = SIGCHLD;
+ p->p_ksi->ksi_code = CLD_CONTINUED;
+ p->p_ksi->ksi_status = SIGCONT;
+ p->p_ksi->ksi_pid = p->p_pid;
+ p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
+ if (KSI_ONQ(p->p_ksi))
+ return;
+ }
+ tdsignal(p->p_pptr, NULL, SIGCHLD, p->p_ksi);
+ PROC_UNLOCK(p->p_pptr);
+}
+
+void
+childproc_exited(struct proc *p)
+{
+ int reason;
+
+ reason = CLD_EXITED;
+ if (WCOREDUMP(p->p_xstat))
+ reason = CLD_DUMPED;
+ else if (WIFSIGNALED(p->p_xstat))
+ reason = CLD_KILLED;
+ childproc_stopped(p, reason);
+}
+
static char corefilename[MAXPATHLEN] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
sizeof(corefilename), "process corefile name format string");
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 12be17b..a4ff105 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -78,6 +78,12 @@ TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
+static int queue_sigchild = 0;
+SYSCTL_DECL(_kern_sigqueue);
+SYSCTL_INT(_kern_sigqueue, OID_AUTO, queue_sigchild, CTLFLAG_RD,
+ &queue_sigchild, 0, "queue SIGCHILD");
+TUNABLE_INT("kern.sigqueue.queue_sigchild", &queue_sigchild);
+
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
@@ -278,6 +284,15 @@ proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
TAILQ_INIT(&p->p_threads); /* all threads in proc */
TAILQ_INIT(&p->p_suspended); /* Threads suspended */
sigqueue_init(&p->p_sigqueue, p);
+ if (queue_sigchild) {
+ p->p_ksi = ksiginfo_alloc(1);
+ if (p->p_ksi != NULL) {
+ /* p_ksi may be null if ksiginfo zone is not ready */
+ p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
+ }
+ }
+ else
+ p->p_ksi = NULL;
p->p_numksegrps = 0;
p->p_numthreads = 0;