-rw-r--r--  bin/ps/ps.1             |   3
-rw-r--r--  sys/kern/kern_exec.c    |   6
-rw-r--r--  sys/kern/kern_exit.c    |   2
-rw-r--r--  sys/kern/kern_fork.c    |   5
-rw-r--r--  sys/kern/kern_proc.c    | 139
-rw-r--r--  sys/kern/kern_sig.c     |   6
-rw-r--r--  sys/kern/kern_thread.c  | 120
-rw-r--r--  sys/rpc/svc.c           |   3
-rw-r--r--  sys/sys/proc.h          |  17
9 files changed, 246 insertions(+), 55 deletions(-)
diff --git a/bin/ps/ps.1 b/bin/ps/ps.1
index 52c31ca..17bfa41 100644
--- a/bin/ps/ps.1
+++ b/bin/ps/ps.1
@@ -29,7 +29,7 @@
.\" @(#)ps.1 8.3 (Berkeley) 4/18/94
.\" $FreeBSD$
.\"
-.Dd August 7, 2014
+.Dd December 9, 2014
.Dt PS 1
.Os
.Sh NAME
@@ -332,6 +332,7 @@ the include file
.It Dv "P_SINGLE_BOUNDARY" Ta No "0x400000" Ta "Threads should suspend at user boundary"
.It Dv "P_HWPMC" Ta No "0x800000" Ta "Process is using HWPMCs"
.It Dv "P_JAILED" Ta No "0x1000000" Ta "Process is in jail"
+.It Dv "P_TOTAL_STOP" Ta No "0x2000000" Ta "Stopped for system suspend"
.It Dv "P_INEXEC" Ta No "0x4000000" Ta "Process is in execve()"
.It Dv "P_STATCHILD" Ta No "0x8000000" Ta "Child process stopped or exited"
.It Dv "P_INMEM" Ta No "0x10000000" Ta "Loaded into memory"
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 039fdd0..e56831d 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -292,7 +292,7 @@ kern_execve(td, args, mac_p)
args->endp - args->begin_envv);
if (p->p_flag & P_HADTHREADS) {
PROC_LOCK(p);
- if (thread_single(SINGLE_BOUNDARY)) {
+ if (thread_single(p, SINGLE_BOUNDARY)) {
PROC_UNLOCK(p);
exec_free_args(args);
return (ERESTART); /* Try again later. */
@@ -311,9 +311,9 @@ kern_execve(td, args, mac_p)
* force other threads to suicide.
*/
if (error == 0)
- thread_single(SINGLE_EXIT);
+ thread_single(p, SINGLE_EXIT);
else
- thread_single_end();
+ thread_single_end(p, SINGLE_BOUNDARY);
PROC_UNLOCK(p);
}
if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
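
Below is a minimal sketch, not taken from this commit, of the updated calling convention used in the hunks above: thread_single() now takes the target process explicitly, and thread_single_end() must be passed the mode it is ending. The helper name suspend_other_threads() is hypothetical and error handling is condensed.

static int
suspend_other_threads(struct proc *p)
{

	PROC_LOCK(p);
	if (thread_single(p, SINGLE_BOUNDARY) != 0) {
		/* Another thread won the single-threading race; retry later. */
		PROC_UNLOCK(p);
		return (ERESTART);
	}
	/* ... work while the other threads wait at the user boundary ... */
	thread_single_end(p, SINGLE_BOUNDARY);
	PROC_UNLOCK(p);
	return (0);
}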
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 72290d5..3f2b13a 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -208,7 +208,7 @@ exit1(struct thread *td, int rv)
* re-check all suspension request, the thread should
* either be suspended there or exit.
*/
- if (!thread_single(SINGLE_EXIT))
+ if (!thread_single(p, SINGLE_EXIT))
/*
* All other activity in this process is now
* stopped. Threading support has been turned
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 4277ffa..d3a7741 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -323,7 +323,7 @@ fork_norfproc(struct thread *td, int flags)
if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
(flags & (RFCFDG | RFFDG))) {
PROC_LOCK(p1);
- if (thread_single(SINGLE_BOUNDARY)) {
+ if (thread_single(p1, SINGLE_BOUNDARY)) {
PROC_UNLOCK(p1);
return (ERESTART);
}
@@ -354,7 +354,7 @@ fail:
if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
(flags & (RFCFDG | RFFDG))) {
PROC_LOCK(p1);
- thread_single_end();
+ thread_single_end(p1, SINGLE_BOUNDARY);
PROC_UNLOCK(p1);
}
return (error);
@@ -390,6 +390,7 @@ do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2,
p2->p_pid = trypid;
AUDIT_ARG_PID(p2->p_pid);
LIST_INSERT_HEAD(&allproc, p2, p_list);
+ allproc_gen++;
LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
tidhash_add(td2);
PROC_LOCK(p2);
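
A minimal sketch, not part of the commit, of the generation-counter idea behind the allproc_gen++ added above: a scanner snapshots the counter under allproc_lock and restarts if a concurrent fork bumped it, since that fork may have inserted a process the iteration missed. The function name scan_allproc_example() is hypothetical.

static void
scan_allproc_example(void)
{
	int gen;
	bool done;

	for (;;) {
		sx_xlock(&allproc_lock);
		gen = allproc_gen;
		/* ... iterate over allproc, possibly dropping the lock ... */
		done = (gen == allproc_gen);	/* false if a fork raced us */
		sx_xunlock(&allproc_lock);
		if (done)
			break;
		kern_yield(PRI_USER);
	}
}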
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index bdee5f4..d0b7b1c 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -2854,3 +2854,142 @@ static SYSCTL_NODE(_kern_proc, KERN_PROC_OSREL, osrel, CTLFLAG_RW |
static SYSCTL_NODE(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp, CTLFLAG_RD |
CTLFLAG_MPSAFE, sysctl_kern_proc_sigtramp,
"Process signal trampoline location");
+
+int allproc_gen;
+
+void
+stop_all_proc(void)
+{
+ struct proc *cp, *p;
+ int r, gen;
+ bool restart, seen_stopped, seen_exiting, stopped_some;
+
+ cp = curproc;
+ /*
+ * stop_all_proc() assumes that all processes which have a
+ * usermode part must be stopped, except for the current
+ * process, for obvious reasons.  Since other threads in the
+ * process establishing the global stop could unstop something,
+ * disable calls from multithreaded processes as a precaution.
+ * The service must not be user-callable anyway.
+ */
+ KASSERT((cp->p_flag & P_HADTHREADS) == 0 ||
+ (cp->p_flag & P_KTHREAD) != 0, ("mt stop_all_proc"));
+
+allproc_loop:
+ sx_xlock(&allproc_lock);
+ gen = allproc_gen;
+ seen_exiting = seen_stopped = stopped_some = restart = false;
+ LIST_REMOVE(cp, p_list);
+ LIST_INSERT_HEAD(&allproc, cp, p_list);
+ for (;;) {
+ p = LIST_NEXT(cp, p_list);
+ if (p == NULL)
+ break;
+ LIST_REMOVE(cp, p_list);
+ LIST_INSERT_AFTER(p, cp, p_list);
+ PROC_LOCK(p);
+ if ((p->p_flag & (P_KTHREAD | P_SYSTEM |
+ P_TOTAL_STOP)) != 0) {
+ PROC_UNLOCK(p);
+ continue;
+ }
+ if ((p->p_flag & P_WEXIT) != 0) {
+ seen_exiting = true;
+ PROC_UNLOCK(p);
+ continue;
+ }
+ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
+ /*
+ * Stopped processes are tolerated when there
+ * are no other processes which might continue
+ * them.  A process that is P_STOPPED_SINGLE but not
+ * P_TOTAL_STOP still has at least one thread
+ * running.
+ */
+ seen_stopped = true;
+ PROC_UNLOCK(p);
+ continue;
+ }
+ _PHOLD(p);
+ sx_xunlock(&allproc_lock);
+ r = thread_single(p, SINGLE_ALLPROC);
+ if (r != 0)
+ restart = true;
+ else
+ stopped_some = true;
+ _PRELE(p);
+ PROC_UNLOCK(p);
+ sx_xlock(&allproc_lock);
+ }
+ /* Catch forked children we did not see in iteration. */
+ if (gen != allproc_gen)
+ restart = true;
+ sx_xunlock(&allproc_lock);
+ if (restart || stopped_some || seen_exiting || seen_stopped) {
+ kern_yield(PRI_USER);
+ goto allproc_loop;
+ }
+}
+
+void
+resume_all_proc(void)
+{
+ struct proc *cp, *p;
+
+ cp = curproc;
+ sx_xlock(&allproc_lock);
+ LIST_REMOVE(cp, p_list);
+ LIST_INSERT_HEAD(&allproc, cp, p_list);
+ for (;;) {
+ p = LIST_NEXT(cp, p_list);
+ if (p == NULL)
+ break;
+ LIST_REMOVE(cp, p_list);
+ LIST_INSERT_AFTER(p, cp, p_list);
+ PROC_LOCK(p);
+ if ((p->p_flag & P_TOTAL_STOP) != 0) {
+ sx_xunlock(&allproc_lock);
+ _PHOLD(p);
+ thread_single_end(p, SINGLE_ALLPROC);
+ _PRELE(p);
+ PROC_UNLOCK(p);
+ sx_xlock(&allproc_lock);
+ } else {
+ PROC_UNLOCK(p);
+ }
+ }
+ sx_xunlock(&allproc_lock);
+}
+
+#define TOTAL_STOP_DEBUG 1
+#ifdef TOTAL_STOP_DEBUG
+volatile static int ap_resume;
+#include <sys/mount.h>
+
+static int
+sysctl_debug_stop_all_proc(SYSCTL_HANDLER_ARGS)
+{
+ int error, val;
+
+ val = 0;
+ ap_resume = 0;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (val != 0) {
+ stop_all_proc();
+ syncer_suspend();
+ while (ap_resume == 0)
+ ;
+ syncer_resume();
+ resume_all_proc();
+ }
+ return (0);
+}
+
+SYSCTL_PROC(_debug, OID_AUTO, stop_all_proc, CTLTYPE_INT | CTLFLAG_RW |
+ CTLFLAG_MPSAFE, __DEVOLATILE(int *, &ap_resume), 0,
+ sysctl_debug_stop_all_proc, "I",
+ "");
+#endif
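
A minimal usage sketch, not part of the commit, showing how a single-threaded kernel thread would bracket a system-wide operation with the new primitives, in the same way the debug.stop_all_proc sysctl above brackets syncer_suspend()/syncer_resume(). The function name global_quiesce_example() is hypothetical.

static void
global_quiesce_example(void)
{

	stop_all_proc();	/* park every process that has a user part */
	/*
	 * Perform the system-wide operation here; all non-system
	 * processes are stopped and marked P_TOTAL_STOP.
	 */
	resume_all_proc();	/* clear P_TOTAL_STOP and let them run */
}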
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 10406e0..2951ee8 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -2500,7 +2500,7 @@ ptracestop(struct thread *td, int sig)
cv_broadcast(&p->p_dbgwait);
}
stopme:
- thread_suspend_switch(td);
+ thread_suspend_switch(td, p);
if (p->p_xthread == td)
p->p_xthread = NULL;
if (!(p->p_flag & P_TRACED))
@@ -2761,7 +2761,7 @@ issignal(struct thread *td)
p->p_xstat = sig;
PROC_SLOCK(p);
sig_suspend_threads(td, p, 0);
- thread_suspend_switch(td);
+ thread_suspend_switch(td, p);
PROC_SUNLOCK(p);
mtx_lock(&ps->ps_mtx);
break;
@@ -2942,7 +2942,7 @@ sigexit(td, sig)
* XXX If another thread attempts to single-thread before us
* (e.g. via fork()), we won't get a dump at all.
*/
- if ((sigprop(sig) & SA_CORE) && (thread_single(SINGLE_NO_EXIT) == 0)) {
+ if ((sigprop(sig) & SA_CORE) && thread_single(p, SINGLE_NO_EXIT) == 0) {
p->p_sig = sig;
/*
* Log signals which would cause core dumps
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 280cb04..62d92b0 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -446,7 +446,7 @@ thread_exit(void)
if (p->p_numthreads == p->p_suspcount) {
thread_lock(p->p_singlethread);
wakeup_swapper = thread_unsuspend_one(
- p->p_singlethread);
+ p->p_singlethread, p);
thread_unlock(p->p_singlethread);
if (wakeup_swapper)
kick_proc0();
@@ -575,7 +575,7 @@ calc_remaining(struct proc *p, int mode)
remaining = p->p_numthreads;
else if (mode == SINGLE_BOUNDARY)
remaining = p->p_numthreads - p->p_boundary_count;
- else if (mode == SINGLE_NO_EXIT)
+ else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
remaining = p->p_numthreads - p->p_suspcount;
else
panic("calc_remaining: wrong mode %d", mode);
@@ -586,7 +586,7 @@ static int
remain_for_mode(int mode)
{
- return (1);
+ return (mode == SINGLE_ALLPROC ? 0 : 1);
}
static int
@@ -602,22 +602,43 @@ weed_inhib(int mode, struct thread *td2, struct proc *p)
switch (mode) {
case SINGLE_EXIT:
if (TD_IS_SUSPENDED(td2))
- wakeup_swapper |= thread_unsuspend_one(td2);
+ wakeup_swapper |= thread_unsuspend_one(td2, p);
if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
wakeup_swapper |= sleepq_abort(td2, EINTR);
break;
case SINGLE_BOUNDARY:
if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
- wakeup_swapper |= thread_unsuspend_one(td2);
+ wakeup_swapper |= thread_unsuspend_one(td2, p);
if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
wakeup_swapper |= sleepq_abort(td2, ERESTART);
break;
case SINGLE_NO_EXIT:
if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
- wakeup_swapper |= thread_unsuspend_one(td2);
+ wakeup_swapper |= thread_unsuspend_one(td2, p);
if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
wakeup_swapper |= sleepq_abort(td2, ERESTART);
break;
+ case SINGLE_ALLPROC:
+ /*
+ * ALLPROC suspend tries to avoid spurious EINTR for
+ * threads sleeping interruptibly, by suspending the
+ * thread directly, similarly to sig_suspend_threads().
+ * Since such sleep is not performed at the user
+ * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
+ * is used to avoid immediate un-suspend.
+ */
+ if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
+ TDF_ALLPROCSUSP)) == 0)
+ wakeup_swapper |= thread_unsuspend_one(td2, p);
+ if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
+ if ((td2->td_flags & TDF_SBDRY) == 0) {
+ thread_suspend_one(td2);
+ td2->td_flags |= TDF_ALLPROCSUSP;
+ } else {
+ wakeup_swapper |= sleepq_abort(td2, ERESTART);
+ }
+ }
+ break;
}
return (wakeup_swapper);
}
@@ -636,19 +657,29 @@ weed_inhib(int mode, struct thread *td2, struct proc *p)
* any sleeping threads that are interruptable. (PCATCH).
*/
int
-thread_single(int mode)
+thread_single(struct proc *p, int mode)
{
struct thread *td;
struct thread *td2;
- struct proc *p;
int remaining, wakeup_swapper;
td = curthread;
- p = td->td_proc;
+ KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
+ mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
+ ("invalid mode %d", mode));
+ /*
+ * If allowing non-ALLPROC singlethreading for non-curproc
+ * callers, calc_remaining() and remain_for_mode() should be
+ * adjusted to also account for td->td_proc != p. For now
+ * this is not implemented because it is not used.
+ */
+ KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
+ (mode != SINGLE_ALLPROC && td->td_proc == p),
+ ("mode %d proc %p curproc %p", mode, p, td->td_proc));
mtx_assert(&Giant, MA_NOTOWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
- if ((p->p_flag & P_HADTHREADS) == 0)
+ if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
return (0);
/* Is someone already single threading? */
@@ -665,6 +696,8 @@ thread_single(int mode)
else
p->p_flag &= ~P_SINGLE_BOUNDARY;
}
+ if (mode == SINGLE_ALLPROC)
+ p->p_flag |= P_TOTAL_STOP;
p->p_flag |= P_STOPPED_SINGLE;
PROC_SLOCK(p);
p->p_singlethread = td;
@@ -678,13 +711,13 @@ thread_single(int mode)
continue;
thread_lock(td2);
td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
- if (TD_IS_INHIBITED(td2))
+ if (TD_IS_INHIBITED(td2)) {
wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
- else if (TD_IS_RUNNING(td2) && td != td2) {
+ } else if (TD_IS_RUNNING(td2) && td != td2) {
forward_signal(td2);
- }
#endif
+ }
thread_unlock(td2);
}
if (wakeup_swapper)
@@ -702,7 +735,7 @@ stopme:
* Wake us up when everyone else has suspended.
* In the mean time we suspend as well.
*/
- thread_suspend_switch(td);
+ thread_suspend_switch(td, p);
remaining = calc_remaining(p, mode);
}
if (mode == SINGLE_EXIT) {
@@ -812,8 +845,9 @@ thread_suspend_check(int return_instead)
* Ignore suspend requests for stop signals if they
* are deferred.
*/
- if (P_SHOULDSTOP(p) == P_STOPPED_SIG &&
- td->td_flags & TDF_SBDRY) {
+ if ((P_SHOULDSTOP(p) == P_STOPPED_SIG ||
+ (p->p_flag & P_TOTAL_STOP) != 0) &&
+ (td->td_flags & TDF_SBDRY) != 0) {
KASSERT(return_instead,
("TDF_SBDRY set for unsafe thread_suspend_check"));
return (0);
@@ -840,7 +874,7 @@ thread_suspend_check(int return_instead)
if (p->p_numthreads == p->p_suspcount + 1) {
thread_lock(p->p_singlethread);
wakeup_swapper =
- thread_unsuspend_one(p->p_singlethread);
+ thread_unsuspend_one(p->p_singlethread, p);
thread_unlock(p->p_singlethread);
if (wakeup_swapper)
kick_proc0();
@@ -873,11 +907,9 @@ thread_suspend_check(int return_instead)
}
void
-thread_suspend_switch(struct thread *td)
+thread_suspend_switch(struct thread *td, struct proc *p)
{
- struct proc *p;
- p = td->td_proc;
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
@@ -885,8 +917,10 @@ thread_suspend_switch(struct thread *td)
* We implement thread_suspend_one in stages here to avoid
* dropping the proc lock while the thread lock is owned.
*/
- thread_stopped(p);
- p->p_suspcount++;
+ if (p == td->td_proc) {
+ thread_stopped(p);
+ p->p_suspcount++;
+ }
PROC_UNLOCK(p);
thread_lock(td);
td->td_flags &= ~TDF_NEEDSUSPCHK;
@@ -904,8 +938,9 @@ thread_suspend_switch(struct thread *td)
void
thread_suspend_one(struct thread *td)
{
- struct proc *p = td->td_proc;
+ struct proc *p;
+ p = td->td_proc;
PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
@@ -916,15 +951,17 @@ thread_suspend_one(struct thread *td)
}
int
-thread_unsuspend_one(struct thread *td)
+thread_unsuspend_one(struct thread *td, struct proc *p)
{
- struct proc *p = td->td_proc;
- PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
TD_CLR_SUSPENDED(td);
- p->p_suspcount--;
+ td->td_flags &= ~TDF_ALLPROCSUSP;
+ if (td->td_proc == p) {
+ PROC_SLOCK_ASSERT(p, MA_OWNED);
+ p->p_suspcount--;
+ }
return (setrunnable(td));
}
@@ -944,7 +981,7 @@ thread_unsuspend(struct proc *p)
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (TD_IS_SUSPENDED(td)) {
- wakeup_swapper |= thread_unsuspend_one(td);
+ wakeup_swapper |= thread_unsuspend_one(td, p);
}
thread_unlock(td);
}
@@ -955,9 +992,12 @@ thread_unsuspend(struct proc *p)
* threading request. Now we've downgraded to single-threaded,
* let it continue.
*/
- thread_lock(p->p_singlethread);
- wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
- thread_unlock(p->p_singlethread);
+ if (p->p_singlethread->td_proc == p) {
+ thread_lock(p->p_singlethread);
+ wakeup_swapper = thread_unsuspend_one(
+ p->p_singlethread, p);
+ thread_unlock(p->p_singlethread);
+ }
}
if (wakeup_swapper)
kick_proc0();
@@ -967,15 +1007,20 @@ thread_unsuspend(struct proc *p)
* End the single threading mode..
*/
void
-thread_single_end(void)
+thread_single_end(struct proc *p, int mode)
{
struct thread *td;
- struct proc *p;
int wakeup_swapper;
- p = curproc;
+ KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
+ mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
+ ("invalid mode %d", mode));
PROC_LOCK_ASSERT(p, MA_OWNED);
- p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
+ KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
+ (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
+ ("mode %d does not match P_TOTAL_STOP", mode));
+ p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
+ P_TOTAL_STOP);
PROC_SLOCK(p);
p->p_singlethread = NULL;
wakeup_swapper = 0;
@@ -985,12 +1030,11 @@ thread_single_end(void)
* on the process. The single threader must be allowed
* to continue however as this is a bad place to stop.
*/
- if (p->p_numthreads != remain_for_mode(SINGLE_EXIT) &&
- !P_SHOULDSTOP(p)) {
+ if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (TD_IS_SUSPENDED(td)) {
- wakeup_swapper |= thread_unsuspend_one(td);
+ wakeup_swapper |= thread_unsuspend_one(td, p);
}
thread_unlock(td);
}
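
A minimal sketch, not part of the commit, of what the reworked assertions in thread_single()/thread_single_end() now permit: a kernel thread single-threading a foreign process (td->td_proc != p) with SINGLE_ALLPROC, mirroring the per-process body of stop_all_proc(). The function name stop_one_proc_example() is hypothetical; the real callers keep the process stopped until resume_all_proc() runs.

static void
stop_one_proc_example(struct proc *p)
{

	PROC_LOCK(p);
	_PHOLD(p);
	if (thread_single(p, SINGLE_ALLPROC) == 0) {
		/* All threads of p are suspended; P_TOTAL_STOP is set. */
		thread_single_end(p, SINGLE_ALLPROC);
	}
	_PRELE(p);
	PROC_UNLOCK(p);
}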
diff --git a/sys/rpc/svc.c b/sys/rpc/svc.c
index 75d9d6e..d2bd378 100644
--- a/sys/rpc/svc.c
+++ b/sys/rpc/svc.c
@@ -1190,7 +1190,8 @@ svc_run_internal(SVCGROUP *grp, bool_t ismaster)
mtx_unlock(&grp->sg_lock);
p = curproc;
PROC_LOCK(p);
- if (P_SHOULDSTOP(p)) {
+ if (P_SHOULDSTOP(p) ||
+ (p->p_flag & P_TOTAL_STOP) != 0) {
thread_suspend_check(0);
PROC_UNLOCK(p);
mtx_lock(&grp->sg_lock);
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 891d296..c397672 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -355,7 +355,7 @@ do { \
#define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */
#define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */
#define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */
-#define TDF_UNUSED09 0x00000200 /* --available-- */
+#define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */
#define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */
#define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */
#define TDF_TIMOFAIL 0x00001000 /* Timeout from sleep after we were awake. */
@@ -629,7 +629,7 @@ struct proc {
#define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */
#define P_HWPMC 0x800000 /* Process is using HWPMCs */
#define P_JAILED 0x1000000 /* Process is in jail. */
-#define P_UNUSED1 0x2000000
+#define P_TOTAL_STOP 0x2000000 /* Stopped in proc_stop_total. */
#define P_INEXEC 0x4000000 /* Process is in execve(). */
#define P_STATCHILD 0x8000000 /* Child process stopped or exited. */
#define P_INMEM 0x10000000 /* Loaded into memory. */
@@ -690,6 +690,7 @@ struct proc {
#define SINGLE_NO_EXIT 0
#define SINGLE_EXIT 1
#define SINGLE_BOUNDARY 2
+#define SINGLE_ALLPROC 3
#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
@@ -816,6 +817,7 @@ extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;
extern struct sx allproc_lock;
+extern int allproc_gen;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
extern struct proc proc0; /* Process slot for swapper. */
@@ -939,8 +941,8 @@ void thread_exit(void) __dead2;
void thread_free(struct thread *td);
void thread_link(struct thread *td, struct proc *p);
void thread_reap(void);
-int thread_single(int how);
-void thread_single_end(void);
+int thread_single(struct proc *p, int how);
+void thread_single_end(struct proc *p, int how);
void thread_stash(struct thread *td);
void thread_stopped(struct proc *p);
void childproc_stopped(struct proc *child, int reason);
@@ -948,14 +950,17 @@ void childproc_continued(struct proc *child);
void childproc_exited(struct proc *child);
int thread_suspend_check(int how);
bool thread_suspend_check_needed(void);
-void thread_suspend_switch(struct thread *);
+void thread_suspend_switch(struct thread *, struct proc *p);
void thread_suspend_one(struct thread *td);
void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
-int thread_unsuspend_one(struct thread *td);
+int thread_unsuspend_one(struct thread *td, struct proc *p);
void thread_wait(struct proc *p);
struct thread *thread_find(struct proc *p, lwpid_t tid);
+void stop_all_proc(void);
+void resume_all_proc(void);
+
static __inline int
curthread_pflags_set(int flags)
{