author	jeff <jeff@FreeBSD.org>	2007-09-17 05:31:39 +0000
committer	jeff <jeff@FreeBSD.org>	2007-09-17 05:31:39 +0000
commit	3fc0f8b973fdc2f392a3234d653e09d71e2aab98 (patch)
tree	e572a7b77f24339d2dc6ebf6b83c1e2c1ce0f6a5
parent	74666fdfce0e06484d5f82e8fced0c16c06477d1 (diff)
download	FreeBSD-src-3fc0f8b973fdc2f392a3234d653e09d71e2aab98.zip
	FreeBSD-src-3fc0f8b973fdc2f392a3234d653e09d71e2aab98.tar.gz
- Move all of the PS_ flags into either p_flag or td_flags.
- p_sflag was mostly protected by PROC_LOCK rather than the PROC_SLOCK or
  previously the sched_lock.  These bugs have existed for some time.
- Allow swapout to try each thread in a process individually and then
  swapin the whole process if any of these fail.  This allows us to move
  most scheduler related swap flags into td_flags.
- Keep ki_sflag for backwards compat but change all in source tools to
  use the new and more correct location of P_INMEM.

Reported by:	pho
Reviewed by:	attilio, kib
Approved by:	re (kensmith)
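(For context, a minimal stand-alone C sketch of the shape of this migration; it is not part of the commit. The structs and main() below are hypothetical, and only the two flag values match the new definitions in sys/sys/proc.h: the residency bit moves from p_sflag/PS_INMEM into p_flag as P_INMEM, and each thread additionally carries TDF_INMEM so swapout can be attempted per thread.)

	/*
	 * Hypothetical, self-contained sketch (not kernel code) of the new
	 * flag layout: P_INMEM lives in p_flag, TDF_INMEM lives in td_flags.
	 */
	#include <stdio.h>

	#define P_INMEM		0x10000000	/* process image resident (p_flag) */
	#define TDF_INMEM	0x00000004	/* thread stack resident (td_flags) */

	struct thread { int td_flags; };
	struct proc   { int p_flag; struct thread td[2]; };

	/* Residency check against p_flag, replacing the old p_sflag & PS_INMEM. */
	static int
	proc_resident(const struct proc *p)
	{
		return ((p->p_flag & P_INMEM) != 0);
	}

	int
	main(void)
	{
		/* One thread resident, one swapped out. */
		struct proc p = { P_INMEM, { { TDF_INMEM }, { 0 } } };

		printf("process resident:  %d\n", proc_resident(&p));
		printf("thread 0 resident: %d\n", (p.td[0].td_flags & TDF_INMEM) != 0);
		printf("thread 1 resident: %d\n", (p.td[1].td_flags & TDF_INMEM) != 0);
		return (0);
	}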
-rw-r--r--bin/ps/print.c9
-rw-r--r--bin/ps/ps.c4
-rw-r--r--lib/libkvm/kvm_proc.c4
-rw-r--r--sys/amd64/amd64/genassym.c1
-rw-r--r--sys/ddb/db_ps.c2
-rw-r--r--sys/fs/procfs/procfs_status.c8
-rw-r--r--sys/i386/i386/genassym.c1
-rw-r--r--sys/i386/linux/linux_ptrace.c4
-rw-r--r--sys/kern/init_main.c9
-rw-r--r--sys/kern/kern_clock.c19
-rw-r--r--sys/kern/kern_fork.c6
-rw-r--r--sys/kern/kern_kse.c2
-rw-r--r--sys/kern/kern_proc.c7
-rw-r--r--sys/kern/kern_switch.c4
-rw-r--r--sys/kern/kern_synch.c16
-rw-r--r--sys/kern/kern_thread.c2
-rw-r--r--sys/kern/sched_4bsd.c16
-rw-r--r--sys/kern/sched_ule.c4
-rw-r--r--sys/kern/subr_kdb.c6
-rw-r--r--sys/kern/subr_trap.c23
-rw-r--r--sys/kern/sys_process.c2
-rw-r--r--sys/security/mac_lomac/mac_lomac.c3
-rw-r--r--sys/sparc64/sparc64/genassym.c1
-rw-r--r--sys/sys/proc.h21
-rw-r--r--sys/sys/user.h6
-rw-r--r--sys/vm/vm_glue.c154
-rw-r--r--sys/vm/vm_pageout.c2
-rw-r--r--usr.bin/systat/pigs.c2
-rw-r--r--usr.bin/top/machine.c2
29 files changed, 168 insertions, 172 deletions
diff --git a/bin/ps/print.c b/bin/ps/print.c
index 081c4f5..29dffb6 100644
--- a/bin/ps/print.c
+++ b/bin/ps/print.c
@@ -197,14 +197,13 @@ logname(KINFO *k, VARENT *ve)
void
state(KINFO *k, VARENT *ve)
{
- int flag, sflag, tdflags;
+ int flag, tdflags;
char *cp;
VAR *v;
char buf[16];
v = ve->var;
flag = k->ki_p->ki_flag;
- sflag = k->ki_p->ki_sflag;
tdflags = k->ki_p->ki_tdflags; /* XXXKSE */
cp = buf;
@@ -242,7 +241,7 @@ state(KINFO *k, VARENT *ve)
*cp = '?';
}
cp++;
- if (!(sflag & PS_INMEM))
+ if (!(flag & P_INMEM))
*cp++ = 'W';
if (k->ki_p->ki_nice < NZERO)
*cp++ = '<';
@@ -591,7 +590,7 @@ getpcpu(const KINFO *k)
#define fxtofl(fixpt) ((double)(fixpt) / fscale)
/* XXX - I don't like this */
- if (k->ki_p->ki_swtime == 0 || (k->ki_p->ki_sflag & PS_INMEM) == 0)
+ if (k->ki_p->ki_swtime == 0 || (k->ki_p->ki_flag & P_INMEM) == 0)
return (0.0);
if (rawcpu)
return (100.0 * fxtofl(k->ki_p->ki_pctcpu));
@@ -619,7 +618,7 @@ getpmem(KINFO *k)
if (failure)
return (0.0);
- if ((k->ki_p->ki_sflag & PS_INMEM) == 0)
+ if ((k->ki_p->ki_flag & P_INMEM) == 0)
return (0.0);
/* XXX want pmap ptpages, segtab, etc. (per architecture) */
/* XXX don't have info about shared */
diff --git a/bin/ps/ps.c b/bin/ps/ps.c
index 73a6121..ee2b7f8 100644
--- a/bin/ps/ps.c
+++ b/bin/ps/ps.c
@@ -994,13 +994,13 @@ fmt(char **(*fn)(kvm_t *, const struct kinfo_proc *, int), KINFO *ki,
return (s);
}
-#define UREADOK(ki) (forceuread || (ki->ki_p->ki_sflag & PS_INMEM))
+#define UREADOK(ki) (forceuread || (ki->ki_p->ki_flag & P_INMEM))
static void
saveuser(KINFO *ki)
{
- if (ki->ki_p->ki_sflag & PS_INMEM) {
+ if (ki->ki_p->ki_flag & P_INMEM) {
/*
* The u-area might be swapped out, and we can't get
* at it because we have a crashdump and no swap.
diff --git a/lib/libkvm/kvm_proc.c b/lib/libkvm/kvm_proc.c
index 598681b..a2e7f33 100644
--- a/lib/libkvm/kvm_proc.c
+++ b/lib/libkvm/kvm_proc.c
@@ -209,7 +209,7 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
kp->ki_sigcatch = sigacts.ps_sigcatch;
}
#if 0
- if ((proc.p_sflag & PS_INMEM) && proc.p_stats != NULL) {
+ if ((proc.p_flag & P_INMEM) && proc.p_stats != NULL) {
if (KREAD(kd, (u_long)proc.p_stats, &pstats)) {
_kvm_err(kd, kd->program,
"can't read stats at %x", proc.p_stats);
@@ -370,7 +370,7 @@ nopgrp:
if (proc.p_state != PRS_ZOMBIE) {
kp->ki_swtime = proc.p_swtime;
kp->ki_flag = proc.p_flag;
- kp->ki_sflag = proc.p_sflag;
+ kp->ki_sflag = 0;
kp->ki_nice = proc.p_nice;
kp->ki_traceflag = proc.p_traceflag;
if (proc.p_state == PRS_NORMAL) {
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index 42858b3..10ad5a3 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -74,7 +74,6 @@ __FBSDID("$FreeBSD$");
ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
-ASSYM(P_SFLAG, offsetof(struct proc, p_sflag));
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c
index 61e36b9..9efd871 100644
--- a/sys/ddb/db_ps.c
+++ b/sys/ddb/db_ps.c
@@ -162,7 +162,7 @@ db_ps(db_expr_t addr, boolean_t hasaddr, db_expr_t count, char *modif)
state[1] = '\0';
/* Additional process state flags. */
- if (!p->p_sflag & PS_INMEM)
+ if (!p->p_flag & P_INMEM)
strlcat(state, "W", sizeof(state));
if (p->p_flag & P_TRACED)
strlcat(state, "X", sizeof(state));
diff --git a/sys/fs/procfs/procfs_status.c b/sys/fs/procfs/procfs_status.c
index 1a8148b..871d7fc 100644
--- a/sys/fs/procfs/procfs_status.c
+++ b/sys/fs/procfs/procfs_status.c
@@ -112,7 +112,6 @@ procfs_doprocstatus(PFS_FILL_ARGS)
sbuf_printf(sb, "noflags");
}
- PROC_SLOCK(p);
#ifdef KSE
if (p->p_flag & P_SA)
wmesg = "-kse- ";
@@ -128,9 +127,10 @@ procfs_doprocstatus(PFS_FILL_ARGS)
wmesg = "nochan";
}
- if (p->p_sflag & PS_INMEM) {
+ if (p->p_flag & P_INMEM) {
struct timeval start, ut, st;
+ PROC_SLOCK(p);
calcru(p, &ut, &st);
PROC_SUNLOCK(p);
start = p->p_stats->p_start;
@@ -139,10 +139,8 @@ procfs_doprocstatus(PFS_FILL_ARGS)
(intmax_t)start.tv_sec, start.tv_usec,
(intmax_t)ut.tv_sec, ut.tv_usec,
(intmax_t)st.tv_sec, st.tv_usec);
- } else {
- PROC_SUNLOCK(p);
+ } else
sbuf_printf(sb, " -1,-1 -1,-1 -1,-1");
- }
sbuf_printf(sb, " %s", wmesg);
diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c
index 2c93d53..9169431 100644
--- a/sys/i386/i386/genassym.c
+++ b/sys/i386/i386/genassym.c
@@ -78,7 +78,6 @@ __FBSDID("$FreeBSD$");
ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
-ASSYM(P_SFLAG, offsetof(struct proc, p_sflag));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
diff --git a/sys/i386/linux/linux_ptrace.c b/sys/i386/linux/linux_ptrace.c
index 68532e2..daee9e5 100644
--- a/sys/i386/linux/linux_ptrace.c
+++ b/sys/i386/linux/linux_ptrace.c
@@ -222,7 +222,7 @@ linux_proc_read_fpxregs(struct thread *td, struct linux_pt_fpxreg *fpxregs)
{
PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
- if (cpu_fxsr == 0 || (td->td_proc->p_sflag & PS_INMEM) == 0)
+ if (cpu_fxsr == 0 || (td->td_proc->p_flag & P_INMEM) == 0)
return (EIO);
bcopy(&td->td_pcb->pcb_save.sv_xmm, fpxregs, sizeof(*fpxregs));
return (0);
@@ -233,7 +233,7 @@ linux_proc_write_fpxregs(struct thread *td, struct linux_pt_fpxreg *fpxregs)
{
PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
- if (cpu_fxsr == 0 || (td->td_proc->p_sflag & PS_INMEM) == 0)
+ if (cpu_fxsr == 0 || (td->td_proc->p_flag & P_INMEM) == 0)
return (EIO);
bcopy(fpxregs, &td->td_pcb->pcb_save.sv_xmm, sizeof(*fpxregs));
return (0);
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index c0eac133..0eb26ec 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -415,8 +415,7 @@ proc0_init(void *dummy __unused)
session0.s_leader = p;
p->p_sysent = &null_sysvec;
- p->p_flag = P_SYSTEM;
- p->p_sflag = PS_INMEM;
+ p->p_flag = P_SYSTEM | P_INMEM;
p->p_state = PRS_NORMAL;
knlist_init(&p->p_klist, &p->p_mtx, NULL, NULL, NULL);
STAILQ_INIT(&p->p_ktr);
@@ -428,6 +427,7 @@ proc0_init(void *dummy __unused)
td->td_priority = PVM;
td->td_base_pri = PUSER;
td->td_oncpu = 0;
+ td->td_flags = TDF_INMEM;
p->p_peers = 0;
p->p_leader = p;
@@ -710,7 +710,7 @@ create_init(const void *udata __unused)
/* divorce init's credentials from the kernel's */
newcred = crget();
PROC_LOCK(initproc);
- initproc->p_flag |= P_SYSTEM;
+ initproc->p_flag |= P_SYSTEM | P_INMEM;
oldcred = initproc->p_ucred;
crcopy(newcred, oldcred);
#ifdef MAC
@@ -723,9 +723,6 @@ create_init(const void *udata __unused)
PROC_UNLOCK(initproc);
crfree(oldcred);
cred_update_thread(FIRST_THREAD_IN_PROC(initproc));
- PROC_SLOCK(initproc);
- initproc->p_sflag |= PS_INMEM;
- PROC_SUNLOCK(initproc);
cpu_set_fork_handler(FIRST_THREAD_IN_PROC(initproc), start_init, NULL);
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index d9a82d2..210f5d3 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -201,34 +201,29 @@ hardclock_cpu(int usermode)
struct pstats *pstats;
struct thread *td = curthread;
struct proc *p = td->td_proc;
- int ast;
+ int flags;
/*
* Run current process's virtual and profile time, as needed.
*/
pstats = p->p_stats;
- ast = 0;
+ flags = 0;
if (usermode &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
PROC_SLOCK(p);
- if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- p->p_sflag |= PS_ALRMPEND;
- ast = 1;
- }
+ if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
+ flags |= TDF_ALRMPEND | TDF_ASTPENDING;
PROC_SUNLOCK(p);
}
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
PROC_SLOCK(p);
- if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- p->p_sflag |= PS_PROFPEND;
- ast = 1;
- }
+ if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
+ flags |= TDF_PROFPEND | TDF_ASTPENDING;
PROC_SUNLOCK(p);
}
thread_lock(td);
sched_tick();
- if (ast)
- td->td_flags |= TDF_ASTPENDING;
+ td->td_flags |= flags;
thread_unlock(td);
#ifdef HWPMC_HOOKS
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 1a7f4a7..f8fc9de 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -493,17 +493,15 @@ again:
td2->td_sigstk = td->td_sigstk;
td2->td_sigmask = td->td_sigmask;
+ td2->td_flags = TDF_INMEM;
/*
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
*/
- p2->p_flag = 0;
+ p2->p_flag = P_INMEM;
if (p1->p_flag & P_PROFIL)
startprofclock(p2);
- PROC_SLOCK(p2);
- p2->p_sflag = PS_INMEM;
- PROC_SUNLOCK(p2);
td2->td_ucred = crhold(p2->p_ucred);
pargs_hold(p2->p_args);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index acdb55d..4174bde 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -1002,6 +1002,7 @@ thread_alloc_spare(struct thread *td)
__rangeof(struct thread, td_startzero, td_endzero));
spare->td_proc = td->td_proc;
spare->td_ucred = crhold(td->td_ucred);
+ spare->td_flags = TDF_INMEM;
}
/*
@@ -1042,7 +1043,6 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
/* Let the new thread become owner of the upcall */
ku->ku_owner = td2;
td2->td_upcall = ku;
- td2->td_flags = 0;
td2->td_pflags = TDP_SA|TDP_UPCALLING;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index f9f993d..bec9ee3 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -690,14 +690,17 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
kp->ki_ssize = vm->vm_ssize;
} else if (p->p_state == PRS_ZOMBIE)
kp->ki_stat = SZOMB;
- kp->ki_sflag = p->p_sflag;
+ if (kp->ki_flag & P_INMEM)
+ kp->ki_sflag = PS_INMEM;
+ else
+ kp->ki_sflag = 0;
kp->ki_swtime = p->p_swtime;
kp->ki_pid = p->p_pid;
kp->ki_nice = p->p_nice;
rufetch(p, &kp->ki_rusage);
kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
PROC_SUNLOCK(p);
- if ((p->p_sflag & PS_INMEM) && p->p_stats != NULL) {
+ if ((p->p_flag & P_INMEM) && p->p_stats != NULL) {
kp->ki_start = p->p_stats->p_start;
timevaladd(&kp->ki_start, &boottime);
PROC_SLOCK(p);
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index aa75dab..e16e38e 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -558,8 +558,8 @@ runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
struct rqhead *rqh;
u_char pri;
- KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
- ("runq_remove_idx: process swapped out"));
+ KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+ ("runq_remove_idx: thread swapped out"));
pri = ts->ts_rqindex;
KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
rqh = &rq->rq_queues[pri];
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 96b4eda..8cdd9fe 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -463,16 +463,10 @@ mi_switch(int flags, struct thread *newtd)
void
setrunnable(struct thread *td)
{
- struct proc *p;
- p = td->td_proc;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- switch (p->p_state) {
- case PRS_ZOMBIE:
- panic("setrunnable(1)");
- default:
- break;
- }
+ KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
+ ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
switch (td->td_state) {
case TDS_RUNNING:
case TDS_RUNQ:
@@ -491,9 +485,9 @@ setrunnable(struct thread *td)
printf("state is 0x%x", td->td_state);
panic("setrunnable(2)");
}
- if ((p->p_sflag & PS_INMEM) == 0) {
- if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
- p->p_sflag |= PS_SWAPINREQ;
+ if ((td->td_flags & TDF_INMEM) == 0) {
+ if ((td->td_flags & TDF_SWAPINREQ) == 0) {
+ td->td_flags |= TDF_SWAPINREQ;
/*
* due to a LOR between the thread lock and
* the sleepqueue chain locks, use
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index f12e724..a8a3581 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -531,7 +531,7 @@ thread_link(struct thread *td, struct proc *p)
*/
td->td_state = TDS_INACTIVE;
td->td_proc = p;
- td->td_flags = 0;
+ td->td_flags = TDF_INMEM;
LIST_INIT(&td->td_contested);
sigqueue_init(&td->td_sigqueue, p);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index d96c27e..3692f0f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1086,8 +1086,8 @@ sched_add(struct thread *td, int flags)
("sched_add: trying to run inhibited thread"));
KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
("sched_add: bad thread state"));
- KASSERT(td->td_proc->p_sflag & PS_INMEM,
- ("sched_add: process swapped out"));
+ KASSERT(td->td_flags & TDF_INMEM,
+ ("sched_add: thread swapped out"));
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
curthread->td_proc->p_comm);
@@ -1155,8 +1155,8 @@ sched_add(struct thread *td, int flags)
("sched_add: trying to run inhibited thread"));
KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
("sched_add: bad thread state"));
- KASSERT(td->td_proc->p_sflag & PS_INMEM,
- ("sched_add: process swapped out"));
+ KASSERT(td->td_flags & TDF_INMEM,
+ ("sched_add: thread swapped out"));
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
curthread->td_proc->p_comm);
@@ -1200,8 +1200,8 @@ sched_rem(struct thread *td)
struct td_sched *ts;
ts = td->td_sched;
- KASSERT(td->td_proc->p_sflag & PS_INMEM,
- ("sched_rem: process swapped out"));
+ KASSERT(td->td_flags & TDF_INMEM,
+ ("sched_rem: thread swapped out"));
KASSERT(TD_ON_RUNQ(td),
("sched_rem: thread not on run queue"));
mtx_assert(&sched_lock, MA_OWNED);
@@ -1253,8 +1253,8 @@ sched_choose(void)
runq_remove(rq, ts);
ts->ts_flags |= TSF_DIDRUN;
- KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
- ("sched_choose: process swapped out"));
+ KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+ ("sched_choose: thread swapped out"));
return (ts->ts_thread);
}
return (PCPU_GET(idlethread));
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index a2c33dd..88c937b 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -2287,8 +2287,8 @@ tdq_add(struct tdq *tdq, struct thread *td, int flags)
("sched_add: trying to run inhibited thread"));
KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
("sched_add: bad thread state"));
- KASSERT(td->td_proc->p_sflag & PS_INMEM,
- ("sched_add: process swapped out"));
+ KASSERT(td->td_flags & TDF_INMEM,
+ ("sched_add: thread swapped out"));
ts = td->td_sched;
class = PRI_BASE(td->td_pri_class);
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 4b2991c..0ab79fb 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -400,7 +400,7 @@ kdb_thr_first(void)
p = LIST_FIRST(&allproc);
while (p != NULL) {
- if (p->p_sflag & PS_INMEM) {
+ if (p->p_flag & P_INMEM) {
thr = FIRST_THREAD_IN_PROC(p);
if (thr != NULL)
return (thr);
@@ -417,7 +417,7 @@ kdb_thr_from_pid(pid_t pid)
p = LIST_FIRST(&allproc);
while (p != NULL) {
- if (p->p_sflag & PS_INMEM && p->p_pid == pid)
+ if (p->p_flag & P_INMEM && p->p_pid == pid)
return (FIRST_THREAD_IN_PROC(p));
p = LIST_NEXT(p, p_list);
}
@@ -446,7 +446,7 @@ kdb_thr_next(struct thread *thr)
if (thr != NULL)
return (thr);
p = LIST_NEXT(p, p_list);
- if (p != NULL && (p->p_sflag & PS_INMEM))
+ if (p != NULL && (p->p_flag & P_INMEM))
thr = FIRST_THREAD_IN_PROC(p);
} while (p != NULL);
return (NULL);
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index db7c385..2a45e7d 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -148,7 +148,6 @@ ast(struct trapframe *framep)
{
struct thread *td;
struct proc *p;
- int sflag;
int flags;
int sig;
#if defined(DEV_NPX) && !defined(SMP)
@@ -174,25 +173,17 @@ ast(struct trapframe *framep)
#endif
/*
- * This updates the p_sflag's for the checks below in one
+ * This updates the td_flag's for the checks below in one
* "atomic" operation with turning off the astpending flag.
* If another AST is triggered while we are handling the
- * AST's saved in sflag, the astpending flag will be set and
+ * AST's saved in flags, the astpending flag will be set and
* ast() will be called again.
*/
- PROC_SLOCK(p);
- sflag = p->p_sflag;
- if (p->p_sflag & (PS_ALRMPEND | PS_PROFPEND))
- p->p_sflag &= ~(PS_ALRMPEND | PS_PROFPEND);
-#ifdef MAC
- if (p->p_sflag & PS_MACPEND)
- p->p_sflag &= ~PS_MACPEND;
-#endif
thread_lock(td);
- PROC_SUNLOCK(p);
flags = td->td_flags;
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
- TDF_NEEDRESCHED | TDF_INTERRUPT);
+ TDF_NEEDRESCHED | TDF_INTERRUPT | TDF_ALRMPEND | TDF_PROFPEND |
+ TDF_MACPEND);
thread_unlock(td);
PCPU_INC(cnt.v_trap);
@@ -210,7 +201,7 @@ ast(struct trapframe *framep)
td->td_profil_ticks = 0;
td->td_pflags &= ~TDP_OWEUPC;
}
- if (sflag & PS_ALRMPEND) {
+ if (flags & TDF_ALRMPEND) {
PROC_LOCK(p);
psignal(p, SIGVTALRM);
PROC_UNLOCK(p);
@@ -228,13 +219,13 @@ ast(struct trapframe *framep)
}
}
#endif
- if (sflag & PS_PROFPEND) {
+ if (flags & TDF_PROFPEND) {
PROC_LOCK(p);
psignal(p, SIGPROF);
PROC_UNLOCK(p);
}
#ifdef MAC
- if (sflag & PS_MACPEND)
+ if (flags & TDF_MACPEND)
mac_thread_userret(td);
#endif
if (flags & TDF_NEEDRESCHED) {
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 0ac9706..bb441ee 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -104,7 +104,7 @@ struct ptrace_io_desc32 {
int error; \
\
PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \
- if ((td->td_proc->p_sflag & PS_INMEM) == 0) \
+ if ((td->td_proc->p_flag & P_INMEM) == 0) \
error = EIO; \
else \
error = (action); \
diff --git a/sys/security/mac_lomac/mac_lomac.c b/sys/security/mac_lomac/mac_lomac.c
index 008a761..2186b97 100644
--- a/sys/security/mac_lomac/mac_lomac.c
+++ b/sys/security/mac_lomac/mac_lomac.c
@@ -537,8 +537,7 @@ maybe_demote(struct mac_lomac *subjlabel, struct mac_lomac *objlabel,
subj->mac_lomac.ml_rangehigh = objlabel->ml_single;
subj->mac_lomac.ml_flags |= MAC_LOMAC_FLAG_UPDATE;
thread_lock(curthread);
- curthread->td_flags |= TDF_ASTPENDING;
- curthread->td_proc->p_sflag |= PS_MACPEND;
+ curthread->td_flags |= TDF_ASTPENDING | TDF_MACPEND;
thread_unlock(curthread);
/*
diff --git a/sys/sparc64/sparc64/genassym.c b/sys/sparc64/sparc64/genassym.c
index aabe0b5..8ef97c5 100644
--- a/sys/sparc64/sparc64/genassym.c
+++ b/sys/sparc64/sparc64/genassym.c
@@ -219,7 +219,6 @@ ASSYM(MD_UTRAP, offsetof(struct mdproc, md_utrap));
ASSYM(P_COMM, offsetof(struct proc, p_comm));
ASSYM(P_MD, offsetof(struct proc, p_md));
ASSYM(P_PID, offsetof(struct proc, p_pid));
-ASSYM(P_SFLAG, offsetof(struct proc, p_sflag));
ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index a0fd533..06c06b3 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -316,6 +316,7 @@ do { \
*/
#define TDF_BORROWING 0x00000001 /* Thread is borrowing pri from another. */
#define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */
+#define TDF_INMEM 0x00000004 /* Thread's stack is in memory. */
#define TDF_SINTR 0x00000008 /* Sleep is interruptible. */
#define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */
#define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */
@@ -335,12 +336,15 @@ do { \
#define TDF_UNUSED19 0x00080000 /* Thread is sleeping on a umtx. */
#define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */
#define TDF_DBSUSPEND 0x00200000 /* Thread is suspended by debugger */
-#define TDF_UNUSED22 0x00400000 /* --available-- */
+#define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */
#define TDF_UNUSED23 0x00800000 /* --available-- */
#define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */
#define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */
#define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */
#define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */
+#define TDF_ALRMPEND 0x10000000 /* Pending SIGVTALRM needs to be posted. */
+#define TDF_PROFPEND 0x20000000 /* Pending SIGPROF needs to be posted. */
+#define TDF_MACPEND 0x40000000 /* AST-based MAC event pending. */
/*
* "Private" flags kept in td_pflags:
@@ -496,7 +500,6 @@ struct proc {
* See the td_ or ke_ versions of the same flags.
*/
int p_flag; /* (c) P_* flags. */
- int p_sflag; /* (j) PS_* flags. */
enum {
PRS_NEW = 0, /* In creation */
PRS_NORMAL, /* threads can be run. */
@@ -618,19 +621,13 @@ struct proc {
#define P_JAILED 0x1000000 /* Process is in jail. */
#define P_INEXEC 0x4000000 /* Process is in execve(). */
#define P_STATCHILD 0x8000000 /* Child process stopped or exited. */
+#define P_INMEM 0x10000000 /* Loaded into memory. */
+#define P_SWAPPINGOUT 0x20000000 /* Process is being swapped out. */
+#define P_SWAPPINGIN 0x40000000 /* Process is being swapped in. */
#define P_STOPPED (P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
#define P_SHOULDSTOP(p) ((p)->p_flag & P_STOPPED)
-/* These flags are kept in p_sflag and are protected with proc slock. */
-#define PS_INMEM 0x00001 /* Loaded into memory. */
-#define PS_ALRMPEND 0x00020 /* Pending SIGVTALRM needs to be posted. */
-#define PS_PROFPEND 0x00040 /* Pending SIGPROF needs to be posted. */
-#define PS_SWAPINREQ 0x00100 /* Swapin request due to wakeup. */
-#define PS_SWAPPINGOUT 0x00200 /* Process is being swapped out. */
-#define PS_SWAPPINGIN 0x04000 /* Process is being swapped in. */
-#define PS_MACPEND 0x08000 /* AST-based MAC event pending. */
-
/*
* These were process status values (p_stat), now they are only used in
* legacy conversion code.
@@ -743,7 +740,7 @@ MALLOC_DECLARE(M_ZOMBIE);
KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \
("PHOLD of exiting process")); \
(p)->p_lock++; \
- if (((p)->p_sflag & PS_INMEM) == 0) \
+ if (((p)->p_flag & P_INMEM) == 0) \
faultin((p)); \
} while (0)
#define PROC_ASSERT_HELD(p) do { \
diff --git a/sys/sys/user.h b/sys/sys/user.h
index 7509124..348b3e0 100644
--- a/sys/sys/user.h
+++ b/sys/sys/user.h
@@ -207,6 +207,12 @@ void fill_kinfo_proc(struct proc *, struct kinfo_proc *);
#define ki_childstime ki_rusage_ch.ru_stime
#define ki_childutime ki_rusage_ch.ru_utime
+/*
+ * Legacy PS_ flag. This moved to p_flag but is maintained for
+ * compatibility.
+ */
+#define PS_INMEM 0x00001 /* Loaded into memory. */
+
/* ki_sessflag values */
#define KI_CTTY 0x00000001 /* controlling tty vnode active */
#define KI_SLEADER 0x00000002 /* session leader */
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 3a08855..6e4f42d 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -112,7 +112,8 @@ static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)
#ifndef NO_SWAPPING
-static void swapout(struct proc *);
+static int swapout(struct proc *);
+static void swapclear(struct proc *);
#endif
@@ -601,7 +602,7 @@ faultin(p)
#ifdef NO_SWAPPING
PROC_LOCK_ASSERT(p, MA_OWNED);
- if ((p->p_sflag & PS_INMEM) == 0)
+ if ((p->p_flag & P_INMEM) == 0)
panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
struct thread *td;
@@ -611,36 +612,34 @@ faultin(p)
* If another process is swapping in this process,
* just wait until it finishes.
*/
- if (p->p_sflag & PS_SWAPPINGIN)
- msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
- else if ((p->p_sflag & PS_INMEM) == 0) {
+ if (p->p_flag & P_SWAPPINGIN) {
+ while (p->p_flag & P_SWAPPINGIN)
+ msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
+ return;
+ }
+ if ((p->p_flag & P_INMEM) == 0) {
/*
* Don't let another thread swap process p out while we are
* busy swapping it in.
*/
++p->p_lock;
- PROC_SLOCK(p);
- p->p_sflag |= PS_SWAPPINGIN;
- PROC_SUNLOCK(p);
+ p->p_flag |= P_SWAPPINGIN;
PROC_UNLOCK(p);
+ /*
+ * We hold no lock here because the list of threads
+ * can not change while all threads in the process are
+ * swapped out.
+ */
FOREACH_THREAD_IN_PROC(p, td)
vm_thread_swapin(td);
-
PROC_LOCK(p);
PROC_SLOCK(p);
- p->p_sflag &= ~PS_SWAPPINGIN;
- p->p_sflag |= PS_INMEM;
- FOREACH_THREAD_IN_PROC(p, td) {
- thread_lock(td);
- TD_CLR_SWAPPED(td);
- if (TD_CAN_RUN(td))
- setrunnable(td);
- thread_unlock(td);
- }
+ swapclear(p);
+ p->p_swtime = 0;
PROC_SUNLOCK(p);
- wakeup(&p->p_sflag);
+ wakeup(&p->p_flag);
/* Allow other threads to swap p out now. */
--p->p_lock;
@@ -684,7 +683,9 @@ loop:
ppri = INT_MIN;
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
- if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
+ PROC_LOCK(p);
+ if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
+ PROC_UNLOCK(p);
continue;
}
PROC_SLOCK(p);
@@ -697,10 +698,8 @@ loop:
thread_lock(td);
if (td->td_inhibitors == TDI_SWAPPED) {
pri = p->p_swtime + td->td_slptime;
- if ((p->p_sflag & PS_SWAPINREQ) == 0) {
+ if ((td->td_flags & TDF_SWAPINREQ) == 0)
pri -= p->p_nice * 8;
- }
-
/*
* if this thread is higher priority
* and there is enough space, then select
@@ -715,6 +714,7 @@ loop:
thread_unlock(td);
}
PROC_SUNLOCK(p);
+ PROC_UNLOCK(p);
}
sx_sunlock(&allproc_lock);
@@ -738,7 +738,7 @@ loop:
* brought this process in while we traverse all threads.
* Or, this process may even be being swapped out again.
*/
- if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
+ if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
PROC_UNLOCK(p);
thread_lock(&thread0);
proc0_rescan = 0;
@@ -746,19 +746,12 @@ loop:
goto loop;
}
- PROC_SLOCK(p);
- p->p_sflag &= ~PS_SWAPINREQ;
- PROC_SUNLOCK(p);
-
/*
* We would like to bring someone in. (only if there is space).
* [What checks the space? ]
*/
faultin(p);
PROC_UNLOCK(p);
- PROC_SLOCK(p);
- p->p_swtime = 0;
- PROC_SUNLOCK(p);
thread_lock(&thread0);
proc0_rescan = 0;
thread_unlock(&thread0);
@@ -804,7 +797,7 @@ SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
/*
* Swapout is driven by the pageout daemon. Very simple, we find eligible
- * procs and unwire their u-areas. We try to always "swap" at least one
+ * procs and swap out their stacks. We try to always "swap" at least one
* process in case we need the room for a swapin.
* If any procs have been sleeping/stopped for at least maxslp seconds,
* they are swapped. Else, we swap the longest-sleeping or stopped process,
@@ -829,13 +822,8 @@ retry:
* creation. It may have no
* address space or lock yet.
*/
- PROC_SLOCK(p);
- if (p->p_state == PRS_NEW) {
- PROC_SUNLOCK(p);
+ if (p->p_state == PRS_NEW)
continue;
- }
- PROC_SUNLOCK(p);
-
/*
* An aio daemon switches its
* address space while running.
@@ -844,7 +832,6 @@ retry:
*/
if ((p->p_flag & P_SYSTEM) != 0)
continue;
-
/*
* Do not swapout a process that
* is waiting for VM data
@@ -874,7 +861,7 @@ retry:
* skipped because of the if statement above checking
* for P_SYSTEM
*/
- if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
+ if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
goto nextproc2;
switch (p->p_state) {
@@ -890,15 +877,20 @@ retry:
* Check all the thread groups..
*/
FOREACH_THREAD_IN_PROC(p, td) {
- if (PRI_IS_REALTIME(td->td_pri_class))
+ thread_lock(td);
+ if (PRI_IS_REALTIME(td->td_pri_class)) {
+ thread_unlock(td);
goto nextproc;
+ }
/*
* Guarantee swap_idle_threshold1
* time in memory.
*/
- if (td->td_slptime < swap_idle_threshold1)
+ if (td->td_slptime < swap_idle_threshold1) {
+ thread_unlock(td);
goto nextproc;
+ }
/*
* Do not swapout a process if it is
@@ -910,8 +902,10 @@ retry:
* swapping out a thread.
*/
if ((td->td_priority) < PSOCK ||
- !thread_safetoswapout(td))
+ !thread_safetoswapout(td)) {
+ thread_unlock(td);
goto nextproc;
+ }
/*
* If the system is under memory stress,
* or if we are swapping
@@ -920,11 +914,14 @@ retry:
*/
if (((action & VM_SWAP_NORMAL) == 0) &&
(((action & VM_SWAP_IDLE) == 0) ||
- (td->td_slptime < swap_idle_threshold2)))
+ (td->td_slptime < swap_idle_threshold2))) {
+ thread_unlock(td);
goto nextproc;
+ }
if (minslptime > td->td_slptime)
minslptime = td->td_slptime;
+ thread_unlock(td);
}
/*
@@ -935,8 +932,8 @@ retry:
if ((action & VM_SWAP_NORMAL) ||
((action & VM_SWAP_IDLE) &&
(minslptime > swap_idle_threshold2))) {
- swapout(p);
- didswap++;
+ if (swapout(p) == 0)
+ didswap++;
PROC_SUNLOCK(p);
PROC_UNLOCK(p);
vm_map_unlock(&vm->vm_map);
@@ -964,13 +961,35 @@ nextproc1:
}
static void
+swapclear(p)
+ struct proc *p;
+{
+ struct thread *td;
+
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ PROC_SLOCK_ASSERT(p, MA_OWNED);
+
+ FOREACH_THREAD_IN_PROC(p, td) {
+ thread_lock(td);
+ td->td_flags |= TDF_INMEM;
+ td->td_flags &= ~TDF_SWAPINREQ;
+ TD_CLR_SWAPPED(td);
+ if (TD_CAN_RUN(td))
+ setrunnable(td);
+ thread_unlock(td);
+ }
+ p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
+ p->p_flag |= P_INMEM;
+}
+
+static int
swapout(p)
struct proc *p;
{
struct thread *td;
PROC_LOCK_ASSERT(p, MA_OWNED);
- mtx_assert(&p->p_slock, MA_OWNED | MA_NOTRECURSED);
+ PROC_SLOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
printf("swapping out %d\n", p->p_pid);
#endif
@@ -980,43 +999,46 @@ swapout(p)
* by now. Assuming that there is only one pageout daemon thread,
* this process should still be in memory.
*/
- KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
+ KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
("swapout: lost a swapout race?"));
-#if defined(INVARIANTS)
- /*
- * Make sure that all threads are safe to be swapped out.
- *
- * Alternatively, we could swap out only safe threads.
- */
- FOREACH_THREAD_IN_PROC(p, td) {
- KASSERT(thread_safetoswapout(td),
- ("swapout: there is a thread not safe for swapout"));
- }
-#endif /* INVARIANTS */
- td = FIRST_THREAD_IN_PROC(p);
- ++td->td_ru.ru_nswap;
/*
* remember the process resident count
*/
p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
-
- p->p_sflag &= ~PS_INMEM;
- p->p_sflag |= PS_SWAPPINGOUT;
- PROC_UNLOCK(p);
+ /*
+ * Check and mark all threads before we proceed.
+ */
+ p->p_flag &= ~P_INMEM;
+ p->p_flag |= P_SWAPPINGOUT;
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
+ if (!thread_safetoswapout(td)) {
+ thread_unlock(td);
+ swapclear(p);
+ return (EBUSY);
+ }
+ td->td_flags &= ~TDF_INMEM;
TD_SET_SWAPPED(td);
thread_unlock(td);
}
+ td = FIRST_THREAD_IN_PROC(p);
+ ++td->td_ru.ru_nswap;
PROC_SUNLOCK(p);
+ PROC_UNLOCK(p);
+ /*
+ * This list is stable because all threads are now prevented from
+ * running. The list is only modified in the context of a running
+ * thread in this process.
+ */
FOREACH_THREAD_IN_PROC(p, td)
vm_thread_swapout(td);
PROC_LOCK(p);
+ p->p_flag &= ~P_SWAPPINGOUT;
PROC_SLOCK(p);
- p->p_sflag &= ~PS_SWAPPINGOUT;
p->p_swtime = 0;
+ return (0);
}
#endif /* !NO_SWAPPING */
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 7bea8da..9478809 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1620,7 +1620,7 @@ vm_daemon()
* swapped out set the limit to nothing (will force a
* swap-out.)
*/
- if ((p->p_sflag & PS_INMEM) == 0)
+ if ((p->p_flag & P_INMEM) == 0)
limit = 0; /* XXX */
PROC_UNLOCK(p);
diff --git a/usr.bin/systat/pigs.c b/usr.bin/systat/pigs.c
index d6c939b..6d911d6 100644
--- a/usr.bin/systat/pigs.c
+++ b/usr.bin/systat/pigs.c
@@ -174,7 +174,7 @@ fetchpigs()
pt[i].pt_kp = &kpp[i];
pctp = &pt[i].pt_pctcpu;
ftime = kpp[i].ki_swtime;
- if (ftime == 0 || (kpp[i].ki_sflag & PS_INMEM) == 0)
+ if (ftime == 0 || (kpp[i].ki_flag & P_INMEM) == 0)
*pctp = 0;
else
*pctp = ((double) kpp[i].ki_pctcpu /
diff --git a/usr.bin/top/machine.c b/usr.bin/top/machine.c
index b7e03f4..dadb03d 100644
--- a/usr.bin/top/machine.c
+++ b/usr.bin/top/machine.c
@@ -675,7 +675,7 @@ format_next_process(caddr_t handle, char *(*get_userid)(int), int flags)
hp->remaining--;
/* get the process's command name */
- if ((pp->ki_sflag & PS_INMEM) == 0) {
+ if ((pp->ki_flag & P_INMEM) == 0) {
/*
* Print swapped processes as <pname>
*/