summary | refs | log | tree | commit | diff | stats
path: root/sys/kern
diff options
context:
space:
mode:
author: jeff <jeff@FreeBSD.org> 2007-09-17 05:31:39 +0000
committer: jeff <jeff@FreeBSD.org> 2007-09-17 05:31:39 +0000
commit: 3fc0f8b973fdc2f392a3234d653e09d71e2aab98 (patch)
tree: e572a7b77f24339d2dc6ebf6b83c1e2c1ce0f6a5 /sys/kern
parent: 74666fdfce0e06484d5f82e8fced0c16c06477d1 (diff)
download: FreeBSD-src-3fc0f8b973fdc2f392a3234d653e09d71e2aab98.zip
download: FreeBSD-src-3fc0f8b973fdc2f392a3234d653e09d71e2aab98.tar.gz
- Move all of the PS_ flags into either p_flag or td_flags.
- p_sflag was mostly protected by PROC_LOCK rather than the PROC_SLOCK or
  previously the sched_lock.  These bugs have existed for some time.
- Allow swapout to try each thread in a process individually and then
  swapin the whole process if any of these fail.  This allows us to move
  most scheduler related swap flags into td_flags.
- Keep ki_sflag for backwards compat but change all in source tools to
  use the new and more correct location of P_INMEM.

Reported by:	pho
Reviewed by:	attilio, kib
Approved by:	re (kensmith)
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/init_main.c	| 9
-rw-r--r--	sys/kern/kern_clock.c	| 19
-rw-r--r--	sys/kern/kern_fork.c	| 6
-rw-r--r--	sys/kern/kern_kse.c	| 2
-rw-r--r--	sys/kern/kern_proc.c	| 7
-rw-r--r--	sys/kern/kern_switch.c	| 4
-rw-r--r--	sys/kern/kern_synch.c	| 16
-rw-r--r--	sys/kern/kern_thread.c	| 2
-rw-r--r--	sys/kern/sched_4bsd.c	| 16
-rw-r--r--	sys/kern/sched_ule.c	| 4
-rw-r--r--	sys/kern/subr_kdb.c	| 6
-rw-r--r--	sys/kern/subr_trap.c	| 23
-rw-r--r--	sys/kern/sys_process.c	| 2
13 files changed, 47 insertions, 69 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index c0eac133..0eb26ec 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -415,8 +415,7 @@ proc0_init(void *dummy __unused)
session0.s_leader = p;
p->p_sysent = &null_sysvec;
- p->p_flag = P_SYSTEM;
- p->p_sflag = PS_INMEM;
+ p->p_flag = P_SYSTEM | P_INMEM;
p->p_state = PRS_NORMAL;
knlist_init(&p->p_klist, &p->p_mtx, NULL, NULL, NULL);
STAILQ_INIT(&p->p_ktr);
@@ -428,6 +427,7 @@ proc0_init(void *dummy __unused)
td->td_priority = PVM;
td->td_base_pri = PUSER;
td->td_oncpu = 0;
+ td->td_flags = TDF_INMEM;
p->p_peers = 0;
p->p_leader = p;
@@ -710,7 +710,7 @@ create_init(const void *udata __unused)
/* divorce init's credentials from the kernel's */
newcred = crget();
PROC_LOCK(initproc);
- initproc->p_flag |= P_SYSTEM;
+ initproc->p_flag |= P_SYSTEM | P_INMEM;
oldcred = initproc->p_ucred;
crcopy(newcred, oldcred);
#ifdef MAC
@@ -723,9 +723,6 @@ create_init(const void *udata __unused)
PROC_UNLOCK(initproc);
crfree(oldcred);
cred_update_thread(FIRST_THREAD_IN_PROC(initproc));
- PROC_SLOCK(initproc);
- initproc->p_sflag |= PS_INMEM;
- PROC_SUNLOCK(initproc);
cpu_set_fork_handler(FIRST_THREAD_IN_PROC(initproc), start_init, NULL);
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index d9a82d2..210f5d3 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -201,34 +201,29 @@ hardclock_cpu(int usermode)
struct pstats *pstats;
struct thread *td = curthread;
struct proc *p = td->td_proc;
- int ast;
+ int flags;
/*
* Run current process's virtual and profile time, as needed.
*/
pstats = p->p_stats;
- ast = 0;
+ flags = 0;
if (usermode &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
PROC_SLOCK(p);
- if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- p->p_sflag |= PS_ALRMPEND;
- ast = 1;
- }
+ if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
+ flags |= TDF_ALRMPEND | TDF_ASTPENDING;
PROC_SUNLOCK(p);
}
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
PROC_SLOCK(p);
- if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- p->p_sflag |= PS_PROFPEND;
- ast = 1;
- }
+ if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
+ flags |= TDF_PROFPEND | TDF_ASTPENDING;
PROC_SUNLOCK(p);
}
thread_lock(td);
sched_tick();
- if (ast)
- td->td_flags |= TDF_ASTPENDING;
+ td->td_flags |= flags;
thread_unlock(td);
#ifdef HWPMC_HOOKS
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 1a7f4a7..f8fc9de 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -493,17 +493,15 @@ again:
td2->td_sigstk = td->td_sigstk;
td2->td_sigmask = td->td_sigmask;
+ td2->td_flags = TDF_INMEM;
/*
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
*/
- p2->p_flag = 0;
+ p2->p_flag = P_INMEM;
if (p1->p_flag & P_PROFIL)
startprofclock(p2);
- PROC_SLOCK(p2);
- p2->p_sflag = PS_INMEM;
- PROC_SUNLOCK(p2);
td2->td_ucred = crhold(p2->p_ucred);
pargs_hold(p2->p_args);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index acdb55d..4174bde 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -1002,6 +1002,7 @@ thread_alloc_spare(struct thread *td)
__rangeof(struct thread, td_startzero, td_endzero));
spare->td_proc = td->td_proc;
spare->td_ucred = crhold(td->td_ucred);
+ spare->td_flags = TDF_INMEM;
}
/*
@@ -1042,7 +1043,6 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
/* Let the new thread become owner of the upcall */
ku->ku_owner = td2;
td2->td_upcall = ku;
- td2->td_flags = 0;
td2->td_pflags = TDP_SA|TDP_UPCALLING;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index f9f993d..bec9ee3 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -690,14 +690,17 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
kp->ki_ssize = vm->vm_ssize;
} else if (p->p_state == PRS_ZOMBIE)
kp->ki_stat = SZOMB;
- kp->ki_sflag = p->p_sflag;
+ if (kp->ki_flag & P_INMEM)
+ kp->ki_sflag = PS_INMEM;
+ else
+ kp->ki_sflag = 0;
kp->ki_swtime = p->p_swtime;
kp->ki_pid = p->p_pid;
kp->ki_nice = p->p_nice;
rufetch(p, &kp->ki_rusage);
kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
PROC_SUNLOCK(p);
- if ((p->p_sflag & PS_INMEM) && p->p_stats != NULL) {
+ if ((p->p_flag & P_INMEM) && p->p_stats != NULL) {
kp->ki_start = p->p_stats->p_start;
timevaladd(&kp->ki_start, &boottime);
PROC_SLOCK(p);
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index aa75dab..e16e38e 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -558,8 +558,8 @@ runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
struct rqhead *rqh;
u_char pri;
- KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
- ("runq_remove_idx: process swapped out"));
+ KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+ ("runq_remove_idx: thread swapped out"));
pri = ts->ts_rqindex;
KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
rqh = &rq->rq_queues[pri];
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 96b4eda..8cdd9fe 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -463,16 +463,10 @@ mi_switch(int flags, struct thread *newtd)
void
setrunnable(struct thread *td)
{
- struct proc *p;
- p = td->td_proc;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- switch (p->p_state) {
- case PRS_ZOMBIE:
- panic("setrunnable(1)");
- default:
- break;
- }
+ KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
+ ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
switch (td->td_state) {
case TDS_RUNNING:
case TDS_RUNQ:
@@ -491,9 +485,9 @@ setrunnable(struct thread *td)
printf("state is 0x%x", td->td_state);
panic("setrunnable(2)");
}
- if ((p->p_sflag & PS_INMEM) == 0) {
- if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
- p->p_sflag |= PS_SWAPINREQ;
+ if ((td->td_flags & TDF_INMEM) == 0) {
+ if ((td->td_flags & TDF_SWAPINREQ) == 0) {
+ td->td_flags |= TDF_SWAPINREQ;
/*
* due to a LOR between the thread lock and
* the sleepqueue chain locks, use
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index f12e724..a8a3581 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -531,7 +531,7 @@ thread_link(struct thread *td, struct proc *p)
*/
td->td_state = TDS_INACTIVE;
td->td_proc = p;
- td->td_flags = 0;
+ td->td_flags = TDF_INMEM;
LIST_INIT(&td->td_contested);
sigqueue_init(&td->td_sigqueue, p);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index d96c27e..3692f0f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1086,8 +1086,8 @@ sched_add(struct thread *td, int flags)
("sched_add: trying to run inhibited thread"));
KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
("sched_add: bad thread state"));
- KASSERT(td->td_proc->p_sflag & PS_INMEM,
- ("sched_add: process swapped out"));
+ KASSERT(td->td_flags & TDF_INMEM,
+ ("sched_add: thread swapped out"));
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
curthread->td_proc->p_comm);
@@ -1155,8 +1155,8 @@ sched_add(struct thread *td, int flags)
("sched_add: trying to run inhibited thread"));
KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
("sched_add: bad thread state"));
- KASSERT(td->td_proc->p_sflag & PS_INMEM,
- ("sched_add: process swapped out"));
+ KASSERT(td->td_flags & TDF_INMEM,
+ ("sched_add: thread swapped out"));
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
curthread->td_proc->p_comm);
@@ -1200,8 +1200,8 @@ sched_rem(struct thread *td)
struct td_sched *ts;
ts = td->td_sched;
- KASSERT(td->td_proc->p_sflag & PS_INMEM,
- ("sched_rem: process swapped out"));
+ KASSERT(td->td_flags & TDF_INMEM,
+ ("sched_rem: thread swapped out"));
KASSERT(TD_ON_RUNQ(td),
("sched_rem: thread not on run queue"));
mtx_assert(&sched_lock, MA_OWNED);
@@ -1253,8 +1253,8 @@ sched_choose(void)
runq_remove(rq, ts);
ts->ts_flags |= TSF_DIDRUN;
- KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
- ("sched_choose: process swapped out"));
+ KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+ ("sched_choose: thread swapped out"));
return (ts->ts_thread);
}
return (PCPU_GET(idlethread));
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index a2c33dd..88c937b 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -2287,8 +2287,8 @@ tdq_add(struct tdq *tdq, struct thread *td, int flags)
("sched_add: trying to run inhibited thread"));
KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
("sched_add: bad thread state"));
- KASSERT(td->td_proc->p_sflag & PS_INMEM,
- ("sched_add: process swapped out"));
+ KASSERT(td->td_flags & TDF_INMEM,
+ ("sched_add: thread swapped out"));
ts = td->td_sched;
class = PRI_BASE(td->td_pri_class);
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
index 4b2991c..0ab79fb 100644
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -400,7 +400,7 @@ kdb_thr_first(void)
p = LIST_FIRST(&allproc);
while (p != NULL) {
- if (p->p_sflag & PS_INMEM) {
+ if (p->p_flag & P_INMEM) {
thr = FIRST_THREAD_IN_PROC(p);
if (thr != NULL)
return (thr);
@@ -417,7 +417,7 @@ kdb_thr_from_pid(pid_t pid)
p = LIST_FIRST(&allproc);
while (p != NULL) {
- if (p->p_sflag & PS_INMEM && p->p_pid == pid)
+ if (p->p_flag & P_INMEM && p->p_pid == pid)
return (FIRST_THREAD_IN_PROC(p));
p = LIST_NEXT(p, p_list);
}
@@ -446,7 +446,7 @@ kdb_thr_next(struct thread *thr)
if (thr != NULL)
return (thr);
p = LIST_NEXT(p, p_list);
- if (p != NULL && (p->p_sflag & PS_INMEM))
+ if (p != NULL && (p->p_flag & P_INMEM))
thr = FIRST_THREAD_IN_PROC(p);
} while (p != NULL);
return (NULL);
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index db7c385..2a45e7d 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -148,7 +148,6 @@ ast(struct trapframe *framep)
{
struct thread *td;
struct proc *p;
- int sflag;
int flags;
int sig;
#if defined(DEV_NPX) && !defined(SMP)
@@ -174,25 +173,17 @@ ast(struct trapframe *framep)
#endif
/*
- * This updates the p_sflag's for the checks below in one
+ * This updates the td_flag's for the checks below in one
* "atomic" operation with turning off the astpending flag.
* If another AST is triggered while we are handling the
- * AST's saved in sflag, the astpending flag will be set and
+ * AST's saved in flags, the astpending flag will be set and
* ast() will be called again.
*/
- PROC_SLOCK(p);
- sflag = p->p_sflag;
- if (p->p_sflag & (PS_ALRMPEND | PS_PROFPEND))
- p->p_sflag &= ~(PS_ALRMPEND | PS_PROFPEND);
-#ifdef MAC
- if (p->p_sflag & PS_MACPEND)
- p->p_sflag &= ~PS_MACPEND;
-#endif
thread_lock(td);
- PROC_SUNLOCK(p);
flags = td->td_flags;
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
- TDF_NEEDRESCHED | TDF_INTERRUPT);
+ TDF_NEEDRESCHED | TDF_INTERRUPT | TDF_ALRMPEND | TDF_PROFPEND |
+ TDF_MACPEND);
thread_unlock(td);
PCPU_INC(cnt.v_trap);
@@ -210,7 +201,7 @@ ast(struct trapframe *framep)
td->td_profil_ticks = 0;
td->td_pflags &= ~TDP_OWEUPC;
}
- if (sflag & PS_ALRMPEND) {
+ if (flags & TDF_ALRMPEND) {
PROC_LOCK(p);
psignal(p, SIGVTALRM);
PROC_UNLOCK(p);
@@ -228,13 +219,13 @@ ast(struct trapframe *framep)
}
}
#endif
- if (sflag & PS_PROFPEND) {
+ if (flags & TDF_PROFPEND) {
PROC_LOCK(p);
psignal(p, SIGPROF);
PROC_UNLOCK(p);
}
#ifdef MAC
- if (sflag & PS_MACPEND)
+ if (flags & TDF_MACPEND)
mac_thread_userret(td);
#endif
if (flags & TDF_NEEDRESCHED) {
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 0ac9706..bb441ee 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -104,7 +104,7 @@ struct ptrace_io_desc32 {
int error; \
\
PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \
- if ((td->td_proc->p_sflag & PS_INMEM) == 0) \
+ if ((td->td_proc->p_flag & P_INMEM) == 0) \
error = EIO; \
else \
error = (action); \
OpenPOWER on IntegriCloud