author		julian <julian@FreeBSD.org>	2003-02-17 09:55:10 +0000
committer	julian <julian@FreeBSD.org>	2003-02-17 09:55:10 +0000
commit		af55753a063a04a847a53c2946cd5fbf413a1e0f (patch)
tree		775429b184310789a1eb3bc1be8451f6faf1abf4 /sys/kern
parent		653bc68f5387baeae2f13b7f346d29659904e188 (diff)
Move a bunch of flags from the KSE to the thread.
I was in two minds as to where to put them in the first case..
I should have listened to the other mind.

Submitted by:	parts by davidxu@
Reviewed by:	jeff@ mini@
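The pattern throughout the diff below is mechanical: per-thread wakeup state (AST pending, reschedule needed, owed profiling ticks) moves from the scheduler entity, `struct kse` (`ke_flags` / `KEF_*`), onto `struct thread` itself (`td_flags` / `TDF_*`). The payoff is that the flags stay valid even while a thread is not currently attached to a KSE. Below is a minimal userland sketch of that pattern, not the real kernel code: the struct layouts, flag values, and the `signotify_thread()` helper are simplified placeholders for the actual sys/proc.h definitions.

	/*
	 * Illustrative sketch only -- simplified stand-ins for the
	 * period's sys/proc.h.  Flag values are placeholders.
	 */
	#include <stdio.h>

	#define TDF_ASTPENDING	0x0001	/* replaces KEF_ASTPENDING */
	#define TDF_NEEDRESCHED	0x0002	/* replaces KEF_NEEDRESCHED */
	#define TDF_OWEUPC	0x0004	/* replaces KEF_OWEUPC */

	struct kse {
		int	ke_flags;	/* KEF_* flags used to live here */
	};

	struct thread {
		struct kse *td_kse;	/* may be NULL between assignments */
		int	td_flags;	/* TDF_* flags now live here */
	};

	/*
	 * After the change, marking a pending AST touches only the thread,
	 * so it is safe even when td_kse is NULL; the old form,
	 * td->td_kse->ke_flags |= KEF_ASTPENDING, was not.
	 */
	static void
	signotify_thread(struct thread *td)
	{
		td->td_flags |= TDF_ASTPENDING;
	}

	int
	main(void)
	{
		struct thread td = { NULL, 0 };

		signotify_thread(&td);	/* no td->td_kse dereference */
		printf("td_flags = %#x\n", td.td_flags);
		return (0);
	}

This is why kse_create() and signotify() in the diff stop walking KSEs and instead set the flag on threads directly, with kse_reassign() re-deriving TDF_ASTPENDING from PS_NEEDSIGCHK when a thread is handed a KSE.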
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_clock.c	7
-rw-r--r--	sys/kern/kern_intr.c	2
-rw-r--r--	sys/kern/kern_kse.c	5
-rw-r--r--	sys/kern/kern_sig.c	6
-rw-r--r--	sys/kern/kern_switch.c	2
-rw-r--r--	sys/kern/kern_synch.c	3
-rw-r--r--	sys/kern/kern_thread.c	5
-rw-r--r--	sys/kern/ksched.c	6
-rw-r--r--	sys/kern/sched_4bsd.c	4
-rw-r--r--	sys/kern/sched_ule.c	12
-rw-r--r--	sys/kern/subr_prof.c	10
-rw-r--r--	sys/kern/subr_smp.c	4
-rw-r--r--	sys/kern/subr_trap.c	17
13 files changed, 39 insertions, 44 deletions
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index da75813..460d26f 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -174,12 +174,12 @@ hardclock_process(frame)
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
p->p_sflag |= PS_ALRMPEND;
- td->td_kse->ke_flags |= KEF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
}
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
p->p_sflag |= PS_PROFPEND;
- td->td_kse->ke_flags |= KEF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
}
}
mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
@@ -435,6 +435,7 @@ profclock(frame)
int i;
#endif
+ td = curthread;
if (CLKF_USERMODE(frame)) {
/*
* Came from user mode; CPU was in user state.
@@ -445,7 +446,7 @@ profclock(frame)
td = curthread;
if ((td->td_proc->p_sflag & PS_PROFIL) &&
!(td->td_flags & TDF_UPCALLING))
- addupc_intr(td->td_kse, CLKF_PC(frame), 1);
+ addupc_intr(td, CLKF_PC(frame), 1);
}
#ifdef GPROF
else {
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index bd3d531..5330868 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -400,7 +400,7 @@ ithread_schedule(struct ithd *ithread, int do_switch)
ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
mi_switch();
} else {
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
}
} else {
CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 02812d5..7ec5b57 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -652,8 +652,6 @@ kse_create(struct thread *td, struct kse_create_args *uap)
#endif
mtx_lock_spin(&sched_lock);
kse_link(newke, newkg);
- if (p->p_sflag & PS_NEEDSIGCHK)
- newke->ke_flags |= KEF_ASTPENDING;
/* Add engine */
kse_reassign(newke);
mtx_unlock_spin(&sched_lock);
@@ -1065,8 +1063,7 @@ thread_statclock(int user)
return (-1);
if (user) {
/* Current always do via ast() */
- td->td_kse->ke_flags |= KEF_ASTPENDING; /* XXX TDF_ASTPENDING */
- td->td_flags |= TDF_USTATCLOCK;
+ td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
td->td_uuticks++;
} else {
if (td->td_mailbox != NULL)
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 0f53b44..8aaf3d9 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -193,7 +193,7 @@ cursig(struct thread *td)
void
signotify(struct proc *p)
{
- struct kse *ke;
+ struct thread *td;
struct ksegrp *kg;
PROC_LOCK_ASSERT(p, MA_OWNED);
@@ -202,8 +202,8 @@ signotify(struct proc *p)
p->p_sflag |= PS_NEEDSIGCHK;
/* XXXKSE for now punish all KSEs */
FOREACH_KSEGRP_IN_PROC(p, kg) {
- FOREACH_KSE_IN_GROUP(kg, ke) {
- ke->ke_flags |= KEF_ASTPENDING;
+ FOREACH_THREAD_IN_GROUP(kg, td) {
+ td->td_flags |= TDF_ASTPENDING;
}
}
}
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 5cefb1c..ea4e5ba 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -220,6 +220,8 @@ kse_reassign(struct kse *ke)
kg->kg_last_assigned = td;
td->td_kse = ke;
ke->ke_thread = td;
+ if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
+ td->td_flags |= TDF_ASTPENDING;
sched_add(ke);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
return;
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 1c4412e..9a1509f 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -448,7 +448,6 @@ mi_switch(void)
struct bintime new_switchtime;
struct thread *td = curthread; /* XXX */
struct proc *p = td->td_proc; /* XXX */
- struct kse *ke = td->td_kse;
u_int sched_nest;
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
@@ -489,7 +488,7 @@ mi_switch(void)
if (p->p_cpulimit != RLIM_INFINITY &&
p->p_runtime.sec > p->p_cpulimit) {
p->p_sflag |= PS_XCPU;
- ke->ke_flags |= KEF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
}
/*
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 02812d5..7ec5b57 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -652,8 +652,6 @@ kse_create(struct thread *td, struct kse_create_args *uap)
#endif
mtx_lock_spin(&sched_lock);
kse_link(newke, newkg);
- if (p->p_sflag & PS_NEEDSIGCHK)
- newke->ke_flags |= KEF_ASTPENDING;
/* Add engine */
kse_reassign(newke);
mtx_unlock_spin(&sched_lock);
@@ -1065,8 +1063,7 @@ thread_statclock(int user)
return (-1);
if (user) {
/* Current always do via ast() */
- td->td_kse->ke_flags |= KEF_ASTPENDING; /* XXX TDF_ASTPENDING */
- td->td_flags |= TDF_USTATCLOCK;
+ td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
td->td_uuticks++;
} else {
if (td->td_mailbox != NULL)
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index ba53cc2..b51b314 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -186,7 +186,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
rtp_to_pri(&rtp, kg);
FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
if (TD_IS_RUNNING(td)) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
} else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
sched_prio(td, kg->kg_user_pri);
@@ -216,7 +216,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
*/
FOREACH_THREAD_IN_GROUP(kg, td) {
if (TD_IS_RUNNING(td)) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
} else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
sched_prio(td, kg->kg_user_pri);
@@ -242,7 +242,7 @@ int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *t
int ksched_yield(register_t *ret, struct ksched *ksched)
{
mtx_lock_spin(&sched_lock);
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
mtx_unlock_spin(&sched_lock);
return 0;
}
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 325911f..605fcbd 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -123,7 +123,7 @@ maybe_resched(struct thread *td)
mtx_assert(&sched_lock, MA_OWNED);
if (td->td_priority < curthread->td_priority && curthread->td_kse)
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
}
/*
@@ -535,7 +535,7 @@ sched_switchout(struct thread *td)
td->td_lastcpu = ke->ke_oncpu;
td->td_last_kse = ke;
ke->ke_oncpu = NOCPU;
- ke->ke_flags &= ~KEF_NEEDRESCHED;
+ td->td_flags &= ~TDF_NEEDRESCHED;
/*
* At the last moment, if this thread is still marked RUNNING,
* then put it back on the run queue as it has not been suspended
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index b96725b..d6a2910 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -492,7 +492,7 @@ sched_switchout(struct thread *td)
td->td_last_kse = ke;
td->td_lastcpu = ke->ke_oncpu;
ke->ke_oncpu = NOCPU;
- ke->ke_flags &= ~KEF_NEEDRESCHED;
+ td->td_flags &= ~TDF_NEEDRESCHED;
if (TD_IS_RUNNING(td)) {
setrunqueue(td);
@@ -518,7 +518,7 @@ sched_switchin(struct thread *td)
#if SCHED_STRICT_RESCHED
if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
td->td_priority != td->td_ksegrp->kg_user_pri)
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
#endif
}
@@ -530,7 +530,7 @@ sched_nice(struct ksegrp *kg, int nice)
kg->kg_nice = nice;
sched_priority(kg);
FOREACH_THREAD_IN_GROUP(kg, td) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
}
}
@@ -584,7 +584,7 @@ sched_wakeup(struct thread *td)
setrunqueue(td);
#if SCHED_STRICT_RESCHED
if (td->td_priority < curthread->td_priority)
- curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ curthread->td_flags |= TDF_NEEDRESCHED;
#endif
}
@@ -686,7 +686,7 @@ sched_clock(struct thread *td)
if (nke && nke->ke_thread &&
nke->ke_thread->td_priority < td->td_priority)
- ke->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
#endif
/*
* We used a tick charge it to the ksegrp so that we can compute our
@@ -704,7 +704,7 @@ sched_clock(struct thread *td)
if (ke->ke_slice == 0) {
td->td_priority = sched_priority(kg);
ke->ke_slice = sched_slice(kg);
- ke->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
ke->ke_runq = NULL;
}
}
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index b8cd61e..894309d 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -478,7 +478,7 @@ done2:
* inaccurate.
*/
void
-addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks)
+addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
{
struct uprof *prof;
caddr_t addr;
@@ -487,7 +487,7 @@ addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks)
if (ticks == 0)
return;
- prof = &ke->ke_proc->p_stats->p_prof;
+ prof = &td->td_proc->p_stats->p_prof;
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
return; /* out of range; ignore */
@@ -497,7 +497,7 @@ addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks)
mtx_lock_spin(&sched_lock);
prof->pr_addr = pc;
prof->pr_ticks = ticks;
- ke->ke_flags |= KEF_OWEUPC | KEF_ASTPENDING ;
+ td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING ;
mtx_unlock_spin(&sched_lock);
}
}
@@ -508,9 +508,9 @@ addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks)
* XXXKSE, don't use kse unless we got sched lock.
*/
void
-addupc_task(struct kse *ke, uintptr_t pc, u_int ticks)
+addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
{
- struct proc *p = ke->ke_proc;
+ struct proc *p = td->td_proc;
struct uprof *prof;
caddr_t addr;
u_int i;
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 7965b78..908d587 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -123,7 +123,7 @@ forward_signal(struct thread *td)
int id;
/*
- * signotify() has already set KEF_ASTPENDING and PS_NEEDSIGCHECK on
+ * signotify() has already set TDF_ASTPENDING and PS_NEEDSIGCHECK on
* this process, so all we need to do is poke it if it is currently
* executing so that it executes ast().
*/
@@ -169,7 +169,7 @@ forward_roundrobin(void)
id = pc->pc_cpumask;
if (id != PCPU_GET(cpumask) && (id & stopped_cpus) == 0 &&
td != pc->pc_idlethread) {
- td->td_kse->ke_flags |= KEF_NEEDRESCHED;
+ td->td_flags |= TDF_NEEDRESCHED;
map |= id;
}
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 47e8472..7ffb968 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -73,7 +73,6 @@ userret(td, frame, oticks)
u_int oticks;
{
struct proc *p = td->td_proc;
- struct kse *ke = td->td_kse;
CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
@@ -83,7 +82,7 @@ userret(td, frame, oticks)
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (SIGPENDING(p) && ((p->p_sflag & PS_NEEDSIGCHK) == 0 ||
- (td->td_kse->ke_flags & KEF_ASTPENDING) == 0))
+ (td->td_flags & TDF_ASTPENDING) == 0))
printf("failed to set signal flags properly for ast()\n");
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
@@ -126,7 +125,7 @@ userret(td, frame, oticks)
mtx_lock_spin(&sched_lock);
ticks = td->td_sticks - oticks;
mtx_unlock_spin(&sched_lock);
- addupc_task(ke, TRAPF_PC(frame), (u_int)ticks * psratio);
+ addupc_task(td, TRAPF_PC(frame), (u_int)ticks * psratio);
}
}
@@ -176,16 +175,16 @@ ast(struct trapframe *framep)
mtx_lock_spin(&sched_lock);
ke = td->td_kse;
sticks = td->td_sticks;
- flags = ke->ke_flags;
+ flags = td->td_flags;
sflag = p->p_sflag;
p->p_sflag &= ~(PS_ALRMPEND | PS_NEEDSIGCHK | PS_PROFPEND | PS_XCPU);
#ifdef MAC
p->p_sflag &= ~PS_MACPEND;
#endif
- ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED | KEF_OWEUPC);
+ td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDRESCHED | TDF_OWEUPC);
cnt.v_soft++;
prticks = 0;
- if (flags & KEF_OWEUPC && sflag & PS_PROFIL) {
+ if (flags & TDF_OWEUPC && sflag & PS_PROFIL) {
prticks = p->p_stats->p_prof.pr_ticks;
p->p_stats->p_prof.pr_ticks = 0;
}
@@ -200,8 +199,8 @@ ast(struct trapframe *framep)
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
- if (flags & KEF_OWEUPC && sflag & PS_PROFIL)
- addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks);
+ if (flags & TDF_OWEUPC && sflag & PS_PROFIL)
+ addupc_task(td, p->p_stats->p_prof.pr_addr, prticks);
if (sflag & PS_ALRMPEND) {
PROC_LOCK(p);
psignal(p, SIGVTALRM);
@@ -240,7 +239,7 @@ ast(struct trapframe *framep)
if (sflag & PS_MACPEND)
mac_thread_userret(td);
#endif
- if (flags & KEF_NEEDRESCHED) {
+ if (flags & TDF_NEEDRESCHED) {
mtx_lock_spin(&sched_lock);
sched_prio(td, kg->kg_user_pri);
p->p_stats->p_ru.ru_nivcsw++;