author     jeff <jeff@FreeBSD.org>    2003-10-16 08:39:15 +0000
committer  jeff <jeff@FreeBSD.org>    2003-10-16 08:39:15 +0000
commit     991febf6dd83fc12812470ce7a43503ff2b86f2c (patch)
tree       4504a4128c016153ce48c99214c1d950dcd4159e /sys
parent     bf29a9dd12c09202074e962807f15097f0ae53e3 (diff)
- Update the sched API. sched_{add,rem,clock,pctcpu} now all accept a td
  argument rather than a kse.
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_clock.c   |  6
-rw-r--r--  sys/kern/kern_proc.c    |  9
-rw-r--r--  sys/kern/kern_switch.c  | 18
-rw-r--r--  sys/kern/sched_4bsd.c   | 22
-rw-r--r--  sys/kern/sched_ule.c    | 21
-rw-r--r--  sys/sys/sched.h         |  8
6 files changed, 46 insertions(+), 38 deletions(-)
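
Before reading file by file, it helps to see the shape of the change: the four
scheduler entry points in sys/sys/sched.h (the hunk at the bottom of this diff)
switch from taking a KSE to taking a thread. Collected here as a standalone C
fragment (the fixpt_t typedef is a stand-in; the kernel gets the real one from
sys/types.h):

typedef unsigned int fixpt_t;	/* stand-in for the kernel's fixed-point type */
struct kse;
struct thread;

/* Before this commit the entry points operated on a KSE:
 *	void	sched_add(struct kse *ke);
 *	void	sched_clock(struct kse *ke);
 *	void	sched_rem(struct kse *ke);
 *	fixpt_t	sched_pctcpu(struct kse *ke);
 * After it, they take the thread and derive the KSE internally: */
void	sched_add(struct thread *td);
void	sched_clock(struct thread *td);
void	sched_rem(struct thread *td);
fixpt_t	sched_pctcpu(struct thread *td);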
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index d0558cd..c09e9d6 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -385,7 +385,6 @@ statclock(frame)
struct rusage *ru;
struct vmspace *vm;
struct thread *td;
- struct kse *ke;
struct proc *p;
long rss;
@@ -393,7 +392,6 @@ statclock(frame)
p = td->td_proc;
mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
- ke = td->td_kse;
if (CLKF_USERMODE(frame)) {
/*
* Charge the time as appropriate.
@@ -401,7 +399,7 @@ statclock(frame)
if (p->p_flag & P_SA)
thread_statclock(1);
p->p_uticks++;
- if (ke->ke_ksegrp->kg_nice > NZERO)
+ if (td->td_ksegrp->kg_nice > NZERO)
cp_time[CP_NICE]++;
else
cp_time[CP_USER]++;
@@ -433,7 +431,7 @@ statclock(frame)
}
}
- sched_clock(ke);
+ sched_clock(td);
/* Update resource usage integrals and maximums. */
if ((pstats = p->p_stats) != NULL &&
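
The kern_clock.c hunks show the caller side of the change: statclock() no
longer fetches td_kse just to hand it to the scheduler; it charges the tick
through td->td_ksegrp and passes td to sched_clock() directly. A compilable toy
sketch of that caller pattern, with mock structures standing in for the
kernel's (statclock_sketch and the printed strings are illustrative only):

#include <stdio.h>

#define NZERO 0	/* FreeBSD's nice baseline, normally from sys/param.h */

struct ksegrp { int kg_nice; };
struct thread { struct ksegrp *td_ksegrp; };

/* Mock of the scheduler hook that now takes a thread. */
static void
sched_clock(struct thread *td)
{
	printf("tick charged, nice=%d\n", td->td_ksegrp->kg_nice);
}

/* Caller pattern after the change: no struct kse lookup needed. */
static void
statclock_sketch(struct thread *td)
{
	if (td->td_ksegrp->kg_nice > NZERO)
		puts("tick -> CP_NICE");
	else
		puts("tick -> CP_USER");
	sched_clock(td);
}

int
main(void)
{
	struct ksegrp kg = { .kg_nice = 0 };
	struct thread td = { .td_ksegrp = &kg };

	statclock_sketch(&td);
	return (0);
}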
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 74bcea9..7becb69 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -763,16 +763,13 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_tdflags = td->td_flags;
kp->ki_pcb = td->td_pcb;
kp->ki_kstack = (void *)td->td_kstack;
+ kp->ki_pctcpu = sched_pctcpu(td);
/* Things in the kse */
-
- if (ke) {
+ if (ke)
kp->ki_rqindex = ke->ke_rqindex;
- kp->ki_pctcpu = sched_pctcpu(ke);
- } else {
+ else
kp->ki_rqindex = 0;
- kp->ki_pctcpu = 0;
- }
} else {
kp->ki_stat = SZOMB;
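
This kern_proc.c hunk is a simplification the new API makes possible: because
sched_pctcpu() now takes the thread, ki_pctcpu can be filled unconditionally,
and only the run-queue index still depends on whether a KSE is attached. A
compilable toy version of the resulting logic (all types here are small mocks;
fill_sketch is a hypothetical name):

#include <stdio.h>

typedef unsigned int fixpt_t;

struct kse { int ke_rqindex; };
struct thread { struct kse *td_kse; };
struct kinfo_proc { int ki_rqindex; fixpt_t ki_pctcpu; };

/* Mock: per-thread %CPU no longer needs a KSE. */
static fixpt_t
sched_pctcpu(struct thread *td)
{
	(void)td;
	return (42);
}

static void
fill_sketch(struct thread *td, struct kinfo_proc *kp)
{
	struct kse *ke = td->td_kse;

	kp->ki_pctcpu = sched_pctcpu(td);		/* unconditional now */
	kp->ki_rqindex = ke ? ke->ke_rqindex : 0;	/* only this needs ke */
}

int
main(void)
{
	struct thread td = { .td_kse = NULL };
	struct kinfo_proc kp;

	fill_sketch(&td, &kp);
	printf("rqindex=%d pctcpu=%u\n", kp.ki_rqindex, kp.ki_pctcpu);
	return (0);
}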
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 306ad63..33e27bf 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -210,7 +210,7 @@ kse_reassign(struct kse *ke)
kg->kg_last_assigned = td;
td->td_kse = ke;
ke->ke_thread = td;
- sched_add(ke);
+ sched_add(td);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
return;
}
@@ -249,7 +249,7 @@ remrunqueue(struct thread *td)
*/
if ((td->td_proc->p_flag & P_SA) == 0) {
/* Bring its kse with it, leave the thread attached */
- sched_rem(ke);
+ sched_rem(td);
ke->ke_state = KES_THREAD;
return;
}
@@ -262,7 +262,7 @@ remrunqueue(struct thread *td)
* KSE to the next available thread. Then, we should
* see if we need to move the KSE in the run queues.
*/
- sched_rem(ke);
+ sched_rem(td);
ke->ke_state = KES_THREAD;
td2 = kg->kg_last_assigned;
KASSERT((td2 != NULL), ("last assigned has wrong value"));
@@ -294,8 +294,8 @@ adjustrunqueue( struct thread *td, int newpri)
/* We only care about the kse in the run queue. */
td->td_priority = newpri;
if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
- sched_rem(ke);
- sched_add(ke);
+ sched_rem(td);
+ sched_add(td);
}
return;
}
@@ -309,7 +309,7 @@ adjustrunqueue( struct thread *td, int newpri)
kg->kg_last_assigned =
TAILQ_PREV(td, threadqueue, td_runq);
}
- sched_rem(ke);
+ sched_rem(td);
}
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
td->td_priority = newpri;
@@ -337,7 +337,7 @@ setrunqueue(struct thread *td)
* and the KSE is always already attached.
* Totally ignore the ksegrp run queue.
*/
- sched_add(td->td_kse);
+ sched_add(td);
return;
}
@@ -360,7 +360,7 @@ setrunqueue(struct thread *td)
ke->ke_thread = NULL;
tda = kg->kg_last_assigned =
TAILQ_PREV(tda, threadqueue, td_runq);
- sched_rem(ke);
+ sched_rem(td);
}
} else {
/*
@@ -419,7 +419,7 @@ setrunqueue(struct thread *td)
td2->td_kse = ke;
ke->ke_thread = td2;
}
- sched_add(ke);
+ sched_add(ke->ke_thread);
}
}
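
A representative call site in kern_switch.c is adjustrunqueue(): when a
priority change would move a thread to a different run-queue bucket, the thread
is pulled off and re-added through the scheduler, now passing td to both calls.
A self-contained sketch of that requeue decision (structures are mocks; RQ_PPQ
really comes from sys/runq.h, though the value used here is only illustrative):

#include <stdio.h>

#define RQ_PPQ 4	/* priorities per queue; illustrative value */

struct thread { int td_priority; int rqindex; };

static void
sched_rem(struct thread *td)
{
	printf("rem: leaving queue %d\n", td->rqindex);
}

static void
sched_add(struct thread *td)
{
	td->rqindex = td->td_priority / RQ_PPQ;
	printf("add: entering queue %d\n", td->rqindex);
}

/* Requeue only when the new priority lands in a different bucket. */
static void
adjustrunqueue_sketch(struct thread *td, int newpri)
{
	td->td_priority = newpri;
	if (td->rqindex != newpri / RQ_PPQ) {
		sched_rem(td);
		sched_add(td);
	}
}

int
main(void)
{
	struct thread td = { .td_priority = 8, .rqindex = 2 };

	adjustrunqueue_sketch(&td, 17);	/* 17 / 4 == 4 != 2: requeue */
	return (0);
}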
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 67257bb..7f11c4b 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -441,14 +441,14 @@ sched_rr_interval(void)
* run much recently, and to round-robin among other processes.
*/
void
-sched_clock(struct kse *ke)
+sched_clock(struct thread *td)
{
struct ksegrp *kg;
- struct thread *td;
+ struct kse *ke;
mtx_assert(&sched_lock, MA_OWNED);
- kg = ke->ke_ksegrp;
- td = ke->ke_thread;
+ kg = td->td_ksegrp;
+ ke = td->td_kse;
ke->ke_sched->ske_cpticks++;
kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
@@ -620,8 +620,11 @@ sched_wakeup(struct thread *td)
}
void
-sched_add(struct kse *ke)
+sched_add(struct thread *td)
{
+ struct kse *ke;
+
+ ke = td->td_kse;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
KASSERT((ke->ke_thread->td_kse != NULL),
@@ -638,8 +641,11 @@ sched_add(struct kse *ke)
}
void
-sched_rem(struct kse *ke)
+sched_rem(struct thread *td)
{
+ struct kse *ke;
+
+ ke = td->td_kse;
KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
("runq_remove: process swapped out"));
KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
@@ -714,7 +720,7 @@ sched_sizeof_thread(void)
}
fixpt_t
-sched_pctcpu(struct kse *ke)
+sched_pctcpu(struct thread *td)
{
- return (ke->ke_pctcpu);
+ return (td->td_kse->ke_pctcpu);
}
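
The sched_4bsd.c conversions all follow the same adapter shape: each function
gains a local struct kse * and recovers it from td->td_kse at entry, so the
body keeps operating on the KSE exactly as before. A minimal compilable
illustration of that shape (mock types; the real functions also assert
sched_lock ownership):

#include <assert.h>
#include <stdio.h>

#define KES_ONRUNQ 1	/* mock of the run-queue state constant */

struct kse { int ke_state; };
struct thread { struct kse *td_kse; };

/* New-style entry point: take td, derive ke internally. */
static void
sched_rem_sketch(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	assert(ke->ke_state == KES_ONRUNQ);	/* mirrors the KASSERT above */
	ke->ke_state = 0;
	printf("kse removed from run queue\n");
}

int
main(void)
{
	struct kse ke = { .ke_state = KES_ONRUNQ };
	struct thread td = { .td_kse = &ke };

	sched_rem_sketch(&td);
	return (0);
}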
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 1f6a4d0..71646b6 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1011,11 +1011,11 @@ sched_exit_thread(struct thread *td, struct thread *child)
}
void
-sched_clock(struct kse *ke)
+sched_clock(struct thread *td)
{
struct kseq *kseq;
struct ksegrp *kg;
- struct thread *td;
+ struct kse *ke;
#if 0
struct kse *nke;
#endif
@@ -1036,7 +1036,7 @@ sched_clock(struct kse *ke)
tickincr = 1;
}
- td = ke->ke_thread;
+ ke = td->td_kse;
kg = ke->ke_ksegrp;
mtx_assert(&sched_lock, MA_OWNED);
@@ -1225,11 +1225,14 @@ retry:
}
void
-sched_add(struct kse *ke)
+sched_add(struct thread *td)
{
struct kseq *kseq;
struct ksegrp *kg;
+ struct kse *ke;
+ ke = td->td_kse;
+ kg = td->td_ksegrp;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
KASSERT((ke->ke_thread->td_kse != NULL),
@@ -1242,7 +1245,6 @@ sched_add(struct kse *ke)
KASSERT(ke->ke_runq == NULL,
("sched_add: KSE %p is still assigned to a run queue", ke));
- kg = ke->ke_ksegrp;
switch (PRI_BASE(kg->kg_pri_class)) {
case PRI_ITHD:
@@ -1283,9 +1285,12 @@ sched_add(struct kse *ke)
}
void
-sched_rem(struct kse *ke)
+sched_rem(struct thread *td)
{
struct kseq *kseq;
+ struct kse *ke;
+
+ ke = td->td_kse;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
@@ -1298,11 +1303,13 @@ sched_rem(struct kse *ke)
}
fixpt_t
-sched_pctcpu(struct kse *ke)
+sched_pctcpu(struct thread *td)
{
fixpt_t pctcpu;
+ struct kse *ke;
pctcpu = 0;
+ ke = td->td_kse;
mtx_lock_spin(&sched_lock);
if (ke->ke_ticks) {
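
ULE's sched_pctcpu() shows the same adapter plus its locking: the
ke_ticks/%CPU read happens under sched_lock. A toy equivalent using a pthread
mutex as a stand-in for the kernel spin lock (simplified: the real function
rescales ke_ticks into a fixed-point fraction rather than returning a stored
value):

#include <pthread.h>
#include <stdio.h>

typedef unsigned int fixpt_t;

struct kse { int ke_ticks; fixpt_t ke_pctcpu; };
struct thread { struct kse *td_kse; };

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

static fixpt_t
sched_pctcpu_sketch(struct thread *td)
{
	fixpt_t pctcpu = 0;
	struct kse *ke = td->td_kse;

	pthread_mutex_lock(&sched_lock);
	if (ke->ke_ticks)
		pctcpu = ke->ke_pctcpu;	/* simplified; see note above */
	pthread_mutex_unlock(&sched_lock);
	return (pctcpu);
}

int
main(void)
{
	struct kse ke = { .ke_ticks = 10, .ke_pctcpu = 7 };
	struct thread td = { .td_kse = &ke };

	printf("pctcpu=%u\n", sched_pctcpu_sketch(&td));
	return (0);
}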
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 585ca5d..7fc9df2 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -66,17 +66,17 @@ void sched_wakeup(struct thread *td);
/*
* KSEs are moved on and off of run queues.
*/
-void sched_add(struct kse *ke);
+void sched_add(struct thread *td);
struct kse *sched_choose(void);
-void sched_clock(struct kse *ke);
+void sched_clock(struct thread *td);
void sched_exit_kse(struct kse *ke, struct kse *child);
void sched_fork_kse(struct kse *ke, struct kse *child);
-void sched_rem(struct kse *ke);
+void sched_rem(struct thread *td);
/*
* and they use up cpu time.
*/
-fixpt_t sched_pctcpu(struct kse *ke);
+fixpt_t sched_pctcpu(struct thread *td);
/*
* These procedures tell the process data structure allocation code how