author     peter <peter@FreeBSD.org>   2002-08-28 23:45:15 +0000
committer  peter <peter@FreeBSD.org>   2002-08-28 23:45:15 +0000
commit     a2257589334d769f5e515ff0936ee5c13f1769d8 (patch)
tree       3d7240b9022dd46250cd1b622c9e2722e683bca8 /sys/kern/kern_synch.c
parent     7b726290f1e66d65654b382a03a1ce5fcc35cb8c (diff)
updatepri() works on a ksegrp (where the scheduling parameters are), so
directly give it the ksegrp instead of the thread. The only thing it used
to use in the thread was the ksegrp.

Reviewed by: julian
Diffstat (limited to 'sys/kern/kern_synch.c')
-rw-r--r--  sys/kern/kern_synch.c  35
1 files changed, 18 insertions, 17 deletions
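
Before the diff itself, the interface change described in the log message can be
summarized with a short sketch. This is a hedged, user-space model: the struct
definitions are simplified stand-ins for the kernel's struct thread and struct
ksegrp (only the fields this commit touches), and the helper wake_common() is a
hypothetical name that just illustrates the call-site pattern shared by wakeup(),
wakeup_one() and setrunnable().

/* Simplified stand-ins for the kernel types; real layouts are larger. */
struct ksegrp {
	unsigned int	kg_estcpu;	/* estimated cpu usage */
	unsigned int	kg_slptime;	/* ticks spent sleeping */
};

struct thread {
	struct ksegrp	*td_ksegrp;	/* scheduling parameters live here */
};

/*
 * New prototype: updatepri() takes the ksegrp directly, since the ksegrp
 * was the only thing it ever used from the thread.  (Body elided; see the
 * first hunk of the diff below.)
 */
void	updatepri(struct ksegrp *kg);

/*
 * Hypothetical helper showing the updated call-site pattern: resolve the
 * ksegrp once, then test, update and clear kg_slptime through it.
 */
static void
wake_common(struct thread *td)
{
	struct ksegrp *kg = td->td_ksegrp;

	if (kg->kg_slptime > 1)
		updatepri(kg);
	kg->kg_slptime = 0;
}
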
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 1b03692..4fd3393 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -376,16 +376,11 @@ schedcpu(arg)
* least six times the loadfactor will decay p_estcpu to zero.
*/
void
-updatepri(td)
- register struct thread *td;
+updatepri(struct ksegrp *kg)
{
- register struct ksegrp *kg;
register unsigned int newcpu;
register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
- if (td == NULL)
- return;
- kg = td->td_ksegrp;
newcpu = kg->kg_estcpu;
if (kg->kg_slptime > 5 * loadfac)
kg->kg_estcpu = 0;
@@ -395,7 +390,7 @@ updatepri(td)
newcpu = decay_cpu(loadfac, newcpu);
kg->kg_estcpu = newcpu;
}
- resetpriority(td->td_ksegrp);
+ resetpriority(kg);
}
/*
@@ -705,6 +700,7 @@ wakeup(ident)
register struct slpquehead *qp;
register struct thread *td;
struct thread *ntd;
+ struct ksegrp *kg;
struct proc *p;
mtx_lock_spin(&sched_lock);
@@ -720,9 +716,10 @@ restart:
/* OPTIMIZED EXPANSION OF setrunnable(p); */
CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
td, p->p_pid, p->p_comm);
- if (td->td_ksegrp->kg_slptime > 1)
- updatepri(td);
- td->td_ksegrp->kg_slptime = 0;
+ kg = td->td_ksegrp;
+ if (kg->kg_slptime > 1)
+ updatepri(kg);
+ kg->kg_slptime = 0;
if (p->p_sflag & PS_INMEM) {
setrunqueue(td);
maybe_resched(td);
@@ -754,6 +751,7 @@ wakeup_one(ident)
register struct thread *td;
register struct proc *p;
struct thread *ntd;
+ struct ksegrp *kg;
mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
@@ -768,9 +766,10 @@ restart:
/* OPTIMIZED EXPANSION OF setrunnable(p); */
CTR3(KTR_PROC,"wakeup1: thread %p (pid %d, %s)",
td, p->p_pid, p->p_comm);
- if (td->td_ksegrp->kg_slptime > 1)
- updatepri(td);
- td->td_ksegrp->kg_slptime = 0;
+ kg = td->td_ksegrp;
+ if (kg->kg_slptime > 1)
+ updatepri(kg);
+ kg->kg_slptime = 0;
if (p->p_sflag & PS_INMEM) {
setrunqueue(td);
maybe_resched(td);
@@ -794,7 +793,7 @@ restart:
* The machine independent parts of mi_switch().
*/
void
-mi_switch()
+mi_switch(void)
{
struct bintime new_switchtime;
struct thread *td = curthread; /* XXX */
@@ -931,6 +930,7 @@ void
setrunnable(struct thread *td)
{
struct proc *p = td->td_proc;
+ struct ksegrp *kg;
mtx_assert(&sched_lock, MA_OWNED);
switch (p->p_state) {
@@ -960,9 +960,10 @@ setrunnable(struct thread *td)
case TDS_RUNQ: /* not yet had time to suspend */
break;
}
- if (td->td_ksegrp->kg_slptime > 1)
- updatepri(td);
- td->td_ksegrp->kg_slptime = 0;
+ kg = td->td_ksegrp;
+ if (kg->kg_slptime > 1)
+ updatepri(kg);
+ kg->kg_slptime = 0;
if ((p->p_sflag & PS_INMEM) == 0) {
td->td_state = TDS_SWAPPED;
if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
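
The first hunk above shows only part of updatepri()'s body, so the decay it
performs on kg_estcpu (the "six times the loadfactor" comment) is easy to lose.
Below is a hedged model of that computation, reusing the simplified struct
ksegrp from the sketch before the diff. MODEL_FSCALE, model_loadfactor() and
model_decay_cpu() follow the traditional 4.4BSD scheduler formulas and are
assumptions for illustration, not copied from this revision of kern_synch.c;
the kernel code also compensates for a decay step already performed by
schedcpu().

/*
 * Hedged model of the kg_estcpu decay in updatepri(): after a very long
 * sleep the estimate drops straight to zero, otherwise it is decayed once
 * per tick slept.  The macros are 4.4BSD-style assumptions, not copied
 * from this file.
 */
#define	MODEL_FSCALE			2048
#define	model_loadfactor(ldavg)		(2 * (ldavg))
#define	model_decay_cpu(loadfac, cpu)	\
	(((loadfac) * (cpu)) / ((loadfac) + MODEL_FSCALE))

static void
model_updatepri(struct ksegrp *kg, unsigned int ldavg)
{
	unsigned int newcpu = kg->kg_estcpu;
	unsigned int loadfac = model_loadfactor(ldavg);
	unsigned int ticks;

	if (kg->kg_slptime > 5 * loadfac) {
		/* Slept so long that the cpu estimate has fully decayed. */
		kg->kg_estcpu = 0;
	} else {
		/*
		 * One decay step per tick slept (the kernel also allows
		 * for the step schedcpu() already applied).
		 */
		for (ticks = kg->kg_slptime; ticks > 0 && newcpu != 0; ticks--)
			newcpu = model_decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	/* The real function ends by calling resetpriority(kg), as in the diff. */
}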