author     julian <julian@FreeBSD.org>    2002-02-11 20:37:54 +0000
committer  julian <julian@FreeBSD.org>    2002-02-11 20:37:54 +0000
commit     37369620df3d22440dcb4976ad061fe320a01bcb (patch)
tree       91fc1230622927515e2c60360059682b64d84592 /sys/kern
parent     72a803ac5ecbbbc7caf38fab112121c93f703b3f (diff)
In a threaded world, different priorities become properties of
different entities. Make it so.
Reviewed by: jhb@freebsd.org (john baldwin)
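
The structural heart of the change: the per-KSE-group priority bundle (struct priority kg_pri) is split up, with the volatile scheduling priority moving into the thread while the scheduling class and user-level base priority stay with the group. A minimal before/after sketch of only the affected fields — the field names come straight from this diff, but the types, the "_before"/"_after" suffixes, and the abridged layout are illustrative assumptions, not the real sys/proc.h definitions:

#include <sys/types.h>

/* The old per-group bundle (abridged, illustrative). */
struct priority {
	u_char	pri_class;	/* scheduling class */
	u_char	pri_level;	/* current priority */
	u_char	pri_native;	/* priority before any lending/propagation */
	u_char	pri_user;	/* user-level base priority */
};

struct ksegrp_before {		/* every thread in the group shared this */
	struct priority kg_pri;
};

struct thread_after {		/* what can differ per thread */
	u_char	td_priority;	/* current (possibly lent) priority */
	u_char	td_base_pri;	/* value to revert to when lending ends */
};

struct ksegrp_after {		/* what remains a group property */
	u_char	kg_pri_class;	/* scheduling class */
	u_char	kg_user_pri;	/* user-level base priority */
};

With this split, a mutex owner's td_priority can be boosted by a blocked waiter without disturbing the group-wide user priority that resetpriority() computes.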
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/init_main.c       |   8
-rw-r--r--  sys/kern/kern_condvar.c    |   4
-rw-r--r--  sys/kern/kern_intr.c       |  10
-rw-r--r--  sys/kern/kern_mutex.c      |  28
-rw-r--r--  sys/kern/kern_poll.c       |   2
-rw-r--r--  sys/kern/kern_proc.c       |  12
-rw-r--r--  sys/kern/kern_resource.c   |  32
-rw-r--r--  sys/kern/kern_sig.c        |   7
-rw-r--r--  sys/kern/kern_subr.c       |   2
-rw-r--r--  sys/kern/kern_switch.c     |  10
-rw-r--r--  sys/kern/kern_synch.c      |  48
-rw-r--r--  sys/kern/ksched.c          |   9
-rw-r--r--  sys/kern/subr_trap.c       |   2
-rw-r--r--  sys/kern/subr_turnstile.c  |  28
14 files changed, 101 insertions, 101 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 3ebc140..2bbedb0 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -326,10 +326,10 @@ proc0_init(void *dummy __unused)
 	p->p_sflag = PS_INMEM;
 	p->p_stat = SRUN;
 	p->p_ksegrp.kg_nice = NZERO;
-	p->p_ksegrp.kg_pri.pri_class = PRI_TIMESHARE;
-	p->p_ksegrp.kg_pri.pri_level = PVM;
-	p->p_ksegrp.kg_pri.pri_native = PUSER;
-	p->p_ksegrp.kg_pri.pri_user = PUSER;
+	kg->kg_pri_class = PRI_TIMESHARE;
+	kg->kg_user_pri = PUSER;
+	td->td_priority = PVM;
+	td->td_base_pri = PUSER;
 	p->p_peers = 0;
 	p->p_leader = p;
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index fccd59b..fe88c55 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -177,7 +177,7 @@ cv_waitq_add(struct cv *cvp, struct thread *td)
 	td->td_wmesg = cvp->cv_description;
 	td->td_kse->ke_slptime = 0; /* XXXKSE */
 	td->td_ksegrp->kg_slptime = 0; /* XXXKSE */
-	td->td_ksegrp->kg_pri.pri_native = td->td_ksegrp->kg_pri.pri_level;
+	td->td_base_pri = td->td_priority;
 	CTR3(KTR_PROC, "cv_waitq_add: thread %p (pid %d, %s)", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm);
 	TAILQ_INSERT_TAIL(&cvp->cv_waitq, td, td_slpq);
@@ -487,7 +487,7 @@ cv_wakeup(struct cv *cvp)
 	td->td_proc->p_stat = SRUN;
 	if (td->td_proc->p_sflag & PS_INMEM) {
 		setrunqueue(td);
-		maybe_resched(td->td_ksegrp);
+		maybe_resched(td);
 	} else {
 		td->td_proc->p_sflag |= PS_SWAPINREQ;
 		wakeup(&proc0); /* XXXKSE */
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 890b92a..3a8bc8a 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -131,14 +131,14 @@ ithread_update(struct ithd *ithd)
 	strncpy(p->p_comm, ithd->it_name, sizeof(ithd->it_name));
 	ih = TAILQ_FIRST(&ithd->it_handlers);
 	if (ih == NULL) {
-		td->td_ksegrp->kg_pri.pri_level = PRI_MAX_ITHD;
+		td->td_priority = PRI_MAX_ITHD;
 		ithd->it_flags &= ~IT_ENTROPY;
 		return;
 	}
 
 	entropy = 0;
-	td->td_ksegrp->kg_pri.pri_level = ih->ih_pri;
-	td->td_ksegrp->kg_pri.pri_native = ih->ih_pri;
+	td->td_priority = ih->ih_pri;
+	td->td_base_pri = ih->ih_pri;
 	TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
 		if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
 		    sizeof(p->p_comm)) {
@@ -198,8 +198,8 @@ ithread_create(struct ithd **ithread, int vector, int flags,
 		return (error);
 	}
 	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
-	td->td_ksegrp->kg_pri.pri_class = PRI_ITHD;
-	td->td_ksegrp->kg_pri.pri_level = PRI_MAX_ITHD;
+	td->td_ksegrp->kg_pri_class = PRI_ITHD;
+	td->td_priority = PRI_MAX_ITHD;
 	p->p_stat = SWAIT;
 	ithd->it_td = td;
 	td->td_ithd = ithd;
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index d5e5f93..a8abdb4 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -67,8 +67,6 @@
 #define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
 	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
 
-#define SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)
-
 /*
  * Lock classes for sleep and spin mutexes.
  */
@@ -90,7 +88,7 @@ static void
 propagate_priority(struct thread *td)
 {
 	struct ksegrp *kg = td->td_ksegrp;
-	int pri = kg->kg_pri.pri_level;
+	int pri = td->td_priority;
 	struct mtx *m = td->td_blocked;
 
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -112,13 +110,13 @@ propagate_priority(struct thread *td)
 		MPASS(td->td_proc->p_magic == P_MAGIC);
 		KASSERT(td->td_proc->p_stat != SSLEEP,
 		    ("sleeping thread owns a mutex"));
-		if (kg->kg_pri.pri_level <= pri) /* lower is higher priority */
+		if (td->td_priority <= pri) /* lower is higher priority */
 			return;
 
 		/*
 		 * Bump this thread's priority.
 		 */
-		SET_PRIO(td, pri);
+		td->td_priority = pri;
 
 		/*
 		 * If lock holder is actually running, just bump priority.
@@ -174,7 +172,7 @@ propagate_priority(struct thread *td)
 		}
 
 		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
-		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
+		if (td1->td_priority <= pri) {
 			continue;
 		}
 
@@ -188,7 +186,7 @@ propagate_priority(struct thread *td)
 		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
 		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
 			MPASS(td1->td_proc->p_magic == P_MAGIC);
-			if (td1->td_ksegrp->kg_pri.pri_level > pri)
+			if (td1->td_priority > pri)
 				break;
 		}
@@ -327,8 +325,8 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 			MPASS(td1 != NULL);
 			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
 
-			if (td1->td_ksegrp->kg_pri.pri_level < kg->kg_pri.pri_level)
-				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
+			if (td1->td_priority < td->td_priority)
+				td->td_priority = td1->td_priority;
 			mtx_unlock_spin(&sched_lock);
 			return;
 		}
@@ -377,7 +375,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 		TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
 	} else {
 		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
-			if (td1->td_ksegrp->kg_pri.pri_level > kg->kg_pri.pri_level)
+			if (td1->td_priority > td->td_priority)
 				break;
 		if (td1)
 			TAILQ_INSERT_BEFORE(td1, td, td_blkq);
@@ -499,14 +497,14 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 		pri = PRI_MAX;
 		LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
-			int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
+			int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
 			if (cp < pri)
 				pri = cp;
 		}
 
-		if (pri > kg->kg_pri.pri_native)
-			pri = kg->kg_pri.pri_native;
-		SET_PRIO(td, pri);
+		if (pri > td->td_base_pri)
+			pri = td->td_base_pri;
+		td->td_priority = pri;
 
 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
 			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
@@ -516,7 +514,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 	td1->td_proc->p_stat = SRUN;
 	setrunqueue(td1);
 
-	if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
+	if (td->td_critnest == 1 && td1->td_priority < pri) {
 #ifdef notyet
 		if (td->td_ithd != NULL) {
 			struct ithd *it = td->td_ithd;
diff --git a/sys/kern/kern_poll.c b/sys/kern/kern_poll.c
index 5a2051f..7a45f74 100644
--- a/sys/kern/kern_poll.c
+++ b/sys/kern/kern_poll.c
@@ -446,7 +446,7 @@ poll_idle(void)
 	rtp.type = RTP_PRIO_IDLE;
 	mtx_lock_spin(&sched_lock);
 	rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
-	pri = td->td_ksegrp->kg_pri.pri_level;
+	pri = td->td_priority;
 	mtx_unlock_spin(&sched_lock);
 
 	for (;;) {
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 54caef2..e9b1b4c 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -532,7 +532,6 @@ fill_kinfo_proc(p, kp)
 		kp->ki_dsize = vm->vm_dsize;
 		kp->ki_ssize = vm->vm_ssize;
 	}
-	td = FIRST_THREAD_IN_PROC(p);
 	if ((p->p_sflag & PS_INMEM) && p->p_stats) {
 		kp->ki_start = p->p_stats->p_start;
 		kp->ki_rusage = p->p_stats->p_ru;
@@ -556,11 +555,14 @@ fill_kinfo_proc(p, kp)
 	/* vvv XXXKSE */
 	kp->ki_runtime = p->p_runtime;
 	kp->ki_pctcpu = p->p_kse.ke_pctcpu;
-	kp->ki_estcpu = p->p_ksegrp.kg_estcpu;
-	kp->ki_slptime = p->p_ksegrp.kg_slptime;
+	kp->ki_estcpu = td->td_ksegrp->kg_estcpu;
+	kp->ki_slptime = td->td_ksegrp->kg_slptime;
 	kp->ki_wchan = td->td_wchan;
-	kp->ki_pri = p->p_ksegrp.kg_pri;
-	kp->ki_nice = p->p_ksegrp.kg_nice;
+	kp->ki_pri.pri_level = td->td_priority;
+	kp->ki_pri.pri_user = td->td_ksegrp->kg_user_pri;
+	kp->ki_pri.pri_class = td->td_ksegrp->kg_pri_class;
+	kp->ki_pri.pri_native = td->td_base_pri;
+	kp->ki_nice = td->td_ksegrp->kg_nice;
 	kp->ki_rqindex = p->p_kse.ke_rqindex;
 	kp->ki_oncpu = p->p_kse.ke_oncpu;
 	kp->ki_lastcpu = td->td_lastcpu;
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index a037de8..fb6914f 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -289,7 +289,7 @@ rtprio(td, uap)
 			if ((error = p_cansee(curp, p)))
 				break;
 			mtx_lock_spin(&sched_lock);
-			pri_to_rtp(&p->p_ksegrp.kg_pri /* XXXKSE */ , &rtp);
+			pri_to_rtp(&p->p_ksegrp /* XXXKSE */ , &rtp);
 			mtx_unlock_spin(&sched_lock);
 			error = copyout(&rtp, uap->rtp, sizeof(struct rtprio));
 			break;
@@ -321,7 +321,7 @@ rtprio(td, uap)
 			}
 		}
 		mtx_lock_spin(&sched_lock);
-		error = rtp_to_pri(&rtp, &p->p_ksegrp.kg_pri);
+		error = rtp_to_pri(&rtp, &p->p_ksegrp);
 		mtx_unlock_spin(&sched_lock);
 		break;
 	default:
@@ -335,48 +335,50 @@ done2:
 }
 
 int
-rtp_to_pri(struct rtprio *rtp, struct priority *pri)
+rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
 {
 
 	if (rtp->prio > RTP_PRIO_MAX)
 		return (EINVAL);
 	switch (RTP_PRIO_BASE(rtp->type)) {
 	case RTP_PRIO_REALTIME:
-		pri->pri_level = PRI_MIN_REALTIME + rtp->prio;
+		kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
 		break;
 	case RTP_PRIO_NORMAL:
-		pri->pri_level = PRI_MIN_TIMESHARE + rtp->prio;
+		kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
 		break;
 	case RTP_PRIO_IDLE:
-		pri->pri_level = PRI_MIN_IDLE + rtp->prio;
+		kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
 		break;
 	default:
 		return (EINVAL);
 	}
-	pri->pri_class = rtp->type;
-	pri->pri_native = pri->pri_level;
-	pri->pri_user = pri->pri_level;
+	kg->kg_pri_class = rtp->type;
+	if (curthread->td_ksegrp == kg) {
+		curthread->td_base_pri = kg->kg_user_pri;
+		curthread->td_priority = kg->kg_user_pri;	/* XXX dubious */
+	}
 	return (0);
 }
 
 void
-pri_to_rtp(struct priority *pri, struct rtprio *rtp)
+pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
 {
 
-	switch (PRI_BASE(pri->pri_class)) {
+	switch (PRI_BASE(kg->kg_pri_class)) {
 	case PRI_REALTIME:
-		rtp->prio = pri->pri_level - PRI_MIN_REALTIME;
+		rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
 		break;
 	case PRI_TIMESHARE:
-		rtp->prio = pri->pri_level - PRI_MIN_TIMESHARE;
+		rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
 		break;
 	case PRI_IDLE:
-		rtp->prio = pri->pri_level - PRI_MIN_IDLE;
+		rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
 		break;
 	default:
 		break;
 	}
-	rtp->type = pri->pri_class;
+	rtp->type = kg->kg_pri_class;
 }
 
 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 2d56a10..379c017 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1492,10 +1492,9 @@ runfast:
 	 * Maybe just one would be enough?
 	 */
 	mtx_lock_spin(&sched_lock);
-	FOREACH_KSEGRP_IN_PROC(p, kg) {
-		if (kg->kg_pri.pri_level > PUSER) {
-			kg->kg_pri.pri_level = PUSER;
-		}
+
+	if (FIRST_THREAD_IN_PROC(p)->td_priority > PUSER) {
+		FIRST_THREAD_IN_PROC(p)->td_priority = PUSER;
 	}
 run:
 	/* If we jump here, sched_lock has to be owned. */
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index 250077c..9f2ede2 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -388,7 +388,7 @@ uio_yield()
 	td = curthread;
 	mtx_lock_spin(&sched_lock);
 	DROP_GIANT();
-	td->td_ksegrp->kg_pri.pri_level = td->td_ksegrp->kg_pri.pri_user;
+	td->td_priority = td->td_ksegrp->kg_user_pri; /* XXXKSE */
 	setrunqueue(td);
 	td->td_proc->p_stats->p_ru.ru_nivcsw++;
 	mi_switch();
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 1a2afa4..bd9c4bf 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -181,7 +181,6 @@ runq_add(struct runq *rq, struct kse *ke)
 	struct rqhead *rqh;
 	int pri;
-	struct ksegrp *kg = ke->ke_ksegrp;
 #ifdef INVARIANTS
 	struct proc *p = ke->ke_proc;
 #endif
@@ -192,12 +191,12 @@ runq_add(struct runq *rq, struct kse *ke)
 	    p, p->p_comm));
 	KASSERT(runq_find(rq, ke) == 0,
 	    ("runq_add: proc %p (%s) already in run queue", ke, p->p_comm));
-	pri = kg->kg_pri.pri_level / RQ_PPQ;
+	pri = ke->ke_thread->td_priority / RQ_PPQ;
 	ke->ke_rqindex = pri;
 	runq_setbit(rq, pri);
 	rqh = &rq->rq_queues[pri];
 	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
-	    ke->ke_proc, kg->kg_pri.pri_level, pri, rqh);
+	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
 	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
 	ke->ke_flags |= KEF_ONRUNQ;
 }
@@ -279,9 +278,6 @@ runq_init(struct runq *rq)
 void
 runq_remove(struct runq *rq, struct kse *ke)
 {
-#ifdef KTR
-	struct ksegrp *kg = ke->ke_ksegrp;
-#endif
 	struct rqhead *rqh;
 	int pri;
@@ -291,7 +287,7 @@ runq_remove(struct runq *rq, struct kse *ke)
 	pri = ke->ke_rqindex;
 	rqh = &rq->rq_queues[pri];
 	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
-	    ke, kg->kg_pri.pri_level, pri, rqh);
+	    ke, ke->ke_thread->td_priority, pri, rqh);
 	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
 	TAILQ_REMOVE(rqh, ke, ke_procq);
 	if (TAILQ_EMPTY(rqh)) {
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 9f3ba01..10b6fe4 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -120,12 +120,11 @@ SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
  * schedulers into account.
  */
 void
-maybe_resched(kg)
-	struct ksegrp *kg;
+maybe_resched(struct thread *td)
 {
 
 	mtx_assert(&sched_lock, MA_OWNED);
-	if (kg->kg_pri.pri_level < curthread->td_ksegrp->kg_pri.pri_level)
+	if (td->td_priority < curthread->td_priority)
 		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
 }
@@ -257,10 +256,11 @@ schedcpu(arg)
 	void *arg;
 {
 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
-	register struct proc *p;
-	register struct kse *ke;
-	register struct ksegrp *kg;
-	register int realstathz;
+	struct thread *td;
+	struct proc *p;
+	struct kse *ke;
+	struct ksegrp *kg;
+	int realstathz;
 	int awake;
 
 	realstathz = stathz ? stathz : hz;
@@ -321,15 +321,16 @@ schedcpu(arg)
 		}
 		kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
 		resetpriority(kg);
-		if (kg->kg_pri.pri_level >= PUSER &&
+		td = FIRST_THREAD_IN_PROC(p);
+		if (td->td_priority >= PUSER &&
 		    (p->p_sflag & PS_INMEM)) {
 			int changedqueue =
-			    ((kg->kg_pri.pri_level / RQ_PPQ) !=
-			     (kg->kg_pri.pri_user / RQ_PPQ));
+			    ((td->td_priority / RQ_PPQ) !=
+			     (kg->kg_user_pri / RQ_PPQ));
 
-			kg->kg_pri.pri_level = kg->kg_pri.pri_user;
+			td->td_priority = kg->kg_user_pri;
 			FOREACH_KSE_IN_GROUP(kg, ke) {
-				if ((ke->ke_oncpu == NOCPU) && /* idle */
+				if ((ke->ke_oncpu == NOCPU) &&
 				    (p->p_stat == SRUN) && /* XXXKSE */
 				    changedqueue) {
 					remrunqueue(ke->ke_thread);
@@ -459,7 +460,7 @@ msleep(ident, mtx, priority, wmesg, timo)
 	td->td_wmesg = wmesg;
 	td->td_kse->ke_slptime = 0; /* XXXKSE */
 	td->td_ksegrp->kg_slptime = 0;
-	td->td_ksegrp->kg_pri.pri_level = priority & PRIMASK;
+	td->td_priority = priority & PRIMASK;
 	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)", td,
 	    p->p_pid, p->p_comm, wmesg, ident);
 	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
@@ -628,7 +629,7 @@ restart:
 			td->td_proc->p_stat = SRUN;
 			if (p->p_sflag & PS_INMEM) {
 				setrunqueue(td);
-				maybe_resched(td->td_ksegrp);
+				maybe_resched(td);
 			} else {
 				p->p_sflag |= PS_SWAPINREQ;
 				wakeup((caddr_t)&proc0);
@@ -673,7 +674,7 @@ wakeup_one(ident)
 			td->td_proc->p_stat = SRUN;
 			if (p->p_sflag & PS_INMEM) {
 				setrunqueue(td);
-				maybe_resched(td->td_ksegrp);
+				maybe_resched(td);
 				break;
 			} else {
 				p->p_sflag |= PS_SWAPINREQ;
@@ -829,7 +830,7 @@ setrunnable(struct thread *td)
 		wakeup((caddr_t)&proc0);
 	} else {
 		setrunqueue(td);
-		maybe_resched(td->td_ksegrp);
+		maybe_resched(td);
 	}
 	mtx_unlock_spin(&sched_lock);
 }
@@ -844,16 +845,19 @@ resetpriority(kg)
 	register struct ksegrp *kg;
 {
 	register unsigned int newpriority;
+	struct thread *td;
 
 	mtx_lock_spin(&sched_lock);
-	if (kg->kg_pri.pri_class == PRI_TIMESHARE) {
+	if (kg->kg_pri_class == PRI_TIMESHARE) {
 		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
 		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
 		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
 		    PRI_MAX_TIMESHARE);
-		kg->kg_pri.pri_user = newpriority;
+		kg->kg_user_pri = newpriority;
+	}
+	FOREACH_THREAD_IN_GROUP(kg, td) {
+		maybe_resched(td);
 	}
-	maybe_resched(kg);
 	mtx_unlock_spin(&sched_lock);
 }
@@ -943,8 +947,8 @@ schedclock(td)
 		kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
 		if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
 			resetpriority(td->td_ksegrp);
-			if (kg->kg_pri.pri_level >= PUSER)
-				kg->kg_pri.pri_level = kg->kg_pri.pri_user;
+			if (td->td_priority >= PUSER)
+				td->td_priority = kg->kg_user_pri;
 		}
 	} else {
 		panic("schedclock");
@@ -961,7 +965,7 @@ yield(struct thread *td, struct yield_args *uap)
 	mtx_assert(&Giant, MA_NOTOWNED);
 
 	mtx_lock_spin(&sched_lock);
-	kg->kg_pri.pri_level = PRI_MAX_TIMESHARE;
+	td->td_priority = PRI_MAX_TIMESHARE;
 	setrunqueue(td);
 	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
 	mi_switch();
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index 6ad2a07..c9081c3 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -100,7 +100,7 @@ getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
 	int e = 0;
 
 	mtx_lock_spin(&sched_lock);
-	pri_to_rtp(&td->td_ksegrp->kg_pri, &rtp);
+	pri_to_rtp(td->td_ksegrp, &rtp);
 	mtx_unlock_spin(&sched_lock);
 	switch (rtp.type)
 	{
@@ -145,7 +145,7 @@ int ksched_getparam(register_t *ret, struct ksched *ksched,
 	struct rtprio rtp;
 
 	mtx_lock_spin(&sched_lock);
-	pri_to_rtp(&td->td_ksegrp->kg_pri, &rtp);
+	pri_to_rtp(td->td_ksegrp, &rtp);
 	mtx_unlock_spin(&sched_lock);
 	if (RTP_PRIO_IS_REALTIME(rtp.type))
 		param->sched_priority = rtpprio_to_p4prio(rtp.prio);
@@ -165,6 +165,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 {
 	int e = 0;
 	struct rtprio rtp;
+	struct ksegrp *kg = td->td_ksegrp;
 
 	switch(policy)
 	{
@@ -179,7 +180,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 			    ? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;
 
 			mtx_lock_spin(&sched_lock);
-			rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
+			rtp_to_pri(&rtp, kg);
 			td->td_last_kse->ke_flags |= KEF_NEEDRESCHED; /* XXXKSE */
 			mtx_unlock_spin(&sched_lock);
 		}
@@ -194,7 +195,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 			rtp.type = RTP_PRIO_NORMAL;
 			rtp.prio = p4prio_to_rtpprio(param->sched_priority);
 			mtx_lock_spin(&sched_lock);
-			rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
+			rtp_to_pri(&rtp, kg);
 
 			/* XXX Simply revert to whatever we had for last
 			 *     normal scheduler priorities.
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 722ef92..9c24e17 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -80,7 +80,7 @@ userret(td, frame, oticks)
 		mtx_unlock(&Giant);
 
 	mtx_lock_spin(&sched_lock);
-	kg->kg_pri.pri_level = kg->kg_pri.pri_user;
+	td->td_priority = kg->kg_user_pri;
 	if (ke->ke_flags & KEF_NEEDRESCHED) {
 		DROP_GIANT();
 		setrunqueue(td);
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index d5e5f93..a8abdb4 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -67,8 +67,6 @@
 #define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
 	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
 
-#define SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)
-
 /*
  * Lock classes for sleep and spin mutexes.
  */
@@ -90,7 +88,7 @@ static void
 propagate_priority(struct thread *td)
 {
 	struct ksegrp *kg = td->td_ksegrp;
-	int pri = kg->kg_pri.pri_level;
+	int pri = td->td_priority;
 	struct mtx *m = td->td_blocked;
 
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -112,13 +110,13 @@ propagate_priority(struct thread *td)
 		MPASS(td->td_proc->p_magic == P_MAGIC);
 		KASSERT(td->td_proc->p_stat != SSLEEP,
 		    ("sleeping thread owns a mutex"));
-		if (kg->kg_pri.pri_level <= pri) /* lower is higher priority */
+		if (td->td_priority <= pri) /* lower is higher priority */
 			return;
 
 		/*
 		 * Bump this thread's priority.
 		 */
-		SET_PRIO(td, pri);
+		td->td_priority = pri;
 
 		/*
 		 * If lock holder is actually running, just bump priority.
@@ -174,7 +172,7 @@ propagate_priority(struct thread *td)
 		}
 
 		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
-		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
+		if (td1->td_priority <= pri) {
 			continue;
 		}
 
@@ -188,7 +186,7 @@ propagate_priority(struct thread *td)
 		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
 		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
 			MPASS(td1->td_proc->p_magic == P_MAGIC);
-			if (td1->td_ksegrp->kg_pri.pri_level > pri)
+			if (td1->td_priority > pri)
 				break;
 		}
@@ -327,8 +325,8 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 			MPASS(td1 != NULL);
 			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
 
-			if (td1->td_ksegrp->kg_pri.pri_level < kg->kg_pri.pri_level)
-				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
+			if (td1->td_priority < td->td_priority)
+				td->td_priority = td1->td_priority;
 			mtx_unlock_spin(&sched_lock);
 			return;
 		}
@@ -377,7 +375,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 		TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
 	} else {
 		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
-			if (td1->td_ksegrp->kg_pri.pri_level > kg->kg_pri.pri_level)
+			if (td1->td_priority > td->td_priority)
 				break;
 		if (td1)
 			TAILQ_INSERT_BEFORE(td1, td, td_blkq);
@@ -499,14 +497,14 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 		pri = PRI_MAX;
 		LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
-			int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
+			int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
 			if (cp < pri)
 				pri = cp;
 		}
 
-		if (pri > kg->kg_pri.pri_native)
-			pri = kg->kg_pri.pri_native;
-		SET_PRIO(td, pri);
+		if (pri > td->td_base_pri)
+			pri = td->td_base_pri;
+		td->td_priority = pri;
 
 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
 			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
@@ -516,7 +514,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 	td1->td_proc->p_stat = SRUN;
 	setrunqueue(td1);
 
-	if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
+	if (td->td_critnest == 1 && td1->td_priority < pri) {
 #ifdef notyet
 		if (td->td_ithd != NULL) {
 			struct ithd *it = td->td_ithd;
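
A note on the recurring pattern in kern_mutex.c and subr_turnstile.c above: td_base_pri takes over the role of pri_native, the priority a thread reverts to once it stops inheriting from waiters blocked on its mutexes. A standalone sketch of the unlock-time recomputation that _mtx_unlock_sleep performs — simplified and hedged: the real code walks the td_contested list under sched_lock, while the array of queue heads and the struct definition here are illustrative stand-ins:

#include <stddef.h>

/* Illustrative stand-in for struct thread; only the two fields used here. */
struct thread {
	int	td_priority;	/* current effective priority */
	int	td_base_pri;	/* priority absent any inherited boost */
};

#define PRI_MAX	255		/* numerically largest = least urgent */

/*
 * After releasing one contested mutex, recompute the owner's priority:
 * take the most urgent (numerically smallest) priority among the first
 * waiter on each mutex still held, but never drop below the thread's
 * own base priority.
 */
static void
recompute_priority(struct thread *td, struct thread *waiters[], size_t n)
{
	int pri = PRI_MAX;
	size_t i;

	for (i = 0; i < n; i++)
		if (waiters[i]->td_priority < pri)
			pri = waiters[i]->td_priority;
	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;
}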