-rw-r--r--  sys/kern/sched_core.c  755
1 files changed, 334 insertions, 421 deletions
diff --git a/sys/kern/sched_core.c b/sys/kern/sched_core.c
index b2de346..97cd474 100644
--- a/sys/kern/sched_core.c
+++ b/sys/kern/sched_core.c
@@ -30,8 +30,6 @@ __FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
-#define kse td_sched
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
@@ -98,8 +96,8 @@ __FBSDID("$FreeBSD$");
#define NS_MAX_SLEEP_TIME (HZ_TO_NS(MAX_SLEEP_TIME))
#define STARVATION_TIME (MAX_SLEEP_TIME)
-#define CURRENT_SCORE(kg) \
- (MAX_SCORE * NS_TO_HZ((kg)->kg_slptime) / MAX_SLEEP_TIME)
+#define CURRENT_SCORE(ts) \
+ (MAX_SCORE * NS_TO_HZ((ts)->ts_slptime) / MAX_SLEEP_TIME)
#define SCALE_USER_PRI(x, upri) \
MAX(x * (upri + 1) / (MAX_USER_PRI/2), min_timeslice)
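The two macros above carry most of the timeshare policy: CURRENT_SCORE() turns banked sleep time into a 0..MAX_SCORE interactivity score, and SCALE_USER_PRI() stretches a time slice by the thread's user priority while clamping it from below at min_timeslice. The following userland sketch only illustrates that arithmetic; the values chosen for MAX_SCORE, MAX_SLEEP_TIME, MAX_USER_PRI, min_timeslice and def_timeslice are stand-ins, since the real definitions sit outside the hunks shown here.

/* Illustrative sketch, not part of the patch: stand-in values. */
#include <stdio.h>

#define MAX_SCORE      40
#define MAX_SLEEP_TIME 1000            /* ticks */
#define MAX_USER_PRI   40
static const int min_timeslice = 5;
static const int def_timeslice = 100;

static int
current_score(int slp_ticks)           /* mirrors CURRENT_SCORE() */
{
        return (MAX_SCORE * slp_ticks / MAX_SLEEP_TIME);
}

static int
scale_user_pri(int x, int upri)        /* mirrors SCALE_USER_PRI() */
{
        int slice = x * (upri + 1) / (MAX_USER_PRI / 2);

        return (slice > min_timeslice ? slice : min_timeslice);
}

int
main(void)
{
        /* Sleeping half the window yields half the maximum score. */
        printf("score %d\n", current_score(MAX_SLEEP_TIME / 2));       /* 20 */
        /* The slice is clamped from below by min_timeslice... */
        printf("slice %d\n", scale_user_pri(def_timeslice, 0));        /* 5 */
        /* ...and stretched as the user priority index grows. */
        printf("slice %d\n", scale_user_pri(def_timeslice, MAX_USER_PRI - 1));
        return (0);
}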
@@ -114,21 +112,21 @@ __FBSDID("$FreeBSD$");
* Calculate the score a thread must have to prove itself an
* interactive thread.
*/
-#define INTERACTIVE_SCORE(ke) \
- (PROC_NICE((ke)->ke_proc) * MAX_SCORE / 40 + INTERACTIVE_BASE_SCORE)
+#define INTERACTIVE_SCORE(ts) \
+ (PROC_NICE((ts)->ts_proc) * MAX_SCORE / 40 + INTERACTIVE_BASE_SCORE)
/* Test if a thread is an interactive thread */
-#define THREAD_IS_INTERACTIVE(ke) \
- ((ke)->ke_ksegrp->kg_user_pri <= \
- PROC_PRI((ke)->ke_proc) - INTERACTIVE_SCORE(ke))
+#define THREAD_IS_INTERACTIVE(ts) \
+ ((ts)->ts_thread->td_user_pri <= \
+ PROC_PRI((ts)->ts_proc) - INTERACTIVE_SCORE(ts))
/*
* Calculate how long a thread must sleep for the sleep to count
* as an interactive sleep.
*/
-#define INTERACTIVE_SLEEP_TIME(ke) \
+#define INTERACTIVE_SLEEP_TIME(ts) \
(HZ_TO_NS(MAX_SLEEP_TIME * \
- (MAX_SCORE / 2 + INTERACTIVE_SCORE((ke)) + 1) / MAX_SCORE - 1))
+ (MAX_SCORE / 2 + INTERACTIVE_SCORE((ts)) + 1) / MAX_SCORE - 1))
#define CHILD_WEIGHT 90
#define PARENT_WEIGHT 90
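Taken together, INTERACTIVE_SCORE() and THREAD_IS_INTERACTIVE() say a thread only counts as interactive once its user priority has been boosted a nice-dependent margin below the process's base priority (PROC_PRI), so positive nice values raise the bar. A small sketch of that threshold test, with stand-in values for MAX_SCORE and INTERACTIVE_BASE_SCORE and plain integers in place of the thread and proc fields:

/* Illustrative sketch, not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SCORE              40      /* stand-in */
#define INTERACTIVE_BASE_SCORE 5       /* stand-in */

static int
interactive_score(int nice)            /* mirrors INTERACTIVE_SCORE() */
{
        return (nice * MAX_SCORE / 40 + INTERACTIVE_BASE_SCORE);
}

static bool
thread_is_interactive(int user_pri, int base_pri, int nice)
{
        /* The boosted user priority must clear a nice-dependent margin. */
        return (user_pri <= base_pri - interactive_score(nice));
}

int
main(void)
{
        /* The same 10-level boost qualifies at nice 0 but not at nice +10. */
        printf("%d\n", thread_is_interactive(160, 170, 0));     /* 1 */
        printf("%d\n", thread_is_interactive(160, 170, 10));    /* 0 */
        return (0);
}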
@@ -158,7 +156,7 @@ typedef u_int32_t kqb_word_t;
/*
* Head of run queues.
*/
-TAILQ_HEAD(krqhead, kse);
+TAILQ_HEAD(krqhead, td_sched);
/*
* Bit array which maintains the status of a run queue. When a queue is
@@ -185,63 +183,48 @@ struct krunq {
* The schedulable entity that can be given a context to run. A process may
* have several of these.
*/
-struct kse {
- struct thread *ke_thread; /* (*) Active associated thread. */
- TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
- int ke_flags; /* (j) KEF_* flags. */
- fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
- u_char ke_rqindex; /* (j) Run queue index. */
+struct td_sched {
+ struct thread *ts_thread; /* (*) Active associated thread. */
+ TAILQ_ENTRY(td_sched) ts_procq; /* (j/z) Run queue. */
+ int ts_flags; /* (j) TSF_* flags. */
+ fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */
+ u_char ts_rqindex; /* (j) Run queue index. */
enum {
- KES_THREAD = 0x0, /* slaved to thread state */
- KES_ONRUNQ
- } ke_state; /* (j) thread sched specific status. */
- int ke_slice; /* Time slice in ticks */
- struct kseq *ke_kseq; /* Kseq the thread belongs to */
- struct krunq *ke_runq; /* Assiociated runqueue */
+ TSS_THREAD = 0x0, /* slaved to thread state */
+ TSS_ONRUNQ
+ } ts_state; /* (j) thread sched specific status. */
+ int ts_slice; /* Time slice in ticks */
+ struct kseq *ts_kseq; /* Kseq the thread belongs to */
+ struct krunq *ts_runq; /* Associated runqueue */
#ifdef SMP
- int ke_cpu; /* CPU that we have affinity for. */
- int ke_wakeup_cpu; /* CPU that has activated us. */
+ int ts_cpu; /* CPU that we have affinity for. */
+ int ts_wakeup_cpu; /* CPU that has activated us. */
#endif
- int ke_activated; /* How is the thread activated. */
- uint64_t ke_timestamp; /* Last timestamp dependent on state.*/
- unsigned ke_lastran; /* Last timestamp the thread ran. */
+ int ts_activated; /* How is the thread activated. */
+ uint64_t ts_timestamp; /* Last timestamp dependent on state.*/
+ unsigned ts_lastran; /* Last timestamp the thread ran. */
/* The following variables are only used for pctcpu calculation */
- int ke_ltick; /* Last tick that we were running on */
- int ke_ftick; /* First tick that we were running on */
- int ke_ticks; /* Tick count */
-};
+ int ts_ltick; /* Last tick that we were running on */
+ int ts_ftick; /* First tick that we were running on */
+ int ts_ticks; /* Tick count */
-#define td_kse td_sched
-#define ke_proc ke_thread->td_proc
-#define ke_ksegrp ke_thread->td_ksegrp
-
-/* flags kept in ke_flags */
-#define KEF_BOUND 0x0001 /* Thread can not migrate. */
-#define KEF_PREEMPTED 0x0002 /* Thread was preempted. */
-#define KEF_MIGRATING 0x0004 /* Thread is migrating. */
-#define KEF_SLEEP 0x0008 /* Thread did sleep. */
-#define KEF_DIDRUN 0x0010 /* Thread actually ran. */
-#define KEF_EXIT 0x0020 /* Thread is being killed. */
-#define KEF_NEXTRQ 0x0400 /* Thread should be in next queue. */
-#define KEF_FIRST_SLICE 0x0800 /* Thread has first time slice left. */
-
-struct kg_sched {
- struct thread *skg_last_assigned; /* (j) Last thread assigned to */
- /* the system scheduler */
- u_long skg_slptime; /* (j) Number of ticks we vol. slept */
- u_long skg_runtime; /* (j) Temp total run time. */
- int skg_avail_opennings; /* (j) Num unfilled slots in group.*/
- int skg_concurrency; /* (j) Num threads requested in group.*/
+ u_long ts_slptime; /* (j) Number of ticks we vol. slept */
+ u_long ts_runtime; /* (j) Temp total run time. */
};
-#define kg_last_assigned kg_sched->skg_last_assigned
-#define kg_avail_opennings kg_sched->skg_avail_opennings
-#define kg_concurrency kg_sched->skg_concurrency
-#define kg_slptime kg_sched->skg_slptime
-#define kg_runtime kg_sched->skg_runtime
-#define SLOT_RELEASE(kg) (kg)->kg_avail_opennings++
-#define SLOT_USE(kg) (kg)->kg_avail_opennings--
+#define td_sched td_sched
+#define ts_proc ts_thread->td_proc
+
+/* flags kept in ts_flags */
+#define TSF_BOUND 0x0001 /* Thread can not migrate. */
+#define TSF_PREEMPTED 0x0002 /* Thread was preempted. */
+#define TSF_MIGRATING 0x0004 /* Thread is migrating. */
+#define TSF_SLEEP 0x0008 /* Thread did sleep. */
+#define TSF_DIDRUN 0x0010 /* Thread actually ran. */
+#define TSF_EXIT 0x0020 /* Thread is being killed. */
+#define TSF_NEXTRQ 0x0400 /* Thread should be in next queue. */
+#define TSF_FIRST_SLICE 0x0800 /* Thread has first time slice left. */
/*
* Cpu percentage computation macros and defines.
@@ -267,8 +250,7 @@ struct kseq {
signed char ksq_expired_nice; /* Lowest nice in nextq */
};
-static struct kse kse0;
-static struct kg_sched kg_sched0;
+static struct td_sched kse0;
static int min_timeslice = 5;
static int def_timeslice = 100;
@@ -278,7 +260,7 @@ static int sched_tdcnt;
static struct kseq kseq_global;
/*
- * One kse queue per processor.
+ * One td_sched queue per processor.
*/
#ifdef SMP
static struct kseq kseq_cpu[MAXCPU];
@@ -353,33 +335,31 @@ SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
"account for htt");
#endif
-static void slot_fill(struct ksegrp *);
-
-static void krunq_add(struct krunq *, struct kse *);
-static struct kse *krunq_choose(struct krunq *);
+static void krunq_add(struct krunq *, struct td_sched *);
+static struct td_sched *krunq_choose(struct krunq *);
static void krunq_clrbit(struct krunq *rq, int pri);
static int krunq_findbit(struct krunq *rq);
static void krunq_init(struct krunq *);
-static void krunq_remove(struct krunq *, struct kse *);
+static void krunq_remove(struct krunq *, struct td_sched *);
-static struct kse * kseq_choose(struct kseq *);
-static void kseq_load_add(struct kseq *, struct kse *);
-static void kseq_load_rem(struct kseq *, struct kse *);
-static void kseq_runq_add(struct kseq *, struct kse *);
-static void kseq_runq_rem(struct kseq *, struct kse *);
+static struct td_sched * kseq_choose(struct kseq *);
+static void kseq_load_add(struct kseq *, struct td_sched *);
+static void kseq_load_rem(struct kseq *, struct td_sched *);
+static void kseq_runq_add(struct kseq *, struct td_sched *);
+static void kseq_runq_rem(struct kseq *, struct td_sched *);
static void kseq_setup(struct kseq *);
-static int sched_is_timeshare(struct ksegrp *kg);
-static struct kse *sched_choose(void);
-static int sched_calc_pri(struct ksegrp *kg);
-static int sched_starving(struct kseq *, unsigned, struct kse *);
-static void sched_pctcpu_update(struct kse *);
+static int sched_is_timeshare(struct thread *td);
+static struct td_sched *sched_choose(void);
+static int sched_calc_pri(struct td_sched *ts);
+static int sched_starving(struct kseq *, unsigned, struct td_sched *);
+static void sched_pctcpu_update(struct td_sched *);
static void sched_thread_priority(struct thread *, u_char);
static uint64_t sched_timestamp(void);
-static int sched_recalc_pri(struct kse *ke, uint64_t now);
-static int sched_timeslice(struct kse *ke);
-static void sched_update_runtime(struct kse *ke, uint64_t now);
-static void sched_commit_runtime(struct kse *ke);
+static int sched_recalc_pri(struct td_sched *ts, uint64_t now);
+static int sched_timeslice(struct td_sched *ts);
+static void sched_update_runtime(struct td_sched *ts, uint64_t now);
+static void sched_commit_runtime(struct td_sched *ts);
/*
* Initialize a run structure.
@@ -460,59 +440,59 @@ krunq_setbit(struct krunq *rq, int pri)
* corresponding status bit.
*/
static void
-krunq_add(struct krunq *rq, struct kse *ke)
+krunq_add(struct krunq *rq, struct td_sched *ts)
{
struct krqhead *rqh;
int pri;
- pri = ke->ke_thread->td_priority;
- ke->ke_rqindex = pri;
+ pri = ts->ts_thread->td_priority;
+ ts->ts_rqindex = pri;
krunq_setbit(rq, pri);
rqh = &rq->rq_queues[pri];
- if (ke->ke_flags & KEF_PREEMPTED)
- TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
+ if (ts->ts_flags & TSF_PREEMPTED)
+ TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
else
- TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
+ TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
}
/*
* Find the highest priority process on the run queue.
*/
-static struct kse *
+static struct td_sched *
krunq_choose(struct krunq *rq)
{
struct krqhead *rqh;
- struct kse *ke;
+ struct td_sched *ts;
int pri;
mtx_assert(&sched_lock, MA_OWNED);
if ((pri = krunq_findbit(rq)) != -1) {
rqh = &rq->rq_queues[pri];
- ke = TAILQ_FIRST(rqh);
- KASSERT(ke != NULL, ("krunq_choose: no thread on busy queue"));
+ ts = TAILQ_FIRST(rqh);
+ KASSERT(ts != NULL, ("krunq_choose: no thread on busy queue"));
#ifdef SMP
if (pri <= PRI_MAX_ITHD || runq_fuzz <= 0)
- return (ke);
+ return (ts);
/*
* In the first couple of entries, check if
* there is one for our CPU as a preference.
*/
- struct kse *ke2 = ke;
+ struct td_sched *ts2 = ts;
const int mycpu = PCPU_GET(cpuid);
const int mymask = 1 << mycpu;
int count = runq_fuzz;
- while (count-- && ke2) {
- const int cpu = ke2->ke_wakeup_cpu;
+ while (count-- && ts2) {
+ const int cpu = ts2->ts_wakeup_cpu;
if (cpu_sibling[cpu] & mymask) {
- ke = ke2;
+ ts = ts2;
break;
}
- ke2 = TAILQ_NEXT(ke2, ke_procq);
+ ts2 = TAILQ_NEXT(ts2, ts_procq);
}
#endif
- return (ke);
+ return (ts);
}
return (NULL);
@@ -521,77 +501,77 @@ krunq_choose(struct krunq *rq)
/*
* Remove the KSE from the queue specified by its priority, and clear the
* corresponding status bit if the queue becomes empty.
- * Caller must set ke->ke_state afterwards.
+ * Caller must set ts->ts_state afterwards.
*/
static void
-krunq_remove(struct krunq *rq, struct kse *ke)
+krunq_remove(struct krunq *rq, struct td_sched *ts)
{
struct krqhead *rqh;
int pri;
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
+ KASSERT(ts->ts_proc->p_sflag & PS_INMEM,
("runq_remove: process swapped out"));
- pri = ke->ke_rqindex;
+ pri = ts->ts_rqindex;
rqh = &rq->rq_queues[pri];
- KASSERT(ke != NULL, ("krunq_remove: no proc on busy queue"));
- TAILQ_REMOVE(rqh, ke, ke_procq);
+ KASSERT(ts != NULL, ("krunq_remove: no proc on busy queue"));
+ TAILQ_REMOVE(rqh, ts, ts_procq);
if (TAILQ_EMPTY(rqh))
krunq_clrbit(rq, pri);
}
static inline void
-kseq_runq_add(struct kseq *kseq, struct kse *ke)
+kseq_runq_add(struct kseq *kseq, struct td_sched *ts)
{
- krunq_add(ke->ke_runq, ke);
- ke->ke_kseq = kseq;
+ krunq_add(ts->ts_runq, ts);
+ ts->ts_kseq = kseq;
}
static inline void
-kseq_runq_rem(struct kseq *kseq, struct kse *ke)
+kseq_runq_rem(struct kseq *kseq, struct td_sched *ts)
{
- krunq_remove(ke->ke_runq, ke);
- ke->ke_kseq = NULL;
- ke->ke_runq = NULL;
+ krunq_remove(ts->ts_runq, ts);
+ ts->ts_kseq = NULL;
+ ts->ts_runq = NULL;
}
static inline void
-kseq_load_add(struct kseq *kseq, struct kse *ke)
+kseq_load_add(struct kseq *kseq, struct td_sched *ts)
{
kseq->ksq_load++;
- if ((ke->ke_proc->p_flag & P_NOLOAD) == 0)
+ if ((ts->ts_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt++;
}
static inline void
-kseq_load_rem(struct kseq *kseq, struct kse *ke)
+kseq_load_rem(struct kseq *kseq, struct td_sched *ts)
{
kseq->ksq_load--;
- if ((ke->ke_proc->p_flag & P_NOLOAD) == 0)
+ if ((ts->ts_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
}
/*
* Pick the highest priority task we have and return it.
*/
-static struct kse *
+static struct td_sched *
kseq_choose(struct kseq *kseq)
{
struct krunq *swap;
- struct kse *ke;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
- ke = krunq_choose(kseq->ksq_curr);
- if (ke != NULL)
- return (ke);
+ ts = krunq_choose(kseq->ksq_curr);
+ if (ts != NULL)
+ return (ts);
kseq->ksq_expired_nice = PRIO_MAX + 1;
kseq->ksq_expired_tick = 0;
swap = kseq->ksq_curr;
kseq->ksq_curr = kseq->ksq_next;
kseq->ksq_next = swap;
- ke = krunq_choose(kseq->ksq_curr);
- if (ke != NULL)
- return (ke);
+ ts = krunq_choose(kseq->ksq_curr);
+ if (ts != NULL)
+ return (ts);
return krunq_choose(&kseq->ksq_idle);
}
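kseq_choose() only swaps ksq_curr and ksq_next once the current queue is empty, the familiar active/expired rotation: threads that exhaust their slice are parked on the next queue (via TSF_NEXTRQ in sched_tick() further down) and do not run again until everything left in the current queue has had its turn. A toy, self-contained model of that rotation, with a trivial pick() standing in for krunq_choose() and the idle queue left out:

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

struct runq {
        int task[4];            /* toy run queue: task ids */
        int ntask;
};

static int
pick(struct runq *rq)
{
        return (rq->ntask > 0 ? rq->task[--rq->ntask] : -1);
}

struct kseq_sketch {
        struct runq *curr;      /* threads still owed a slice this epoch */
        struct runq *next;      /* expired threads wait for the next epoch */
};

static int
choose(struct kseq_sketch *ksq)
{
        struct runq *swap;
        int t;

        if ((t = pick(ksq->curr)) != -1)
                return (t);
        /* Current epoch drained: expired threads become the new epoch. */
        swap = ksq->curr;
        ksq->curr = ksq->next;
        ksq->next = swap;
        return (pick(ksq->curr));
}

int
main(void)
{
        struct runq a = { { 1, 2 }, 2 };
        struct runq b = { { 3 }, 1 };
        struct kseq_sketch ksq = { &a, &b };

        printf("%d\n", choose(&ksq));   /* 2: still in the current queue */
        printf("%d\n", choose(&ksq));   /* 1: still in the current queue */
        printf("%d\n", choose(&ksq));   /* 3: only runs after the swap   */
        return (0);
}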
@@ -604,50 +584,48 @@ sched_timestamp(void)
}
static inline int
-sched_timeslice(struct kse *ke)
+sched_timeslice(struct td_sched *ts)
{
- struct proc *p = ke->ke_proc;
+ struct proc *p = ts->ts_proc;
- if (ke->ke_proc->p_nice < 0)
+ if (ts->ts_proc->p_nice < 0)
return SCALE_USER_PRI(def_timeslice*4, PROC_USER_PRI(p));
else
return SCALE_USER_PRI(def_timeslice, PROC_USER_PRI(p));
}
static inline int
-sched_is_timeshare(struct ksegrp *kg)
+sched_is_timeshare(struct thread *td)
{
- return (kg->kg_pri_class == PRI_TIMESHARE);
+ return (td->td_pri_class == PRI_TIMESHARE);
}
static int
-sched_calc_pri(struct ksegrp *kg)
+sched_calc_pri(struct td_sched *ts)
{
int score, pri;
- if (sched_is_timeshare(kg)) {
- score = CURRENT_SCORE(kg) - MAX_SCORE / 2;
- pri = PROC_PRI(kg->kg_proc) - score;
+ if (sched_is_timeshare(ts->ts_thread)) {
+ score = CURRENT_SCORE(ts) - MAX_SCORE / 2;
+ pri = PROC_PRI(ts->ts_proc) - score;
if (pri < PUSER)
pri = PUSER;
else if (pri > PUSER_MAX)
pri = PUSER_MAX;
return (pri);
}
- return (kg->kg_base_user_pri);
+ return (ts->ts_thread->td_base_user_pri);
}
static int
-sched_recalc_pri(struct kse *ke, uint64_t now)
+sched_recalc_pri(struct td_sched *ts, uint64_t now)
{
uint64_t delta;
unsigned int sleep_time;
- struct ksegrp *kg;
- kg = ke->ke_ksegrp;
- delta = now - ke->ke_timestamp;
- if (__predict_false(!sched_is_timeshare(kg)))
- return (kg->kg_base_user_pri);
+ delta = now - ts->ts_timestamp;
+ if (__predict_false(!sched_is_timeshare(ts->ts_thread)))
+ return (ts->ts_thread->td_base_user_pri);
if (delta > NS_MAX_SLEEP_TIME)
sleep_time = NS_MAX_SLEEP_TIME;
@@ -656,23 +634,23 @@ sched_recalc_pri(struct kse *ke, uint64_t now)
if (__predict_false(sleep_time == 0))
goto out;
- if (ke->ke_activated != -1 &&
- sleep_time > INTERACTIVE_SLEEP_TIME(ke)) {
- kg->kg_slptime = HZ_TO_NS(MAX_SLEEP_TIME - def_timeslice);
+ if (ts->ts_activated != -1 &&
+ sleep_time > INTERACTIVE_SLEEP_TIME(ts)) {
+ ts->ts_slptime = HZ_TO_NS(MAX_SLEEP_TIME - def_timeslice);
} else {
- sleep_time *= (MAX_SCORE - CURRENT_SCORE(kg)) ? : 1;
+ sleep_time *= (MAX_SCORE - CURRENT_SCORE(ts)) ? : 1;
/*
* If thread is waking from uninterruptible sleep, it is
* unlikely an interactive sleep, limit its sleep time to
* prevent it from being an interactive thread.
*/
- if (ke->ke_activated == -1) {
- if (kg->kg_slptime >= INTERACTIVE_SLEEP_TIME(ke))
+ if (ts->ts_activated == -1) {
+ if (ts->ts_slptime >= INTERACTIVE_SLEEP_TIME(ts))
sleep_time = 0;
- else if (kg->kg_slptime + sleep_time >=
- INTERACTIVE_SLEEP_TIME(ke)) {
- kg->kg_slptime = INTERACTIVE_SLEEP_TIME(ke);
+ else if (ts->ts_slptime + sleep_time >=
+ INTERACTIVE_SLEEP_TIME(ts)) {
+ ts->ts_slptime = INTERACTIVE_SLEEP_TIME(ts);
sleep_time = 0;
}
}
@@ -680,47 +658,45 @@ sched_recalc_pri(struct kse *ke, uint64_t now)
/*
* Thread gets priority boost here.
*/
- kg->kg_slptime += sleep_time;
+ ts->ts_slptime += sleep_time;
/* Sleep time should never be larger than maximum */
- if (kg->kg_slptime > NS_MAX_SLEEP_TIME)
- kg->kg_slptime = NS_MAX_SLEEP_TIME;
+ if (ts->ts_slptime > NS_MAX_SLEEP_TIME)
+ ts->ts_slptime = NS_MAX_SLEEP_TIME;
}
out:
- return (sched_calc_pri(kg));
+ return (sched_calc_pri(ts));
}
static void
-sched_update_runtime(struct kse *ke, uint64_t now)
+sched_update_runtime(struct td_sched *ts, uint64_t now)
{
uint64_t runtime;
- struct ksegrp *kg = ke->ke_ksegrp;
- if (sched_is_timeshare(kg)) {
- if ((int64_t)(now - ke->ke_timestamp) < NS_MAX_SLEEP_TIME) {
- runtime = now - ke->ke_timestamp;
- if ((int64_t)(now - ke->ke_timestamp) < 0)
+ if (sched_is_timeshare(ts->ts_thread)) {
+ if ((int64_t)(now - ts->ts_timestamp) < NS_MAX_SLEEP_TIME) {
+ runtime = now - ts->ts_timestamp;
+ if ((int64_t)(now - ts->ts_timestamp) < 0)
runtime = 0;
} else {
runtime = NS_MAX_SLEEP_TIME;
}
- runtime /= (CURRENT_SCORE(kg) ? : 1);
- kg->kg_runtime += runtime;
- ke->ke_timestamp = now;
+ runtime /= (CURRENT_SCORE(ts) ? : 1);
+ ts->ts_runtime += runtime;
+ ts->ts_timestamp = now;
}
}
static void
-sched_commit_runtime(struct kse *ke)
+sched_commit_runtime(struct td_sched *ts)
{
- struct ksegrp *kg = ke->ke_ksegrp;
- if (kg->kg_runtime > kg->kg_slptime)
- kg->kg_slptime = 0;
+ if (ts->ts_runtime > ts->ts_slptime)
+ ts->ts_slptime = 0;
else
- kg->kg_slptime -= kg->kg_runtime;
- kg->kg_runtime = 0;
+ ts->ts_slptime -= ts->ts_runtime;
+ ts->ts_runtime = 0;
}
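sched_update_runtime() charges elapsed time against the thread, scaled down by its interactivity score, and sched_commit_runtime() later subtracts the accumulated charge from the banked sleep time without letting it go negative, so sustained running steadily erodes the bonus earned by sleeping. A worked sketch of that bookkeeping with made-up numbers:

/* Illustrative sketch, not part of the patch: made-up numbers. */
#include <stdint.h>
#include <stdio.h>

static uint64_t slptime = 800;  /* banked sleep credit (ns in the kernel) */
static uint64_t runtime;        /* run time charged since the last commit */

static void
update_runtime(uint64_t delta, int score)
{
        /* A higher interactivity score makes run time count for less. */
        runtime += delta / (score ? score : 1);
}

static void
commit_runtime(void)
{
        /* Running eats into the sleep credit, clamped at zero. */
        if (runtime > slptime)
                slptime = 0;
        else
                slptime -= runtime;
        runtime = 0;
}

int
main(void)
{
        update_runtime(600, 3);         /* charged as 600 / 3 = 200 */
        commit_runtime();
        printf("slptime %llu\n", (unsigned long long)slptime);  /* 600 */
        return (0);
}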
static void
@@ -812,13 +788,10 @@ schedinit(void)
* Set up the scheduler specific parts of proc0.
*/
proc0.p_sched = NULL; /* XXX */
- ksegrp0.kg_sched = &kg_sched0;
thread0.td_sched = &kse0;
- kse0.ke_thread = &thread0;
- kse0.ke_state = KES_THREAD;
- kse0.ke_slice = 100;
- kg_sched0.skg_concurrency = 1;
- kg_sched0.skg_avail_opennings = 0; /* we are already running */
+ kse0.ts_thread = &thread0;
+ kse0.ts_state = TSS_THREAD;
+ kse0.ts_slice = 100;
}
/*
@@ -833,32 +806,32 @@ sched_rr_interval(void)
}
static void
-sched_pctcpu_update(struct kse *ke)
+sched_pctcpu_update(struct td_sched *ts)
{
/*
* Adjust counters and watermark for pctcpu calc.
*/
- if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
+ if (ts->ts_ltick > ticks - SCHED_CPU_TICKS) {
/*
* Shift the tick count out so that the divide doesn't
* round away our results.
*/
- ke->ke_ticks <<= 10;
- ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
+ ts->ts_ticks <<= 10;
+ ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
SCHED_CPU_TICKS;
- ke->ke_ticks >>= 10;
+ ts->ts_ticks >>= 10;
} else
- ke->ke_ticks = 0;
- ke->ke_ltick = ticks;
- ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
+ ts->ts_ticks = 0;
+ ts->ts_ltick = ticks;
+ ts->ts_ftick = ts->ts_ltick - SCHED_CPU_TICKS;
}
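The shift pair in sched_pctcpu_update() is a fixed-point trick: the tick count is rescaled to the canonical SCHED_CPU_TICKS window with ten fractional bits carried through the divide, where a naive integer divide would often round the result down to zero. A small sketch of the rescaling (the SCHED_CPU_TICKS value is a stand-in):

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

#define SCHED_CPU_TICKS 1000           /* stand-in window width, in ticks */

static int
rescale(int ticks_run, int window_len)
{
        int t = ticks_run << 10;        /* keep 10 fractional bits ... */

        t = (t / window_len) * SCHED_CPU_TICKS;
        return (t >> 10);               /* ... and drop them at the end */
}

int
main(void)
{
        /* 300 run ticks over a 1700-tick window is about 176 ticks per
         * canonical window; the plain integer form loses it entirely. */
        printf("%d\n", rescale(300, 1700));                     /* 175 */
        printf("%d\n", (300 / 1700) * SCHED_CPU_TICKS);         /* 0   */
        return (0);
}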
static void
sched_thread_priority(struct thread *td, u_char prio)
{
- struct kse *ke;
+ struct td_sched *ts;
- ke = td->td_kse;
+ ts = td->td_sched;
mtx_assert(&sched_lock, MA_OWNED);
if (__predict_false(td->td_priority == prio))
return;
@@ -867,14 +840,14 @@ sched_thread_priority(struct thread *td, u_char prio)
/*
* If the priority has been elevated due to priority
* propagation, we may have to move ourselves to a new
- * queue. We still call adjustrunqueue below in case kse
+ * queue. We still call adjustrunqueue below in case td_sched
* needs to fix things up.
*/
- if (prio < td->td_priority && ke->ke_runq != NULL &&
- ke->ke_runq != ke->ke_kseq->ksq_curr) {
- krunq_remove(ke->ke_runq, ke);
- ke->ke_runq = ke->ke_kseq->ksq_curr;
- krunq_add(ke->ke_runq, ke);
+ if (prio < td->td_priority && ts->ts_runq != NULL &&
+ ts->ts_runq != ts->ts_kseq->ksq_curr) {
+ krunq_remove(ts->ts_runq, ts);
+ ts->ts_runq = ts->ts_kseq->ksq_curr;
+ krunq_add(ts->ts_runq, ts);
}
adjustrunqueue(td, prio);
} else
@@ -908,7 +881,7 @@ sched_unlend_prio(struct thread *td, u_char prio)
if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
td->td_base_pri <= PRI_MAX_TIMESHARE)
- base_pri = td->td_ksegrp->kg_user_pri;
+ base_pri = td->td_user_pri;
else
base_pri = td->td_base_pri;
if (prio >= base_pri) {
@@ -923,7 +896,7 @@ sched_prio(struct thread *td, u_char prio)
{
u_char oldprio;
- if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE)
+ if (td->td_pri_class == PRI_TIMESHARE)
prio = MIN(prio, PUSER_MAX);
/* First, update the base priority. */
@@ -949,26 +922,17 @@ sched_prio(struct thread *td, u_char prio)
}
void
-sched_user_prio(struct ksegrp *kg, u_char prio)
+sched_user_prio(struct thread *td, u_char prio)
{
- struct thread *td;
u_char oldprio;
- kg->kg_base_user_pri = prio;
-
- /* XXXKSE only for 1:1 */
-
- td = TAILQ_FIRST(&kg->kg_threads);
- if (td == NULL) {
- kg->kg_user_pri = prio;
- return;
- }
+ td->td_base_user_pri = prio;
- if (td->td_flags & TDF_UBORROWING && kg->kg_user_pri <= prio)
+ if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
return;
- oldprio = kg->kg_user_pri;
- kg->kg_user_pri = prio;
+ oldprio = td->td_user_pri;
+ td->td_user_pri = prio;
if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -981,8 +945,8 @@ sched_lend_user_prio(struct thread *td, u_char prio)
td->td_flags |= TDF_UBORROWING;
- oldprio = td->td_ksegrp->kg_user_pri;
- td->td_ksegrp->kg_user_pri = prio;
+ oldprio = td->td_user_pri;
+ td->td_user_pri = prio;
if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -991,13 +955,12 @@ sched_lend_user_prio(struct thread *td, u_char prio)
void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
- struct ksegrp *kg = td->td_ksegrp;
u_char base_pri;
- base_pri = kg->kg_base_user_pri;
+ base_pri = td->td_base_user_pri;
if (prio >= base_pri) {
td->td_flags &= ~TDF_UBORROWING;
- sched_user_prio(kg, base_pri);
+ sched_user_prio(td, base_pri);
} else
sched_lend_user_prio(td, prio);
}
@@ -1006,15 +969,13 @@ void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
struct kseq *ksq;
- struct kse *ke;
- struct ksegrp *kg;
+ struct td_sched *ts;
uint64_t now;
mtx_assert(&sched_lock, MA_OWNED);
now = sched_timestamp();
- ke = td->td_kse;
- kg = td->td_ksegrp;
+ ts = td->td_sched;
ksq = KSEQ_SELF();
td->td_lastcpu = td->td_oncpu;
@@ -1025,27 +986,15 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (td == PCPU_GET(idlethread)) {
TD_SET_CAN_RUN(td);
} else {
- sched_update_runtime(ke, now);
+ sched_update_runtime(ts, now);
/* We are ending our run so make our slot available again */
- SLOT_RELEASE(td->td_ksegrp);
- kseq_load_rem(ksq, ke);
+ kseq_load_rem(ksq, ts);
if (TD_IS_RUNNING(td)) {
setrunqueue(td, (flags & SW_PREEMPT) ?
SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
SRQ_OURSELF|SRQ_YIELDING);
} else {
- if ((td->td_proc->p_flag & P_HADTHREADS) &&
- (newtd == NULL ||
- newtd->td_ksegrp != td->td_ksegrp)) {
- /*
- * We will not be on the run queue.
- * So we must be sleeping or similar.
- * Don't use the slot if we will need it
- * for newtd.
- */
- slot_fill(td->td_ksegrp);
- }
- ke->ke_flags &= ~KEF_NEXTRQ;
+ ts->ts_flags &= ~TSF_NEXTRQ;
}
}
@@ -1054,17 +1003,16 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
* If we bring in a thread account for it as if it had been
* added to the run queue and then chosen.
*/
- SLOT_USE(newtd->td_ksegrp);
- newtd->td_kse->ke_flags |= KEF_DIDRUN;
- newtd->td_kse->ke_timestamp = now;
+ newtd->td_sched->ts_flags |= TSF_DIDRUN;
+ newtd->td_sched->ts_timestamp = now;
TD_SET_RUNNING(newtd);
- kseq_load_add(ksq, newtd->td_kse);
+ kseq_load_add(ksq, newtd->td_sched);
} else {
newtd = choosethread();
- /* sched_choose sets ke_timestamp, just reuse it */
+ /* sched_choose sets ts_timestamp, just reuse it */
}
if (td != newtd) {
- ke->ke_lastran = tick;
+ ts->ts_lastran = tick;
#ifdef HWPMC_HOOKS
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
@@ -1085,17 +1033,15 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
void
sched_nice(struct proc *p, int nice)
{
- struct ksegrp *kg;
struct thread *td;
PROC_LOCK_ASSERT(p, MA_OWNED);
mtx_assert(&sched_lock, MA_OWNED);
p->p_nice = nice;
- FOREACH_KSEGRP_IN_PROC(p, kg) {
- if (kg->kg_pri_class == PRI_TIMESHARE) {
- sched_user_prio(kg, sched_calc_pri(kg));
- FOREACH_THREAD_IN_GROUP(kg, td)
- td->td_flags |= TDF_NEEDRESCHED;
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if (td->td_pri_class == PRI_TIMESHARE) {
+ sched_user_prio(td, sched_calc_pri(td->td_sched));
+ td->td_flags |= TDF_NEEDRESCHED;
}
}
}
@@ -1103,33 +1049,31 @@ sched_nice(struct proc *p, int nice)
void
sched_sleep(struct thread *td)
{
- struct kse *ke;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
+ ts = td->td_sched;
if (td->td_flags & TDF_SINTR)
- ke->ke_activated = 0;
+ ts->ts_activated = 0;
else
- ke->ke_activated = -1;
- ke->ke_flags |= KEF_SLEEP;
+ ts->ts_activated = -1;
+ ts->ts_flags |= TSF_SLEEP;
}
void
sched_wakeup(struct thread *td)
{
- struct kse *ke;
- struct ksegrp *kg;
+ struct td_sched *ts;
struct kseq *kseq, *mykseq;
uint64_t now;
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
- kg = td->td_ksegrp;
+ ts = td->td_sched;
mykseq = KSEQ_SELF();
- if (ke->ke_flags & KEF_SLEEP) {
- ke->ke_flags &= ~KEF_SLEEP;
- if (sched_is_timeshare(kg)) {
- sched_commit_runtime(ke);
+ if (ts->ts_flags & TSF_SLEEP) {
+ ts->ts_flags &= ~TSF_SLEEP;
+ if (sched_is_timeshare(td)) {
+ sched_commit_runtime(ts);
now = sched_timestamp();
kseq = KSEQ_CPU(td->td_lastcpu);
#ifdef SMP
@@ -1137,7 +1081,7 @@ sched_wakeup(struct thread *td)
now = now - mykseq->ksq_last_timestamp +
kseq->ksq_last_timestamp;
#endif
- sched_user_prio(kg, sched_recalc_pri(ke, now));
+ sched_user_prio(td, sched_recalc_pri(ts, now));
}
}
setrunqueue(td, SRQ_BORING);
@@ -1152,52 +1096,44 @@ sched_fork(struct thread *td, struct thread *childtd)
{
mtx_assert(&sched_lock, MA_OWNED);
- sched_fork_ksegrp(td, childtd->td_ksegrp);
sched_fork_thread(td, childtd);
}
void
-sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
-{
- struct ksegrp *kg = td->td_ksegrp;
-
- mtx_assert(&sched_lock, MA_OWNED);
- child->kg_slptime = kg->kg_slptime * CHILD_WEIGHT / 100;
- if (child->kg_pri_class == PRI_TIMESHARE)
- sched_user_prio(child, sched_calc_pri(child));
- kg->kg_slptime = kg->kg_slptime * PARENT_WEIGHT / 100;
-}
-
-void
sched_fork_thread(struct thread *td, struct thread *child)
{
- struct kse *ke;
- struct kse *ke2;
+ struct td_sched *ts;
+ struct td_sched *ts2;
sched_newthread(child);
- ke = td->td_kse;
- ke2 = child->td_kse;
- ke2->ke_slice = (ke->ke_slice + 1) >> 1;
- ke2->ke_flags |= KEF_FIRST_SLICE | (ke->ke_flags & KEF_NEXTRQ);
- ke2->ke_activated = 0;
- ke->ke_slice >>= 1;
- if (ke->ke_slice == 0) {
- ke->ke_slice = 1;
+ ts = td->td_sched;
+ ts2 = child->td_sched;
+
+ ts2->ts_slptime = ts2->ts_slptime * CHILD_WEIGHT / 100;
+ if (child->td_pri_class == PRI_TIMESHARE)
+ sched_user_prio(child, sched_calc_pri(ts2));
+ ts->ts_slptime = ts->ts_slptime * PARENT_WEIGHT / 100;
+ ts2->ts_slice = (ts->ts_slice + 1) >> 1;
+ ts2->ts_flags |= TSF_FIRST_SLICE | (ts->ts_flags & TSF_NEXTRQ);
+ ts2->ts_activated = 0;
+ ts->ts_slice >>= 1;
+ if (ts->ts_slice == 0) {
+ ts->ts_slice = 1;
sched_tick();
}
/* Grab our parent's cpu estimation information. */
- ke2->ke_ticks = ke->ke_ticks;
- ke2->ke_ltick = ke->ke_ltick;
- ke2->ke_ftick = ke->ke_ftick;
+ ts2->ts_ticks = ts->ts_ticks;
+ ts2->ts_ltick = ts->ts_ltick;
+ ts2->ts_ftick = ts->ts_ftick;
}
void
-sched_class(struct ksegrp *kg, int class)
+sched_class(struct thread *td, int class)
{
mtx_assert(&sched_lock, MA_OWNED);
- kg->kg_pri_class = class;
+ td->td_pri_class = class;
}
/*
@@ -1208,42 +1144,37 @@ sched_exit(struct proc *p, struct thread *childtd)
{
mtx_assert(&sched_lock, MA_OWNED);
sched_exit_thread(FIRST_THREAD_IN_PROC(p), childtd);
- sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
}
void
-sched_exit_ksegrp(struct ksegrp *parentkg, struct thread *td)
+sched_exit_thread(struct thread *td, struct thread *childtd)
{
- if (td->td_ksegrp->kg_slptime < parentkg->kg_slptime) {
- parentkg->kg_slptime = parentkg->kg_slptime /
+ struct td_sched *childke = childtd->td_sched;
+ struct td_sched *parentke = td->td_sched;
+
+ if (childke->ts_slptime < parentke->ts_slptime) {
+ parentke->ts_slptime = parentke->ts_slptime /
(EXIT_WEIGHT) * (EXIT_WEIGHT - 1) +
- td->td_ksegrp->kg_slptime / EXIT_WEIGHT;
+ parentke->ts_slptime / EXIT_WEIGHT;
}
-}
-
-void
-sched_exit_thread(struct thread *td, struct thread *childtd)
-{
- struct kse *childke = childtd->td_kse;
- struct kse *parentke = td->td_kse;
kseq_load_rem(KSEQ_SELF(), childke);
sched_update_runtime(childke, sched_timestamp());
sched_commit_runtime(childke);
- if ((childke->ke_flags & KEF_FIRST_SLICE) &&
+ if ((childke->ts_flags & TSF_FIRST_SLICE) &&
td->td_proc == childtd->td_proc->p_pptr) {
- parentke->ke_slice += childke->ke_slice;
- if (parentke->ke_slice > sched_timeslice(parentke))
- parentke->ke_slice = sched_timeslice(parentke);
+ parentke->ts_slice += childke->ts_slice;
+ if (parentke->ts_slice > sched_timeslice(parentke))
+ parentke->ts_slice = sched_timeslice(parentke);
}
}
static int
-sched_starving(struct kseq *ksq, unsigned now, struct kse *ke)
+sched_starving(struct kseq *ksq, unsigned now, struct td_sched *ts)
{
uint64_t delta;
- if (ke->ke_proc->p_nice > ksq->ksq_expired_nice)
+ if (ts->ts_proc->p_nice > ksq->ksq_expired_nice)
return (1);
if (ksq->ksq_expired_tick == 0)
return (0);
@@ -1258,11 +1189,11 @@ sched_starving(struct kseq *ksq, unsigned now, struct kse *ke)
* a cpu hog can have larger granularity.
*/
static inline int
-sched_timeslice_split(struct kse *ke)
+sched_timeslice_split(struct td_sched *ts)
{
int score, g;
- score = (int)(MAX_SCORE - CURRENT_SCORE(ke->ke_ksegrp));
+ score = (int)(MAX_SCORE - CURRENT_SCORE(ts));
if (score == 0)
score = 1;
#ifdef SMP
@@ -1270,7 +1201,7 @@ sched_timeslice_split(struct kse *ke)
#else
g = granularity * ((1 << score) - 1);
#endif
- return (ke->ke_slice >= g && ke->ke_slice % g == 0);
+ return (ts->ts_slice >= g && ts->ts_slice % g == 0);
}
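sched_timeslice_split() lets an interactive thread be rescheduled before its slice is fully spent: the granularity g grows roughly geometrically as the interactivity score drops, which is what the "a cpu hog can have larger granularity" comment above is getting at. A sketch of the uniprocessor form of the test, with stand-in values for MAX_SCORE and granularity:

/* Illustrative sketch, not part of the patch: stand-in values,
 * uniprocessor form of the test. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SCORE 40                    /* stand-in */
static const int granularity = 10;      /* stand-in, in ticks */

static bool
timeslice_split(int current_score, int slice_left)
{
        int score = MAX_SCORE - current_score;
        int g;

        if (score == 0)
                score = 1;
        /* g grows roughly geometrically as interactivity drops. */
        g = granularity * ((1 << score) - 1);
        return (slice_left >= g && slice_left % g == 0);
}

int
main(void)
{
        /* A nearly fully interactive thread is checked every 10 ticks... */
        printf("%d\n", timeslice_split(39, 40));        /* 1 */
        /* ...a slightly less interactive one only every 70 ticks. */
        printf("%d\n", timeslice_split(37, 40));        /* 0 */
        return (0);
}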
void
@@ -1278,8 +1209,7 @@ sched_tick(void)
{
struct thread *td;
struct proc *p;
- struct kse *ke;
- struct ksegrp *kg;
+ struct td_sched *ts;
struct kseq *kseq;
uint64_t now;
int cpuid;
@@ -1288,10 +1218,9 @@ sched_tick(void)
mtx_assert(&sched_lock, MA_OWNED);
td = curthread;
- ke = td->td_kse;
- kg = td->td_ksegrp;
+ ts = td->td_sched;
p = td->td_proc;
- class = PRI_BASE(kg->kg_pri_class);
+ class = PRI_BASE(td->td_pri_class);
now = sched_timestamp();
cpuid = PCPU_GET(cpuid);
kseq = KSEQ_CPU(cpuid);
@@ -1301,8 +1230,8 @@ sched_tick(void)
/*
* Processes of equal idle priority are run round-robin.
*/
- if (td != PCPU_GET(idlethread) && --ke->ke_slice <= 0) {
- ke->ke_slice = def_timeslice;
+ if (td != PCPU_GET(idlethread) && --ts->ts_slice <= 0) {
+ ts->ts_slice = def_timeslice;
td->td_flags |= TDF_NEEDRESCHED;
}
return;
@@ -1313,8 +1242,8 @@ sched_tick(void)
* Realtime scheduling, do round robin for RR class, FIFO
* is not affected.
*/
- if (PRI_NEED_RR(kg->kg_pri_class) && --ke->ke_slice <= 0) {
- ke->ke_slice = def_timeslice;
+ if (PRI_NEED_RR(td->td_pri_class) && --ts->ts_slice <= 0) {
+ ts->ts_slice = def_timeslice;
td->td_flags |= TDF_NEEDRESCHED;
}
return;
@@ -1326,26 +1255,26 @@ sched_tick(void)
if (class != PRI_TIMESHARE || (p->p_flag & P_KTHREAD) != 0)
return;
- if (--ke->ke_slice <= 0) {
+ if (--ts->ts_slice <= 0) {
td->td_flags |= TDF_NEEDRESCHED;
- sched_update_runtime(ke, now);
- sched_commit_runtime(ke);
- sched_user_prio(kg, sched_calc_pri(kg));
- ke->ke_slice = sched_timeslice(ke);
- ke->ke_flags &= ~KEF_FIRST_SLICE;
- if (ke->ke_flags & KEF_BOUND || td->td_pinned) {
+ sched_update_runtime(ts, now);
+ sched_commit_runtime(ts);
+ sched_user_prio(td, sched_calc_pri(ts));
+ ts->ts_slice = sched_timeslice(ts);
+ ts->ts_flags &= ~TSF_FIRST_SLICE;
+ if (ts->ts_flags & TSF_BOUND || td->td_pinned) {
if (kseq->ksq_expired_tick == 0)
kseq->ksq_expired_tick = tick;
} else {
if (kseq_global.ksq_expired_tick == 0)
kseq_global.ksq_expired_tick = tick;
}
- if (!THREAD_IS_INTERACTIVE(ke) ||
- sched_starving(kseq, tick, ke) ||
- sched_starving(&kseq_global, tick, ke)) {
+ if (!THREAD_IS_INTERACTIVE(ts) ||
+ sched_starving(kseq, tick, ts) ||
+ sched_starving(&kseq_global, tick, ts)) {
/* The thread becomes a cpu hog, schedule it off. */
- ke->ke_flags |= KEF_NEXTRQ;
- if (ke->ke_flags & KEF_BOUND || td->td_pinned) {
+ ts->ts_flags |= TSF_NEXTRQ;
+ if (ts->ts_flags & TSF_BOUND || td->td_pinned) {
if (p->p_nice < kseq->ksq_expired_nice)
kseq->ksq_expired_nice = p->p_nice;
} else {
@@ -1361,7 +1290,7 @@ sched_tick(void)
* chunks. This essentially does round-robin between
* interactive threads.
*/
- if (THREAD_IS_INTERACTIVE(ke) && sched_timeslice_split(ke))
+ if (THREAD_IS_INTERACTIVE(ts) && sched_timeslice_split(ts))
td->td_flags |= TDF_NEEDRESCHED;
}
}
@@ -1369,20 +1298,18 @@ sched_tick(void)
void
sched_clock(struct thread *td)
{
- struct ksegrp *kg;
- struct kse *ke;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
- kg = ke->ke_ksegrp;
+ ts = td->td_sched;
/* Adjust ticks for pctcpu */
- ke->ke_ticks++;
- ke->ke_ltick = ticks;
+ ts->ts_ticks++;
+ ts->ts_ltick = ticks;
/* Go up to one second beyond our max and then trim back down */
- if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
- sched_pctcpu_update(ke);
+ if (ts->ts_ftick + SCHED_CPU_TICKS + hz < ts->ts_ltick)
+ sched_pctcpu_update(ts);
}
static int
@@ -1406,52 +1333,50 @@ sched_runnable(void)
void
sched_userret(struct thread *td)
{
- struct ksegrp *kg;
KASSERT((td->td_flags & TDF_BORROWING) == 0,
("thread with borrowed priority returning to userland"));
- kg = td->td_ksegrp;
- if (td->td_priority != kg->kg_user_pri) {
+ if (td->td_priority != td->td_user_pri) {
mtx_lock_spin(&sched_lock);
- td->td_priority = kg->kg_user_pri;
- td->td_base_pri = kg->kg_user_pri;
+ td->td_priority = td->td_user_pri;
+ td->td_base_pri = td->td_user_pri;
mtx_unlock_spin(&sched_lock);
}
}
-struct kse *
+struct td_sched *
sched_choose(void)
{
- struct kse *ke;
+ struct td_sched *ts;
struct kseq *kseq;
#ifdef SMP
- struct kse *kecpu;
+ struct td_sched *kecpu;
mtx_assert(&sched_lock, MA_OWNED);
kseq = &kseq_global;
- ke = kseq_choose(&kseq_global);
+ ts = kseq_choose(&kseq_global);
kecpu = kseq_choose(KSEQ_SELF());
- if (ke == NULL ||
+ if (ts == NULL ||
(kecpu != NULL &&
- kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
- ke = kecpu;
+ kecpu->ts_thread->td_priority < ts->ts_thread->td_priority)) {
+ ts = kecpu;
kseq = KSEQ_SELF();
}
#else
kseq = &kseq_global;
- ke = kseq_choose(kseq);
+ ts = kseq_choose(kseq);
#endif
- if (ke != NULL) {
- kseq_runq_rem(kseq, ke);
- ke->ke_state = KES_THREAD;
- ke->ke_flags &= ~KEF_PREEMPTED;
- ke->ke_timestamp = sched_timestamp();
+ if (ts != NULL) {
+ kseq_runq_rem(kseq, ts);
+ ts->ts_state = TSS_THREAD;
+ ts->ts_flags &= ~TSF_PREEMPTED;
+ ts->ts_timestamp = sched_timestamp();
}
- return (ke);
+ return (ts);
}
#ifdef SMP
@@ -1543,8 +1468,7 @@ void
sched_add(struct thread *td, int flags)
{
struct kseq *ksq;
- struct ksegrp *kg;
- struct kse *ke;
+ struct td_sched *ts;
struct thread *mytd;
int class;
int nextrq;
@@ -1558,34 +1482,33 @@ sched_add(struct thread *td, int flags)
mtx_assert(&sched_lock, MA_OWNED);
mytd = curthread;
- ke = td->td_kse;
- kg = td->td_ksegrp;
- KASSERT(ke->ke_state != KES_ONRUNQ,
- ("sched_add: kse %p (%s) already in run queue", ke,
- ke->ke_proc->p_comm));
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
+ ts = td->td_sched;
+ KASSERT(ts->ts_state != TSS_ONRUNQ,
+ ("sched_add: td_sched %p (%s) already in run queue", ts,
+ ts->ts_proc->p_comm));
+ KASSERT(ts->ts_proc->p_sflag & PS_INMEM,
("sched_add: process swapped out"));
- KASSERT(ke->ke_runq == NULL,
- ("sched_add: KSE %p is still assigned to a run queue", ke));
+ KASSERT(ts->ts_runq == NULL,
+ ("sched_add: KSE %p is still assigned to a run queue", ts));
- class = PRI_BASE(kg->kg_pri_class);
+ class = PRI_BASE(td->td_pri_class);
#ifdef SMP
mycpu = PCPU_GET(cpuid);
myksq = KSEQ_CPU(mycpu);
- ke->ke_wakeup_cpu = mycpu;
+ ts->ts_wakeup_cpu = mycpu;
#endif
- nextrq = (ke->ke_flags & KEF_NEXTRQ);
- ke->ke_flags &= ~KEF_NEXTRQ;
+ nextrq = (ts->ts_flags & TSF_NEXTRQ);
+ ts->ts_flags &= ~TSF_NEXTRQ;
if (flags & SRQ_PREEMPTED)
- ke->ke_flags |= KEF_PREEMPTED;
+ ts->ts_flags |= TSF_PREEMPTED;
ksq = &kseq_global;
#ifdef SMP
if (td->td_pinned != 0) {
cpu = td->td_lastcpu;
ksq = KSEQ_CPU(cpu);
pinned = 1;
- } else if ((ke)->ke_flags & KEF_BOUND) {
- cpu = ke->ke_cpu;
+ } else if ((ts)->ts_flags & TSF_BOUND) {
+ cpu = ts->ts_cpu;
ksq = KSEQ_CPU(cpu);
pinned = 1;
} else {
@@ -1596,22 +1519,22 @@ sched_add(struct thread *td, int flags)
switch (class) {
case PRI_ITHD:
case PRI_REALTIME:
- ke->ke_runq = ksq->ksq_curr;
+ ts->ts_runq = ksq->ksq_curr;
break;
case PRI_TIMESHARE:
if ((td->td_flags & TDF_BORROWING) == 0 && nextrq)
- ke->ke_runq = ksq->ksq_next;
+ ts->ts_runq = ksq->ksq_next;
else
- ke->ke_runq = ksq->ksq_curr;
+ ts->ts_runq = ksq->ksq_curr;
break;
case PRI_IDLE:
/*
* This is for priority prop.
*/
if (td->td_priority < PRI_MIN_IDLE)
- ke->ke_runq = ksq->ksq_curr;
+ ts->ts_runq = ksq->ksq_curr;
else
- ke->ke_runq = &ksq->ksq_idle;
+ ts->ts_runq = &ksq->ksq_idle;
break;
default:
panic("Unknown pri class.");
@@ -1619,33 +1542,32 @@ sched_add(struct thread *td, int flags)
}
#ifdef SMP
- if ((ke->ke_runq == kseq_global.ksq_curr ||
- ke->ke_runq == myksq->ksq_curr) &&
+ if ((ts->ts_runq == kseq_global.ksq_curr ||
+ ts->ts_runq == myksq->ksq_curr) &&
td->td_priority < mytd->td_priority) {
#else
- if (ke->ke_runq == kseq_global.ksq_curr &&
+ if (ts->ts_runq == kseq_global.ksq_curr &&
td->td_priority < mytd->td_priority) {
#endif
struct krunq *rq;
- rq = ke->ke_runq;
- ke->ke_runq = NULL;
+ rq = ts->ts_runq;
+ ts->ts_runq = NULL;
if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
return;
- ke->ke_runq = rq;
+ ts->ts_runq = rq;
need_resched = TDF_NEEDRESCHED;
}
- SLOT_USE(kg);
- ke->ke_state = KES_ONRUNQ;
- kseq_runq_add(ksq, ke);
- kseq_load_add(ksq, ke);
+ ts->ts_state = TSS_ONRUNQ;
+ kseq_runq_add(ksq, ts);
+ kseq_load_add(ksq, ts);
#ifdef SMP
if (pinned) {
if (cpu != mycpu) {
struct thread *running = pcpu_find(cpu)->pc_curthread;
- if (ksq->ksq_curr == ke->ke_runq &&
+ if (ksq->ksq_curr == ts->ts_runq &&
running->td_priority < td->td_priority) {
if (td->td_priority <= PRI_MAX_ITHD)
ipi_selected(1 << cpu, IPI_PREEMPT);
@@ -1676,33 +1598,32 @@ void
sched_rem(struct thread *td)
{
struct kseq *kseq;
- struct kse *ke;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
- KASSERT((ke->ke_state == KES_ONRUNQ),
+ ts = td->td_sched;
+ KASSERT((ts->ts_state == TSS_ONRUNQ),
("sched_rem: KSE not on run queue"));
- kseq = ke->ke_kseq;
- SLOT_RELEASE(td->td_ksegrp);
- kseq_runq_rem(kseq, ke);
- kseq_load_rem(kseq, ke);
- ke->ke_state = KES_THREAD;
+ kseq = ts->ts_kseq;
+ kseq_runq_rem(kseq, ts);
+ kseq_load_rem(kseq, ts);
+ ts->ts_state = TSS_THREAD;
}
fixpt_t
sched_pctcpu(struct thread *td)
{
fixpt_t pctcpu;
- struct kse *ke;
+ struct td_sched *ts;
pctcpu = 0;
- ke = td->td_kse;
- if (ke == NULL)
+ ts = td->td_sched;
+ if (ts == NULL)
return (0);
mtx_lock_spin(&sched_lock);
- if (ke->ke_ticks) {
+ if (ts->ts_ticks) {
int rtick;
/*
@@ -1710,15 +1631,15 @@ sched_pctcpu(struct thread *td)
* this causes the cpu usage to decay away too quickly due to
* rounding errors.
*/
- if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
- ke->ke_ltick < (ticks - (hz / 2)))
- sched_pctcpu_update(ke);
+ if (ts->ts_ftick + SCHED_CPU_TICKS < ts->ts_ltick ||
+ ts->ts_ltick < (ticks - (hz / 2)))
+ sched_pctcpu_update(ts);
/* How many rtick per second ? */
- rtick = MIN(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
+ rtick = MIN(ts->ts_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
}
- ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
+ ts->ts_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
mtx_unlock_spin(&sched_lock);
return (pctcpu);
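The closing expression of sched_pctcpu() packs "run ticks per realstathz second" into the kernel's fixpt_t fraction of a CPU. A sketch of that conversion, assuming the stock FSHIFT/FSCALE definitions from <sys/param.h> (FSHIFT == 11) and a stand-in realstathz:

/* Illustrative sketch, not part of the patch: FSHIFT/FSCALE as in
 * <sys/param.h> (assumed), stand-in realstathz. */
#include <stdint.h>
#include <stdio.h>

#define FSHIFT  11
#define FSCALE  (1 << FSHIFT)

typedef uint32_t fixpt_t;

static fixpt_t
pctcpu_fixpt(int rtick, int realstathz)
{
        /* Fraction of one CPU, scaled by FSCALE, as in sched_pctcpu(). */
        return ((FSCALE * ((FSCALE * rtick) / realstathz)) >> FSHIFT);
}

int
main(void)
{
        int realstathz = 128;           /* stand-in stathz */
        fixpt_t p = pctcpu_fixpt(64, realstathz);

        /* 64 run ticks out of 128 per second is half a CPU: FSCALE / 2. */
        printf("%u -> %u%%\n", (unsigned)p, (unsigned)((p * 100) >> FSHIFT));
        return (0);
}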
@@ -1727,13 +1648,13 @@ sched_pctcpu(struct thread *td)
void
sched_bind(struct thread *td, int cpu)
{
- struct kse *ke;
+ struct td_sched *ts;
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
- ke->ke_flags |= KEF_BOUND;
+ ts = td->td_sched;
+ ts->ts_flags |= TSF_BOUND;
#ifdef SMP
- ke->ke_cpu = cpu;
+ ts->ts_cpu = cpu;
if (PCPU_GET(cpuid) == cpu)
return;
mi_switch(SW_VOL, NULL);
@@ -1744,14 +1665,14 @@ void
sched_unbind(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
- td->td_kse->ke_flags &= ~KEF_BOUND;
+ td->td_sched->ts_flags &= ~TSF_BOUND;
}
int
sched_is_bound(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
- return (td->td_kse->ke_flags & KEF_BOUND);
+ return (td->td_sched->ts_flags & TSF_BOUND);
}
int
@@ -1763,25 +1684,17 @@ sched_load(void)
void
sched_relinquish(struct thread *td)
{
- struct ksegrp *kg;
- kg = td->td_ksegrp;
mtx_lock_spin(&sched_lock);
- if (sched_is_timeshare(kg)) {
+ if (sched_is_timeshare(td)) {
sched_prio(td, PRI_MAX_TIMESHARE);
- td->td_kse->ke_flags |= KEF_NEXTRQ;
+ td->td_sched->ts_flags |= TSF_NEXTRQ;
}
mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
}
int
-sched_sizeof_ksegrp(void)
-{
- return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
-}
-
-int
sched_sizeof_proc(void)
{
return (sizeof(struct proc));