author     julian <julian@FreeBSD.org>    2002-10-14 20:34:31 +0000
committer  julian <julian@FreeBSD.org>    2002-10-14 20:34:31 +0000
commit     9ce470a533cf51e65a0e46f0d3f76a7467037479 (patch)
tree       ec5f595c6f180bad73ef2a0656ff0fb1edeac3a7 /sys
parent     e53d0f994c6510cd95298e77954eea9fa4efa067 (diff)
Tidy up the scheduler's code for changing the priority of a thread.
Logically pretty much a NOP.
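
In practice the tidy-up replaces the open-coded triple at each call site (dequeue the thread, write td_priority, requeue it) with a single sched_prio() call, and moves the requeue decision into the scheduler via the new adjustrunqueue(). A minimal, self-contained sketch of the before/after shape; the types and names below are toy stand-ins, not the kernel's own:

struct toy_thread {
	int	td_priority;
	int	on_runq;		/* stand-in for TD_ON_RUNQ() */
};

static void toy_remrunqueue(struct toy_thread *td) { td->on_runq = 0; }
static void toy_setrunqueue(struct toy_thread *td) { td->on_runq = 1; }

/* Before: every call site open-coded the dequeue/write/requeue. */
static void
callsite_before(struct toy_thread *td, int newpri)
{
	toy_remrunqueue(td);
	td->td_priority = newpri;
	toy_setrunqueue(td);
}

/* After: call sites just ask the scheduler to change the priority. */
static void
toy_sched_prio(struct toy_thread *td, int newpri)
{
	if (td->on_runq)
		callsite_before(td, newpri);	/* runnable: requeue path */
	else
		td->td_priority = newpri;	/* not runnable: field write only */
}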
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_switch.c  | 60
-rw-r--r--  sys/kern/ksched.c       | 10
-rw-r--r--  sys/kern/sched_4bsd.c   | 37
-rw-r--r--  sys/posix4/ksched.c     | 10
-rw-r--r--  sys/sys/proc.h          |  6
5 files changed, 65 insertions(+), 58 deletions(-)
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 14d6b2f..d171cb3 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -277,13 +277,14 @@ kse_reassign(struct kse *ke)
CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
}
+#if 0
/*
* Remove a thread from its KSEGRP's run queue.
* This in turn may remove it from a KSE if it was already assigned
* to one, possibly causing a new thread to be assigned to the KSE
* and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
*/
-void
+static void
remrunqueue(struct thread *td)
{
struct thread *td2, *td3;
@@ -325,6 +326,51 @@ remrunqueue(struct thread *td)
kse_reassign(ke);
}
}
+#endif
+
+/*
+ * Change the priority of a thread that is on the run queue.
+ */
+void
+adjustrunqueue(struct thread *td, int newpri)
+{
+ struct ksegrp *kg;
+ struct kse *ke;
+
+ mtx_assert(&sched_lock, MA_OWNED);
+ KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
+ /*
+ * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
+ * threads are BOUND.
+ */
+ ke = td->td_kse;
+ CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
+ if ((td->td_flags & TDF_UNBOUND) == 0) {
+ /* We only care about the kse in the run queue. */
+ if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
+ sched_rem(ke);
+ td->td_priority = newpri;
+ sched_add(ke);
+ }
+ return;
+ }
+ /*
+ * An unbound thread. This is not optimised yet.
+ */
+ kg = td->td_ksegrp;
+ kg->kg_runnable--;
+ TD_SET_CAN_RUN(td);
+ if (ke) {
+ if (kg->kg_last_assigned == td) {
+ kg->kg_last_assigned =
+ TAILQ_PREV(td, threadqueue, td_runq);
+ }
+ sched_rem(ke);
+ }
+ TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
+ td->td_priority = newpri;
+ setrunqueue(td);
+}
void
setrunqueue(struct thread *td)
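
The bound-thread fast path above skips the requeue entirely when the new priority maps to the same run queue, so small adjustments become a single field write. A self-contained sketch of that bucket test; RQ_PPQ is taken to be 4 (priorities per queue) as in the kernel's runq layout of the time, and everything else here is a toy:

#include <stdio.h>

#define RQ_PPQ	4	/* priorities per run queue, per sys/runq.h */

/*
 * A KSE only needs to move between run queues when the new
 * priority lands in a different bucket, i.e. pri / RQ_PPQ changes.
 */
static int
needs_requeue(int oldpri, int newpri)
{
	return ((oldpri / RQ_PPQ) != (newpri / RQ_PPQ));
}

int
main(void)
{
	printf("%d\n", needs_requeue(130, 131));	/* 0: both in bucket 32 */
	printf("%d\n", needs_requeue(130, 128));	/* 0: both in bucket 32 */
	printf("%d\n", needs_requeue(130, 120));	/* 1: bucket 32 -> 30 */
	return (0);
}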
@@ -663,18 +709,6 @@ runq_remove(struct runq *rq, struct kse *ke)
}
#if 0
-static void
-runq_readjust(struct runq *rq, struct kse *ke)
-{
-
- if (ke->ke_rqindex != (ke->ke_thread->td_priority / RQ_PPQ)) {
- runq_remove(rq, ke);
- runq_add(rq, ke);
- }
-}
-#endif
-
-#if 0
void
panc(char *string1, char *string2)
{
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index 62ab684..690c2fa 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -187,10 +187,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
td->td_kse->ke_flags |= KEF_NEEDRESCHED;
} else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
- remrunqueue(td);
- td->td_priority =
- kg->kg_user_pri;
- setrunqueue(td);
+ sched_prio(td, kg->kg_user_pri);
}
}
}
@@ -220,10 +217,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
td->td_kse->ke_flags |= KEF_NEEDRESCHED;
} else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
- remrunqueue(td);
- td->td_priority =
- kg->kg_user_pri;
- setrunqueue(td);
+ sched_prio(td, kg->kg_user_pri);
}
}
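
Both hunks keep the same guard: FreeBSD priorities are numerically inverted (a smaller value is a stronger priority), so td_priority > kg_user_pri means the thread is currently running weaker than its user priority and should be pulled back up. A toy illustration of the convention, with made-up values:

/* Smaller number = stronger priority in this scheduler. */
static int
weaker_than_user_pri(int td_priority, int kg_user_pri)
{
	return (td_priority > kg_user_pri);
}

/*
 * Example: a thread at priority 180 with a user priority of 160 is
 * running weaker than it should, so the call site restores it:
 * weaker_than_user_pri(180, 160) == 1  ->  sched_prio(td, 160).
 */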
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 99d23aa..c57262f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -316,30 +316,8 @@ schedcpu(void *arg)
kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
resetpriority(kg);
FOREACH_THREAD_IN_GROUP(kg, td) {
- int changedqueue;
if (td->td_priority >= PUSER) {
- /*
- * Only change the priority
- * of threads that are still at their
- * user priority.
- * XXXKSE This is problematic
- * as we may need to re-order
- * the threads on the KSEG list.
- */
- changedqueue =
- ((td->td_priority / RQ_PPQ) !=
- (kg->kg_user_pri / RQ_PPQ));
-
- td->td_priority = kg->kg_user_pri;
- if (changedqueue && TD_ON_RUNQ(td)) {
- /* this could be optimised */
- remrunqueue(td);
- td->td_priority =
- kg->kg_user_pri;
- setrunqueue(td);
- } else {
- td->td_priority = kg->kg_user_pri;
- }
+ sched_prio(td, kg->kg_user_pri);
}
}
} /* end of ksegrp loop */
@@ -491,14 +469,20 @@ sched_nice(struct ksegrp *kg, int nice)
resetpriority(kg);
}
+/*
+ * Adjust the priority of a thread.
+ * This may include moving the thread within the KSEGRP,
+ * changing the assignment of a kse to the thread,
+ * and moving a KSE in the system run queue.
+ */
void
sched_prio(struct thread *td, u_char prio)
{
- td->td_priority = prio;
if (TD_ON_RUNQ(td)) {
- remrunqueue(td);
- setrunqueue(td);
+ adjustrunqueue(td, prio);
+ } else {
+ td->td_priority = prio;
}
}
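
For the unbound case that comment alludes to, adjustrunqueue() (added above in kern_switch.c) must also repair kg_last_assigned when it points at the thread being moved. A toy sketch of that marker fixup; the real code walks the KSEGRP's run queue with TAILQ_PREV(), and these types are stand-ins:

struct toy_thread2 {
	struct toy_thread2 *prev;	/* toward the head of the queue */
	int	td_priority;
};

/*
 * If the "last assigned" marker names the thread being removed,
 * retreat it to the previous thread in the queue (NULL at the head),
 * mirroring the TAILQ_PREV() fixup in adjustrunqueue().
 */
static struct toy_thread2 *
retreat_last_assigned(struct toy_thread2 *last, struct toy_thread2 *removing)
{
	return ((last == removing) ? removing->prev : last);
}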
@@ -527,6 +511,7 @@ sched_switchout(struct thread *td)
KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
td->td_lastcpu = ke->ke_oncpu;
+ td->td_last_kse = ke;
ke->ke_oncpu = NOCPU;
ke->ke_flags &= ~KEF_NEEDRESCHED;
/*
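
Alongside the tidy-up, this hunk also starts recording which KSE the thread last ran on, next to the existing last-CPU bookkeeping. A toy model of the pattern; the field names follow the diff, the structs are simplified stand-ins, and -1 here merely stands in for the kernel's NOCPU sentinel:

#define TOY_NOCPU	(-1)	/* stand-in for the kernel's NOCPU */

struct toy_kse {
	int	ke_oncpu;
};

struct toy_thread3 {
	int	td_lastcpu;
	struct toy_kse *td_last_kse;
	struct toy_kse *td_kse;
};

static void
toy_switchout(struct toy_thread3 *td)
{
	struct toy_kse *ke = td->td_kse;

	td->td_lastcpu = ke->ke_oncpu;	/* existing: remember the CPU */
	td->td_last_kse = ke;		/* new in this diff: remember the KSE */
	ke->ke_oncpu = TOY_NOCPU;	/* the KSE is no longer on a CPU */
}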
diff --git a/sys/posix4/ksched.c b/sys/posix4/ksched.c
index 62ab684..690c2fa 100644
--- a/sys/posix4/ksched.c
+++ b/sys/posix4/ksched.c
@@ -187,10 +187,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
td->td_kse->ke_flags |= KEF_NEEDRESCHED;
} else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
- remrunqueue(td);
- td->td_priority =
- kg->kg_user_pri;
- setrunqueue(td);
+ sched_prio(td, kg->kg_user_pri);
}
}
}
@@ -220,10 +217,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
td->td_kse->ke_flags |= KEF_NEEDRESCHED;
} else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
- remrunqueue(td);
- td->td_priority =
- kg->kg_user_pri;
- setrunqueue(td);
+ sched_prio(td, kg->kg_user_pri);
}
}
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index a10ac1c..14e9f4e 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -268,8 +268,8 @@ struct thread {
#define td_startzero td_flags
int td_flags; /* (j) TDF_* flags. */
int td_inhibitors; /* (j) Why can not run */
- struct kse *td_last_kse; /* Where it wants to be if possible. */
- struct kse *td_kse; /* Current KSE if running. */
+ struct kse *td_last_kse; /* (j) Previous value of td_kse */
+ struct kse *td_kse; /* (j) Current KSE if running. */
int td_dupfd; /* (k) Ret value from fdopen. XXX */
void *td_wchan; /* (j) Sleep address. */
const char *td_wmesg; /* (j) Reason for sleep. */
@@ -844,6 +844,7 @@ struct proc *pfind(pid_t); /* Find process by id. */
struct pgrp *pgfind(pid_t); /* Find process group by id. */
struct proc *zpfind(pid_t); /* Find zombie process by id. */
+void adjustrunqueue(struct thread *, int newpri);
void ast(struct trapframe *framep);
struct thread *choosethread(void);
int cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
@@ -871,7 +872,6 @@ void threadinit(void);
void proc_linkup(struct proc *p, struct ksegrp *kg,
struct kse *ke, struct thread *td);
void proc_reparent(struct proc *child, struct proc *newparent);
-void remrunqueue(struct thread *);
int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);
void setrunnable(struct thread *);