author		jeff <jeff@FreeBSD.org>		2003-07-08 06:19:40 +0000
committer	jeff <jeff@FreeBSD.org>		2003-07-08 06:19:40 +0000
commit		f90fe69bb8bcf6a9534648edceeea0d265e8b883 (patch)
tree		48ed368bd6d13750bc0e7f7951066ed22fa260dd /sys/kern
parent		bba10d998e419f6ed907ed3c7ad0e83367130a8b (diff)
- When stealing a kse in kseq_move() ignore the current kseq's min nice value.
  We want to steal any thread, even one that is not given a slice on its current queue.
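
For readers skimming the change, here is a minimal standalone sketch (not the kernel code) of the behaviour the new argument introduces. The struct layout, the queue, and the helpers below are simplified stand-ins invented for illustration; only the steal/ke_slice decision mirrors what kseq_choose() does after this commit.

#include <stdio.h>

/* Simplified stand-in for struct kse; only the fields the sketch needs. */
struct kse {
	int ke_slice;	/* 0 means the nice value put it outside the slice range */
	int priority;	/* lower value == higher priority */
};

/* Stand-in for runq_choose(): pick the highest-priority candidate. */
static struct kse *
pick_highest(struct kse *ks, int n)
{
	struct kse *best = NULL;

	for (int i = 0; i < n; i++)
		if (best == NULL || ks[i].priority < best->priority)
			best = &ks[i];
	return (best);
}

/*
 * With steal == 0 (a CPU scheduling for itself) a zero-slice kse is not
 * returned; the real code requeues it on ksq_next and tries again.  With
 * steal == 1 (kseq_move() migrating load to another CPU) it is returned
 * anyway, which is the point of the commit.
 */
static struct kse *
kseq_choose_sketch(struct kse *ks, int n, int steal)
{
	struct kse *ke = pick_highest(ks, n);

	if (ke != NULL && ke->ke_slice == 0 && steal == 0)
		return (NULL);	/* real code: requeue on ksq_next and retry */
	return (ke);
}

int
main(void)
{
	struct kse q[] = { { 0, 10 }, { 5, 20 } };
	struct kse *ke;

	ke = kseq_choose_sketch(q, 2, 0);
	printf("steal=0: %s\n", ke ? "picked" : "zero-slice kse skipped");
	ke = kseq_choose_sketch(q, 2, 1);
	printf("steal=1: picked priority %d\n", ke->priority);
	return (0);
}

In the actual diff below, kseq_move() is the only caller that passes 1; sched_clock(), sched_userret() and sched_choose() keep the old behaviour by passing 0.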
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/sched_ule.c	20
1 file changed, 13 insertions, 7 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index f4c324c..a24154c 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -237,7 +237,7 @@ void sched_pctcpu_update(struct kse *ke);
 int sched_pickcpu(void);
 
 /* Operations on per processor queues */
-static struct kse * kseq_choose(struct kseq *kseq);
+static struct kse * kseq_choose(struct kseq *kseq, int steal);
 static void kseq_setup(struct kseq *kseq);
 static void kseq_add(struct kseq *kseq, struct kse *ke);
 static void kseq_rem(struct kseq *kseq, struct kse *ke);
@@ -455,7 +455,7 @@ kseq_move(struct kseq *from, int cpu)
 {
 	struct kse *ke;
 
-	ke = kseq_choose(from);
+	ke = kseq_choose(from, 1);
 	runq_remove(ke->ke_runq, ke);
 	ke->ke_state = KES_THREAD;
 	kseq_rem(from, ke);
@@ -465,8 +465,14 @@ kseq_move(struct kseq *from, int cpu)
 }
 #endif
 
+/*
+ * Pick the highest priority task we have and return it. If steal is 1 we
+ * will return kses that have been denied slices due to their nice being too
+ * low. In the future we should prohibit stealing interrupt threads as well.
+ */
+
 struct kse *
-kseq_choose(struct kseq *kseq)
+kseq_choose(struct kseq *kseq, int steal)
 {
 	struct kse *ke;
 	struct runq *swap;
@@ -492,7 +498,7 @@ kseq_choose(struct kseq *kseq)
 		 * TIMESHARE kse group and its nice was too far out
 		 * of the range that receives slices.
 		 */
-		if (ke->ke_slice == 0) {
+		if (ke->ke_slice == 0 && steal == 0) {
 			runq_remove(ke->ke_runq, ke);
 			sched_slice(ke);
 			ke->ke_runq = kseq->ksq_next;
@@ -1062,7 +1068,7 @@ sched_clock(struct kse *ke)
 	 */
 	kseq = KSEQ_SELF();
 #if 0
-	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
+	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq, 0)) != NULL) {
 		if (sched_strict &&
 		    nke->ke_thread->td_priority < td->td_priority)
 			td->td_flags |= TDF_NEEDRESCHED;
@@ -1159,7 +1165,7 @@ sched_userret(struct thread *td)
 #else
 	    kseq->ksq_load > 1 &&
 #endif
-	    (ke = kseq_choose(kseq)) != NULL &&
+	    (ke = kseq_choose(kseq, 0)) != NULL &&
 	    ke->ke_thread->td_priority < td->td_priority)
 		curthread->td_flags |= TDF_NEEDRESCHED;
 	mtx_unlock_spin(&sched_lock);
@@ -1177,7 +1183,7 @@ sched_choose(void)
 retry:
 #endif
 	kseq = KSEQ_SELF();
-	ke = kseq_choose(kseq);
+	ke = kseq_choose(kseq, 0);
 	if (ke) {
 		runq_remove(ke->ke_runq, ke);
 		ke->ke_state = KES_THREAD;