summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2004-07-08 21:45:04 +0000
committerjhb <jhb@FreeBSD.org>2004-07-08 21:45:04 +0000
commiteeb3c914453162ca5f06e82aeddf24e35a8c86ad (patch)
treebca8a9a7f71e2c142c28fc30b672069a531506be /sys
parentacd87011239f21612790ec0ff26db6af4ecdb3cd (diff)
downloadFreeBSD-src-eeb3c914453162ca5f06e82aeddf24e35a8c86ad.zip
FreeBSD-src-eeb3c914453162ca5f06e82aeddf24e35a8c86ad.tar.gz
- Move contents of sched_add() into a sched_add_internal() function that
takes an argument to specify if it should preempt or not. Don't preempt
when sched_add_internal() is called from kseq_idled() or kseq_assign(),
as in those cases we are about to call mi_switch() anyway. Also, doing
so during the first context switch on an AP leads to a NULL pointer
dereference because curthread is NULL.
- Reenable preemption for ULE.
Submitted by: Taku YAMAMOTO <taku at tackymt.homeip.net>
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/sched_ule.c16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index b37ab0c..f35c744 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -262,6 +262,7 @@ static struct kseq kseq_cpu;
#define KSEQ_CPU(x) (&kseq_cpu)
#endif
+static void sched_add_internal(struct thread *td, int preemptive);
static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
@@ -616,7 +617,7 @@ kseq_idled(struct kseq *kseq)
kseq_runq_rem(steal, ke);
kseq_load_rem(steal, ke);
ke->ke_cpu = PCPU_GET(cpuid);
- sched_add(ke->ke_thread);
+ sched_add_internal(ke->ke_thread, 0);
return (0);
}
}
@@ -644,7 +645,7 @@ kseq_assign(struct kseq *kseq)
for (; ke != NULL; ke = nke) {
nke = ke->ke_assign;
ke->ke_flags &= ~KEF_ASSIGNED;
- sched_add(ke->ke_thread);
+ sched_add_internal(ke->ke_thread, 0);
}
}
@@ -1542,6 +1543,13 @@ restart:
void
sched_add(struct thread *td)
{
+
+ sched_add_internal(td, 1);
+}
+
+static void
+sched_add_internal(struct thread *td, int preemptive)
+{
struct kseq *kseq;
struct ksegrp *kg;
struct kse *ke;
@@ -1623,7 +1631,6 @@ sched_add(struct thread *td)
if (td->td_priority < curthread->td_priority)
curthread->td_flags |= TDF_NEEDRESCHED;
-#if 0
#ifdef SMP
/*
* Only try to preempt if the thread is unpinned or pinned to the
@@ -1631,9 +1638,8 @@ sched_add(struct thread *td)
*/
if (KSE_CAN_MIGRATE(ke, class) || ke->ke_cpu == PCPU_GET(cpuid))
#endif
- if (maybe_preempt(td))
+ if (preemptive && maybe_preempt(td))
return;
-#endif
ke->ke_ksegrp->kg_runq_kses++;
ke->ke_state = KES_ONRUNQ;
OpenPOWER on IntegriCloud