summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2004-02-01 06:20:18 +0000
committerjeff <jeff@FreeBSD.org>2004-02-01 06:20:18 +0000
commit371f8838d17c51a366148891b8df059a38604325 (patch)
treead377fd88ee86a680f325effb9c3e7ea41b7eabd /sys/kern
parent8b93703f2c0b903c5eccfdab4cdd0714e496bea7 (diff)
downloadFreeBSD-src-371f8838d17c51a366148891b8df059a38604325.zip
FreeBSD-src-371f8838d17c51a366148891b8df059a38604325.tar.gz
- Disable ithread binding in all cases for now. This doesn't make as much
  sense with sched_4bsd as it does with sched_ule.
- Use P_NOLOAD instead of the absence of td->td_ithd to determine whether or
  not a thread should be accounted for in sched_tdcnt.
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/sched_4bsd.c  18
1 files changed, 5 insertions, 13 deletions
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 81e12e3..e30c5b9 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -81,18 +81,10 @@ struct ke_sched {
/*
* KSE_CAN_MIGRATE macro returns true if the kse can migrate between
- * cpus. Currently ithread cpu binding is disabled on x86 due to a
- * bug in the Xeon round-robin interrupt delivery that delivers all
- * interrupts to cpu 0.
+ * cpus.
*/
-#ifdef __i386__
#define KSE_CAN_MIGRATE(ke) \
((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
-#else
-#define KSE_CAN_MIGRATE(ke) \
- PRI_BASE((ke)->ke_ksegrp->kg_pri_class) != PRI_ITHD && \
- ((ke)->ke_thread->td_pinned == 0 &&((ke)->ke_flags & KEF_BOUND) == 0)
-#endif
static struct ke_sched ke_sched;
struct ke_sched *kse0_sched = &ke_sched;
@@ -564,7 +556,7 @@ sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
void
sched_exit_thread(struct thread *td, struct thread *child)
{
- if (td->td_ithd == NULL)
+ if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
}
@@ -651,7 +643,7 @@ sched_switch(struct thread *td)
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((ke->ke_state == KES_THREAD), ("sched_switch: kse state?"));
- if ((td->td_flags & TDF_IDLETD) == 0 && td->td_ithd == NULL)
+ if ((p->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
td->td_lastcpu = td->td_oncpu;
td->td_last_kse = ke;
@@ -724,7 +716,7 @@ sched_add(struct thread *td)
#else
ke->ke_runq = &runq;
#endif
- if (td->td_ithd == NULL)
+ if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt++;
runq_add(ke->ke_runq, ke);
}
@@ -741,7 +733,7 @@ sched_rem(struct thread *td)
("sched_rem: KSE not on run queue"));
mtx_assert(&sched_lock, MA_OWNED);
- if (td->td_ithd == NULL)
+ if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
runq_remove(ke->ke_sched->ske_runq, ke);
OpenPOWER on IntegriCloud