author	jeff <jeff@FreeBSD.org>	2003-11-04 07:45:41 +0000
committer	jeff <jeff@FreeBSD.org>	2003-11-04 07:45:41 +0000
commit	23d5b8343405aa50b1140e2fac18951575a55548 (patch)
tree	498fcf67b17bfabf55a3d1aed03c06f3573091c6 /sys/kern
parent	f692f640895d645706322a61176089c98dee8fb2 (diff)
- Add initial support for pinning and binding: sched_pin()/sched_unpin() temporarily keep a thread from migrating, and sched_bind()/sched_unbind() bind it to a single CPU. KSE_CAN_MIGRATE() now refuses to migrate pinned or bound KSEs.
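A minimal usage sketch of the pinning half (illustrative only, not part of this commit): in this initial version both calls take an explicit thread argument and assert that sched_lock, a spin mutex in this era, is held. pcpu_work() is a hypothetical placeholder.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>

static void
pin_example(void)
{
	/* Forbid migration while we look at per-CPU state. */
	mtx_lock_spin(&sched_lock);
	sched_pin(curthread);			/* sets KEF_PINNED */
	mtx_unlock_spin(&sched_lock);

	pcpu_work(PCPU_GET(cpuid));		/* hypothetical per-CPU work */

	mtx_lock_spin(&sched_lock);
	sched_unpin(curthread);			/* clears KEF_PINNED */
	mtx_unlock_spin(&sched_lock);
}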
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/sched_ule.c	55
1 file changed, 53 insertions(+), 2 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 10f41aa..73ed185 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
+#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
@@ -105,6 +106,8 @@ struct ke_sched {
#define ke_assign ke_procq.tqe_next

#define KEF_ASSIGNED KEF_SCHED0 /* KSE is being migrated. */
+#define KEF_PINNED KEF_SCHED1 /* KSE is temporarily bound. */
+#define KEF_BOUND KEF_SCHED2 /* KSE can not migrate. */

struct kg_sched {
int skg_slptime; /* Number of ticks we vol. slept */
@@ -263,7 +266,8 @@ static __inline void kseq_setidle(struct kseq *kseq);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq);
-#define KSE_CAN_MIGRATE(ke, class) ((class) != PRI_ITHD)
+#define KSE_CAN_MIGRATE(ke, class) \
+ ((class) != PRI_ITHD && ((ke)->ke_flags & (KEF_BOUND|KEF_PINNED)) == 0)
#endif

void
@@ -1370,7 +1374,7 @@ sched_add(struct thread *td)
/*
* If there are any idle processors, give them our extra load.
*/
- if (kseq_idle && class != PRI_ITHD &&
+ if (kseq_idle && KSE_CAN_MIGRATE(ke, class) &&
kseq->ksq_load_transferable >= kseq->ksq_cpus) {
int cpu;

@@ -1462,6 +1466,53 @@ sched_pctcpu(struct thread *td)
return (pctcpu);
}

+void
+sched_pin(struct thread *td)
+{
+ mtx_assert(&sched_lock, MA_OWNED);
+ td->td_kse->ke_flags |= KEF_PINNED;
+}
+
+void
+sched_unpin(struct thread *td)
+{
+ mtx_assert(&sched_lock, MA_OWNED);
+ td->td_kse->ke_flags &= ~KEF_PINNED;
+}
+
+void
+sched_bind(struct thread *td, int cpu)
+{
+ struct kse *ke;
+
+ mtx_assert(&sched_lock, MA_OWNED);
+ ke = td->td_kse;
+#ifndef SMP
+ ke->ke_flags |= KEF_BOUND;
+#else
+ if (PCPU_GET(cpuid) == cpu) {
+ ke->ke_flags |= KEF_BOUND;
+ return;
+ }
+ /* sched_rem without the runq_remove */
+ ke->ke_state = KES_THREAD;
+ ke->ke_ksegrp->kg_runq_kses--;
+ kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
+ ke->ke_cpu = cpu;
+ kseq_notify(ke, cpu);
+ /* When we return from mi_switch we'll be on the correct cpu. */
+ td->td_proc->p_stats->p_ru.ru_nvcsw++;
+ mi_switch();
+#endif
+}
+
+void
+sched_unbind(struct thread *td)
+{
+ mtx_assert(&sched_lock, MA_OWNED);
+ td->td_kse->ke_flags &= ~KEF_BOUND;
+}
+
int
sched_sizeof_kse(void)
{
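Putting the binding half together (again a sketch, not from the commit): sched_bind() may mi_switch() to reach the target CPU, so per the comment in the diff the thread returns already running there, and KSE_CAN_MIGRATE() keeps the balancer from moving it away in the meantime. Assumes mp_ncpus from <sys/smp.h> and a hypothetical probe_cpu() helper.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

static void
bind_walk_cpus(void)
{
	int cpu;

	for (cpu = 0; cpu < mp_ncpus; cpu++) {
		mtx_lock_spin(&sched_lock);
		/* May mi_switch(); we come back running on 'cpu'. */
		sched_bind(curthread, cpu);
		mtx_unlock_spin(&sched_lock);

		probe_cpu(cpu);			/* hypothetical per-CPU work */

		mtx_lock_spin(&sched_lock);
		sched_unbind(curthread);	/* clears KEF_BOUND */
		mtx_unlock_spin(&sched_lock);
	}
}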