author     julian <julian@FreeBSD.org>  2003-04-10 17:35:44 +0000
committer  julian <julian@FreeBSD.org>  2003-04-10 17:35:44 +0000
commit     6f175a0e20cb55d7b1c3f882e3f1ecbd0503a094 (patch)
tree       a36a5c6a854637e4e73e7256b937922489d07ddb /sys/kern
parent     2a488098e335f6345e9235190cdc78fa82e9d61f (diff)
Move the _oncpu entry from the KSE to the thread.
The entry in the KSE still exists, but its purpose will change a bit when we add the ability to lock a KSE to a CPU.
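Taken together, the hunks below switch every per-thread consumer (the schedulers, forward_signal(), fill_kinfo_proc(), the thread constructor) from reading td->td_kse->ke_oncpu to reading a td_oncpu field on the thread itself. A rough sketch of the resulting shape follows; the struct layouts, field types, and the NOCPU value are abbreviated illustrations for this note, not the actual sys/proc.h definitions.

    /* Abbreviated sketch -- not the real sys/proc.h declarations. */
    #define NOCPU   0xff            /* "not running on any CPU"; real value lives in sys/proc.h */

    struct kse {
            int     ke_state;
            int     ke_oncpu;       /* still present; repurposed later when a KSE
                                       can be locked to a CPU (per the commit message) */
    };

    struct thread {
            struct kse *td_kse;
            int         td_lastcpu; /* CPU this thread last ran on */
            int         td_oncpu;   /* CPU it is running on now, or NOCPU */
    };

    /*
     * Callers that previously did:
     *         id = td->td_kse->ke_oncpu;
     * now do:
     *         id = td->td_oncpu;
     */

The practical effect is that code which cares about where a *thread* is running no longer has to dereference (and NULL-check) its KSE first, which is why kern_mutex.c and subr_turnstile.c can drop the td_kse != NULL test from thread_running().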
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/init_main.c       2
-rw-r--r--  sys/kern/kern_fork.c       2
-rw-r--r--  sys/kern/kern_kse.c        1
-rw-r--r--  sys/kern/kern_mutex.c      3
-rw-r--r--  sys/kern/kern_proc.c       2
-rw-r--r--  sys/kern/kern_thread.c     1
-rw-r--r--  sys/kern/sched_4bsd.c      6
-rw-r--r--  sys/kern/sched_ule.c       6
-rw-r--r--  sys/kern/subr_smp.c        2
-rw-r--r--  sys/kern/subr_turnstile.c  3
-rw-r--r--  sys/kern/subr_witness.c    2
11 files changed, 17 insertions, 13 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index d135c9b..08f273a 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -376,7 +376,7 @@ proc0_init(void *dummy __unused)
td->td_priority = PVM;
td->td_base_pri = PUSER;
td->td_kse = ke; /* XXXKSE */
- ke->ke_oncpu = 0;
+ td->td_oncpu = 0;
ke->ke_state = KES_THREAD;
ke->ke_thread = td;
p->p_peers = 0;
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 9a74d25..4cec7e3 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -767,7 +767,7 @@ fork_exit(callout, arg, frame)
}
td = curthread;
p = td->td_proc;
- td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+ td->td_oncpu = PCPU_GET(cpuid);
p->p_state = PRS_NORMAL;
/*
* Finish setting up thread glue. We need to initialize
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 5ffa8f6..bfc97ad 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -139,6 +139,7 @@ thread_ctor(void *mem, int size, void *arg)
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
+ td->td_oncpu = NOCPU;
}
/*
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 6a734d0..0722931 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -72,7 +72,8 @@
/* XXXKSE This test will change. */
#define thread_running(td) \
- ((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
+ (td->td_state == TDS_RUNNING)
+ /* ((td)->td_oncpu != NOCPU) */
/*
* Lock classes for sleep and spin mutexes.
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 26fb2b7..aa1e5c5 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -737,13 +737,13 @@ fill_kinfo_proc(p, kp)
kp->ki_pri.pri_level = td->td_priority;
kp->ki_pri.pri_native = td->td_base_pri;
kp->ki_lastcpu = td->td_lastcpu;
+ kp->ki_oncpu = td->td_oncpu;
kp->ki_tdflags = td->td_flags;
kp->ki_pcb = td->td_pcb;
kp->ki_kstack = (void *)td->td_kstack;
/* Things in the kse */
kp->ki_rqindex = ke->ke_rqindex;
- kp->ki_oncpu = ke->ke_oncpu;
kp->ki_pctcpu = sched_pctcpu(ke);
} else {
kp->ki_oncpu = -1;
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 5ffa8f6..bfc97ad 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -139,6 +139,7 @@ thread_ctor(void *mem, int size, void *arg)
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
+ td->td_oncpu = NOCPU;
}
/*
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index fe6dfd9..3c4f0fe 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -518,7 +518,7 @@ sched_sleep(struct thread *td, u_char prio)
void
sched_switchin(struct thread *td)
{
- td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+ td->td_oncpu = PCPU_GET(cpuid);
}
void
@@ -532,9 +532,9 @@ sched_switchout(struct thread *td)
KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
- td->td_lastcpu = ke->ke_oncpu;
+ td->td_lastcpu = td->td_oncpu;
td->td_last_kse = ke;
- ke->ke_oncpu = NOCPU;
+ td->td_oncpu = NOCPU;
td->td_flags &= ~TDF_NEEDRESCHED;
/*
* At the last moment, if this thread is still marked RUNNING,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 190b4d6..bd9759c 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -643,8 +643,8 @@ sched_switchout(struct thread *td)
ke = td->td_kse;
td->td_last_kse = ke;
- td->td_lastcpu = ke->ke_oncpu;
- ke->ke_oncpu = NOCPU;
+ td->td_lastcpu = td->td_oncpu;
+ td->td_oncpu = NOCPU;
td->td_flags &= ~TDF_NEEDRESCHED;
if (TD_IS_RUNNING(td)) {
@@ -667,7 +667,7 @@ sched_switchin(struct thread *td)
/* struct kse *ke = td->td_kse; */
mtx_assert(&sched_lock, MA_OWNED);
- td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+ td->td_oncpu = PCPU_GET(cpuid);
#if SCHED_STRICT_RESCHED
if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
td->td_priority != td->td_ksegrp->kg_user_pri)
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index f65987f..897e0f0 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -142,7 +142,7 @@ forward_signal(struct thread *td)
if (td == curthread)
return;
- id = td->td_kse->ke_oncpu;
+ id = td->td_oncpu;
if (id == NOCPU)
return;
ipi_selected(1 << id, IPI_AST);
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 6a734d0..0722931 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -72,7 +72,8 @@
/* XXXKSE This test will change. */
#define thread_running(td) \
- ((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
+ (td->td_state == TDS_RUNNING)
+ /* ((td)->td_oncpu != NOCPU) */
/*
* Lock classes for sleep and spin mutexes.
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index d89698b..b2171fd 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -1714,7 +1714,7 @@ witness_list(struct thread *td)
* if td is currently executing on some other CPU and holds spin locks
* as we won't display those locks. If we had a MI way of getting
* the per-cpu data for a given cpu then we could use
- * td->td_kse->ke_oncpu to get the list of spinlocks for this thread
+ * td->td_oncpu to get the list of spinlocks for this thread
* and "fix" this.
*
* That still wouldn't really fix this unless we locked sched_lock