author     julian <julian@FreeBSD.org>    2003-04-10 17:35:44 +0000
committer  julian <julian@FreeBSD.org>    2003-04-10 17:35:44 +0000
commit     6f175a0e20cb55d7b1c3f882e3f1ecbd0503a094 (patch)
tree       a36a5c6a854637e4e73e7256b937922489d07ddb /sys
parent     2a488098e335f6345e9235190cdc78fa82e9d61f (diff)
Move the _oncpu entry from the KSE to the thread.
The entry in the KSE still exists, but its purpose will change a bit when we add the ability to lock a KSE to a CPU.
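
For orientation, a minimal compilable sketch of the pattern this commit installs: the thread itself, rather than its KSE, records which CPU it is running on. This is a stand-in, not the kernel code — the struct is abridged, the sketch_* helper names are invented for illustration, and NOCPU's exact value is an assumption; the real hooks are sched_switchin()/sched_switchout() in sched_4bsd.c and sched_ule.c below.

    #include <stdio.h>

    #define NOCPU 0xff                  /* "not on any cpu" sentinel; assumed value */

    /* Abridged stand-in for struct thread; the real definition in
     * sys/sys/proc.h carries many more fields. */
    struct thread {
            unsigned char td_lastcpu;   /* last cpu we ran on */
            unsigned char td_oncpu;     /* cpu we run on now, or NOCPU */
    };

    /* On switch-in, stamp the thread with the current CPU id. */
    static void
    sketch_switchin(struct thread *td, unsigned char cpuid)
    {
            td->td_oncpu = cpuid;       /* kernel code uses PCPU_GET(cpuid) */
    }

    /* On switch-out, remember where we last ran and clear the binding. */
    static void
    sketch_switchout(struct thread *td)
    {
            td->td_lastcpu = td->td_oncpu;
            td->td_oncpu = NOCPU;
    }

    int
    main(void)
    {
            struct thread td = { .td_lastcpu = NOCPU, .td_oncpu = NOCPU };

            sketch_switchin(&td, 1);
            sketch_switchout(&td);
            printf("lastcpu=%u oncpu=%u\n", td.td_lastcpu, td.td_oncpu);
            return (0);
    }

With per-thread state, consumers such as forward_signal() and the ddb ps command can read td->td_oncpu directly instead of chasing td->td_kse->ke_oncpu through a pointer that may be NULL.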
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/genassym.c  | 1
-rw-r--r--  sys/ddb/db_ps.c             | 2
-rw-r--r--  sys/i386/i386/genassym.c    | 1
-rw-r--r--  sys/kern/init_main.c        | 2
-rw-r--r--  sys/kern/kern_fork.c        | 2
-rw-r--r--  sys/kern/kern_kse.c         | 1
-rw-r--r--  sys/kern/kern_mutex.c       | 3
-rw-r--r--  sys/kern/kern_proc.c        | 2
-rw-r--r--  sys/kern/kern_thread.c      | 1
-rw-r--r--  sys/kern/sched_4bsd.c       | 6
-rw-r--r--  sys/kern/sched_ule.c        | 6
-rw-r--r--  sys/kern/subr_smp.c         | 2
-rw-r--r--  sys/kern/subr_turnstile.c   | 3
-rw-r--r--  sys/kern/subr_witness.c     | 2
-rw-r--r--  sys/sys/proc.h              | 1
15 files changed, 19 insertions, 16 deletions
diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c
index 710b7b0..0021bd4 100644
--- a/sys/amd64/amd64/genassym.c
+++ b/sys/amd64/amd64/genassym.c
@@ -86,7 +86,6 @@ ASSYM(P_UAREA, offsetof(struct proc, p_uarea));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
ASSYM(TD_WCHAN, offsetof(struct thread, td_wchan));
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
-ASSYM(TD_KSE, offsetof(struct thread, td_kse));
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
ASSYM(TD_INTR_NESTING_LEVEL, offsetof(struct thread, td_intr_nesting_level));
ASSYM(TD_CRITNEST, offsetof(struct thread, td_critnest));
diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c
index 430dae8..17992ea 100644
--- a/sys/ddb/db_ps.c
+++ b/sys/ddb/db_ps.c
@@ -178,7 +178,7 @@ dumpthread(volatile struct proc *p, volatile struct thread *td)
db_printf("[RUNQ]");
break;
case TDS_RUNNING:
- db_printf("[CPU %d]", td->td_kse->ke_oncpu);
+ db_printf("[CPU %d]", td->td_oncpu);
break;
default:
panic("unknown thread state");
diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c
index 710b7b0..0021bd4 100644
--- a/sys/i386/i386/genassym.c
+++ b/sys/i386/i386/genassym.c
@@ -86,7 +86,6 @@ ASSYM(P_UAREA, offsetof(struct proc, p_uarea));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
ASSYM(TD_WCHAN, offsetof(struct thread, td_wchan));
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
-ASSYM(TD_KSE, offsetof(struct thread, td_kse));
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
ASSYM(TD_INTR_NESTING_LEVEL, offsetof(struct thread, td_intr_nesting_level));
ASSYM(TD_CRITNEST, offsetof(struct thread, td_critnest));
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index d135c9b..08f273a 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -376,7 +376,7 @@ proc0_init(void *dummy __unused)
td->td_priority = PVM;
td->td_base_pri = PUSER;
td->td_kse = ke; /* XXXKSE */
- ke->ke_oncpu = 0;
+ td->td_oncpu = 0;
ke->ke_state = KES_THREAD;
ke->ke_thread = td;
p->p_peers = 0;
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 9a74d25..4cec7e3 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -767,7 +767,7 @@ fork_exit(callout, arg, frame)
}
td = curthread;
p = td->td_proc;
- td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+ td->td_oncpu = PCPU_GET(cpuid);
p->p_state = PRS_NORMAL;
/*
* Finish setting up thread glue. We need to initialize
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 5ffa8f6..bfc97ad 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -139,6 +139,7 @@ thread_ctor(void *mem, int size, void *arg)
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
+ td->td_oncpu = NOCPU;
}
/*
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 6a734d0..0722931 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -72,7 +72,8 @@
/* XXXKSE This test will change. */
#define thread_running(td) \
- ((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
+ (td->td_state == TDS_RUNNING)
+ /* ((td)->td_oncpu != NOCPU) */
/*
* Lock classes for sleep and spin mutexes.
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 26fb2b7..aa1e5c5 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -737,13 +737,13 @@ fill_kinfo_proc(p, kp)
kp->ki_pri.pri_level = td->td_priority;
kp->ki_pri.pri_native = td->td_base_pri;
kp->ki_lastcpu = td->td_lastcpu;
+ kp->ki_oncpu = td->td_oncpu;
kp->ki_tdflags = td->td_flags;
kp->ki_pcb = td->td_pcb;
kp->ki_kstack = (void *)td->td_kstack;
/* Things in the kse */
kp->ki_rqindex = ke->ke_rqindex;
- kp->ki_oncpu = ke->ke_oncpu;
kp->ki_pctcpu = sched_pctcpu(ke);
} else {
kp->ki_oncpu = -1;
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 5ffa8f6..bfc97ad 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -139,6 +139,7 @@ thread_ctor(void *mem, int size, void *arg)
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
+ td->td_oncpu = NOCPU;
}
/*
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index fe6dfd9..3c4f0fe 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -518,7 +518,7 @@ sched_sleep(struct thread *td, u_char prio)
void
sched_switchin(struct thread *td)
{
- td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+ td->td_oncpu = PCPU_GET(cpuid);
}
void
@@ -532,9 +532,9 @@ sched_switchout(struct thread *td)
KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
- td->td_lastcpu = ke->ke_oncpu;
+ td->td_lastcpu = td->td_oncpu;
td->td_last_kse = ke;
- ke->ke_oncpu = NOCPU;
+ td->td_oncpu = NOCPU;
td->td_flags &= ~TDF_NEEDRESCHED;
/*
* At the last moment, if this thread is still marked RUNNING,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 190b4d6..bd9759c 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -643,8 +643,8 @@ sched_switchout(struct thread *td)
ke = td->td_kse;
td->td_last_kse = ke;
- td->td_lastcpu = ke->ke_oncpu;
- ke->ke_oncpu = NOCPU;
+ td->td_lastcpu = td->td_oncpu;
+ td->td_oncpu = NOCPU;
td->td_flags &= ~TDF_NEEDRESCHED;
if (TD_IS_RUNNING(td)) {
@@ -667,7 +667,7 @@ sched_switchin(struct thread *td)
/* struct kse *ke = td->td_kse; */
mtx_assert(&sched_lock, MA_OWNED);
- td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+ td->td_oncpu = PCPU_GET(cpuid);
#if SCHED_STRICT_RESCHED
if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
td->td_priority != td->td_ksegrp->kg_user_pri)
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index f65987f..897e0f0 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -142,7 +142,7 @@ forward_signal(struct thread *td)
if (td == curthread)
return;
- id = td->td_kse->ke_oncpu;
+ id = td->td_oncpu;
if (id == NOCPU)
return;
ipi_selected(1 << id, IPI_AST);
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 6a734d0..0722931 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -72,7 +72,8 @@
/* XXXKSE This test will change. */
#define thread_running(td) \
- ((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
+ (td->td_state == TDS_RUNNING)
+ /* ((td)->td_oncpu != NOCPU) */
/*
* Lock classes for sleep and spin mutexes.
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index d89698b..b2171fd 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -1714,7 +1714,7 @@ witness_list(struct thread *td)
* if td is currently executing on some other CPU and holds spin locks
* as we won't display those locks. If we had a MI way of getting
* the per-cpu data for a given cpu then we could use
- * td->td_kse->ke_oncpu to get the list of spinlocks for this thread
+ * td->td_oncpu to get the list of spinlocks for this thread
* and "fix" this.
*
* That still wouldn't really fix this unless we locked sched_lock
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 5d2bb04..bc921a8 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -287,6 +287,7 @@ struct thread {
u_char td_lastcpu; /* (j) Last cpu we were on. */
u_char td_inktr; /* (k) Currently handling a KTR. */
u_char td_inktrace; /* (k) Currently handling a KTRACE. */
+ u_char td_oncpu; /* (j) Which cpu we are on. */
short td_locks; /* (k) DEBUG: lockmgr count of locks */
struct mtx *td_blocked; /* (j) Mutex process is blocked on. */
struct ithd *td_ithd; /* (b) For interrupt threads only. */