diff options
author | jeff <jeff@FreeBSD.org> | 2004-12-14 10:53:55 +0000 |
---|---|---|
committer | jeff <jeff@FreeBSD.org> | 2004-12-14 10:53:55 +0000 |
commit | c94fadce10c4a45d72c0998a27fba1edfffd16a0 (patch) | |
tree | 8217407fde0af6996b1416dfda9a7637def0a643 /sys/kern | |
parent | 422a07b8e1df1d5ade35b56f0fd3eaec2c425131 (diff) | |
download | FreeBSD-src-c94fadce10c4a45d72c0998a27fba1edfffd16a0.zip FreeBSD-src-c94fadce10c4a45d72c0998a27fba1edfffd16a0.tar.gz |
- Garbage collect several unused members of struct kse and struct ksegrp.
As best as I can tell, some of these were never used.
Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/kern_switch.c | 6 | ||||
-rw-r--r-- | sys/kern/kern_thread.c | 2 | ||||
-rw-r--r-- | sys/kern/sched_4bsd.c | 9 | ||||
-rw-r--r-- | sys/kern/sched_ule.c | 9 |
4 files changed, 0 insertions, 26 deletions
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c index 4f1c7ee..3177f72 100644 --- a/sys/kern/kern_switch.c +++ b/sys/kern/kern_switch.c @@ -159,7 +159,6 @@ retry: threadqueue, td_runq); } TAILQ_REMOVE(&kg->kg_runq, td, td_runq); - kg->kg_runnable--; } CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d", td, td->td_priority); @@ -254,7 +253,6 @@ remrunqueue(struct thread *td) } td3 = TAILQ_PREV(td, threadqueue, td_runq); TAILQ_REMOVE(&kg->kg_runq, td, td_runq); - kg->kg_runnable--; if (ke->ke_state == KES_ONRUNQ) { /* * This thread has been assigned to the system run queue. @@ -310,7 +308,6 @@ adjustrunqueue( struct thread *td, int newpri) sched_rem(td); } TAILQ_REMOVE(&kg->kg_runq, td, td_runq); - kg->kg_runnable--; TD_SET_CAN_RUN(td); td->td_priority = newpri; setrunqueue(td, SRQ_BORING); @@ -514,14 +511,12 @@ setrunqueue(struct thread *td, int flags) */ TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) { if (td2->td_priority > td->td_priority) { - kg->kg_runnable++; TAILQ_INSERT_BEFORE(td2, td, td_runq); break; } } if (td2 == NULL) { /* We ran off the end of the TAILQ or it was empty. 
*/ - kg->kg_runnable++; TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq); } @@ -945,7 +940,6 @@ sched_newthread(struct thread *td) bzero(ke, sizeof(*ke)); td->td_sched = ke; ke->ke_thread = td; - ke->ke_oncpu = NOCPU; ke->ke_state = KES_THREAD; } diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index e41d813..dd3acc2 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -296,7 +296,6 @@ ksegrp_link(struct ksegrp *kg, struct proc *p) TAILQ_INIT(&kg->kg_threads); TAILQ_INIT(&kg->kg_runq); /* links with td_runq */ - TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */ TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */ kg->kg_proc = p; /* @@ -304,7 +303,6 @@ ksegrp_link(struct ksegrp *kg, struct proc *p) * and may not need clearing */ kg->kg_numthreads = 0; - kg->kg_runnable = 0; kg->kg_numupcalls = 0; /* link it in now that it's consistent */ p->p_numksegrps++; diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c index ae8091f..964d6a9 100644 --- a/sys/kern/sched_4bsd.c +++ b/sys/kern/sched_4bsd.c @@ -74,12 +74,9 @@ __FBSDID("$FreeBSD$"); * for the group. */ struct kse { - TAILQ_ENTRY(kse) ke_kglist; /* (*) Queue of KSEs in ke_ksegrp. */ - TAILQ_ENTRY(kse) ke_kgrlist; /* (*) Queue of KSEs in this state. */ TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */ struct thread *ke_thread; /* (*) Active associated thread. */ fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */ - u_char ke_oncpu; /* (j) Which cpu we are on. */ char ke_rqindex; /* (j) Run queue index. */ enum { KES_THREAD = 0x0, /* slaved to thread state */ @@ -112,12 +109,10 @@ struct kg_sched { /* the system scheduler. */ int skg_avail_opennings; /* (j) Num KSEs requested in group. */ int skg_concurrency; /* (j) Num KSEs requested in group. */ - int skg_runq_kses; /* (j) Num KSEs on runq. 
*/ }; #define kg_last_assigned kg_sched->skg_last_assigned #define kg_avail_opennings kg_sched->skg_avail_opennings #define kg_concurrency kg_sched->skg_concurrency -#define kg_runq_kses kg_sched->skg_runq_kses #define SLOT_RELEASE(kg) \ do { \ @@ -615,7 +610,6 @@ schedinit(void) ksegrp0.kg_sched = &kg_sched0; thread0.td_sched = &kse0; kse0.ke_thread = &thread0; - kse0.ke_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */ kse0.ke_state = KES_THREAD; kg_sched0.skg_concurrency = 1; kg_sched0.skg_avail_opennings = 0; /* we are already running */ @@ -1059,7 +1053,6 @@ sched_add(struct thread *td, int flags) sched_tdcnt++; SLOT_USE(td->td_ksegrp); runq_add(ke->ke_runq, ke, flags); - ke->ke_ksegrp->kg_runq_kses++; ke->ke_state = KES_ONRUNQ; maybe_resched(td); } @@ -1082,7 +1075,6 @@ sched_rem(struct thread *td) runq_remove(ke->ke_runq, ke); ke->ke_state = KES_THREAD; - td->td_ksegrp->kg_runq_kses--; } /* @@ -1121,7 +1113,6 @@ sched_choose(void) if (ke != NULL) { runq_remove(rq, ke); ke->ke_state = KES_THREAD; - ke->ke_ksegrp->kg_runq_kses--; KASSERT(ke->ke_proc->p_sflag & PS_INMEM, ("sched_choose: process swapped out")); diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c index ff2a97a..3155070 100644 --- a/sys/kern/sched_ule.c +++ b/sys/kern/sched_ule.c @@ -87,13 +87,10 @@ int tickincr = 1; * for the group. */ struct kse { - TAILQ_ENTRY(kse) ke_kglist; /* (*) Queue of threads in ke_ksegrp. */ - TAILQ_ENTRY(kse) ke_kgrlist; /* (*) Queue of threads in this state.*/ TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */ int ke_flags; /* (j) KEF_* flags. */ struct thread *ke_thread; /* (*) Active associated thread. */ fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */ - u_char ke_oncpu; /* (j) Which cpu we are on. */ char ke_rqindex; /* (j) Run queue index. 
*/ enum { KES_THREAD = 0x0, /* slaved to thread state */ @@ -147,12 +144,10 @@ struct kg_sched { int skg_runtime; /* Number of ticks we were running */ int skg_avail_opennings; /* (j) Num unfilled slots in group.*/ int skg_concurrency; /* (j) Num threads requested in group.*/ - int skg_runq_threads; /* (j) Num KSEs on runq. */ }; #define kg_last_assigned kg_sched->skg_last_assigned #define kg_avail_opennings kg_sched->skg_avail_opennings #define kg_concurrency kg_sched->skg_concurrency -#define kg_runq_threads kg_sched->skg_runq_threads #define kg_runtime kg_sched->skg_runtime #define kg_slptime kg_sched->skg_slptime @@ -1175,7 +1170,6 @@ schedinit(void) ksegrp0.kg_sched = &kg_sched0; thread0.td_sched = &kse0; kse0.ke_thread = &thread0; - kse0.ke_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */ kse0.ke_state = KES_THREAD; kg_sched0.skg_concurrency = 1; kg_sched0.skg_avail_opennings = 0; /* we are already running */ @@ -1815,7 +1809,6 @@ sched_add_internal(struct thread *td, int preemptive) if (preemptive && maybe_preempt(td)) return; SLOT_USE(td->td_ksegrp); - ke->ke_ksegrp->kg_runq_threads++; ke->ke_state = KES_ONRUNQ; kseq_runq_add(kseq, ke); @@ -1846,7 +1839,6 @@ sched_rem(struct thread *td) SLOT_RELEASE(td->td_ksegrp); ke->ke_state = KES_THREAD; - ke->ke_ksegrp->kg_runq_threads--; kseq = KSEQ_CPU(ke->ke_cpu); kseq_runq_rem(kseq, ke); kseq_load_rem(kseq, ke); @@ -1899,7 +1891,6 @@ sched_bind(struct thread *td, int cpu) return; /* sched_rem without the runq_remove */ ke->ke_state = KES_THREAD; - ke->ke_ksegrp->kg_runq_threads--; kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); kseq_notify(ke, cpu); /* When we return from mi_switch we'll be on the correct cpu. */ |