summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjulian <julian@FreeBSD.org>2004-07-18 23:36:13 +0000
committerjulian <julian@FreeBSD.org>2004-07-18 23:36:13 +0000
commita488bebcd21f1e39697ac09fdac970a06d6fb621 (patch)
tree548243b8828b679ca121764ef843b623c31cf5b9 /sys
parentd538dc62fd30a8ac9beb2f57ffc4321e42569aab (diff)
downloadFreeBSD-src-a488bebcd21f1e39697ac09fdac970a06d6fb621.zip
FreeBSD-src-a488bebcd21f1e39697ac09fdac970a06d6fb621.tar.gz
When calling scheduler entrypoints for creating new threads and processes,
specify "us" as the thread not the process/ksegrp/kse. You can always find the others from the thread but the converse is not true. Theoretically this would lead to runtime being allocated to the wrong entity in some cases though it is not clear how often this actually happened. (would only affect threaded processes and would probably be pretty benign, but it WAS a bug..) Reviewed by: peter
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/kern_exit.c2
-rw-r--r--sys/kern/kern_fork.c2
-rw-r--r--sys/kern/kern_kse.c4
-rw-r--r--sys/kern/kern_thr.c4
-rw-r--r--sys/kern/kern_thread.c4
-rw-r--r--sys/kern/sched_4bsd.c28
-rw-r--r--sys/kern/sched_ule.c33
-rw-r--r--sys/sys/sched.h12
8 files changed, 46 insertions, 43 deletions
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 8efb312..a268771 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -526,7 +526,7 @@ retry:
PCPU_SET(switchticks, ticks);
cnt.v_swtch++;
- sched_exit(p->p_pptr, p);
+ sched_exit(p->p_pptr, td);
/*
* Make sure the scheduler takes this thread out of its tables etc.
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 7a9f234..e42a436 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -515,7 +515,7 @@ again:
* Allow the scheduler to adjust the priority of the child and
* parent while we hold the sched_lock.
*/
- sched_fork(p1, p2);
+ sched_fork(td, p2);
mtx_unlock_spin(&sched_lock);
p2->p_ucred = crhold(td->td_ucred);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 29d83d3..0aae7ba 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -522,7 +522,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
return (EPROCLIM);
}
ksegrp_link(newkg, p);
- sched_fork_ksegrp(kg, newkg);
+ sched_fork_ksegrp(td, newkg);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
@@ -569,7 +569,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
#endif
mtx_lock_spin(&sched_lock);
kse_link(newke, newkg);
- sched_fork_kse(td->td_kse, newke);
+ sched_fork_kse(td, newke);
/* Add engine */
kse_reassign(newke);
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 856e2dc..0303fa9 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -87,7 +87,7 @@ thr_exit1(void)
ke->ke_state = KES_UNQUEUED;
ke->ke_thread = NULL;
kse_unlink(ke);
- sched_exit_kse(TAILQ_NEXT(ke, ke_kglist), ke);
+ sched_exit_kse(TAILQ_NEXT(ke, ke_kglist), td);
/*
* If we were stopped while waiting for all threads to exit and this
@@ -177,7 +177,7 @@ thr_create(struct thread *td, struct thr_create_args *uap)
td0->td_kse = ke0;
ke0->ke_thread = td0;
- sched_fork_kse(td->td_kse, ke0);
+ sched_fork_kse(td, ke0);
sched_fork_thread(td, td0);
TD_SET_CAN_RUN(td0);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 35e1048..c5e9504 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -651,7 +651,7 @@ thread_exit(void)
upcall_remove(td);
sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
- sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
+ sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
ke->ke_state = KES_UNQUEUED;
ke->ke_thread = NULL;
/*
@@ -660,7 +660,7 @@ thread_exit(void)
if (ke->ke_flags & KEF_EXIT) {
kse_unlink(ke);
if (kg->kg_kses == 0) {
- sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
+ sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
ksegrp_unlink(kg);
}
}
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index c80214e..4bff511 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -536,24 +536,24 @@ sched_clock(struct thread *td)
* aggregated all the estcpu into the 'built-in' ksegrp.
*/
void
-sched_exit(struct proc *p, struct proc *p1)
+sched_exit(struct proc *p, struct thread *td)
{
- sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
- sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
- sched_exit_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
+ sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
+ sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
+ sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}
void
-sched_exit_kse(struct kse *ke, struct kse *child)
+sched_exit_kse(struct kse *ke, struct thread *child)
{
}
void
-sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
+sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
{
mtx_assert(&sched_lock, MA_OWNED);
- kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
+ kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu);
}
void
@@ -564,24 +564,24 @@ sched_exit_thread(struct thread *td, struct thread *child)
}
void
-sched_fork(struct proc *p, struct proc *p1)
+sched_fork(struct thread *td, struct proc *p1)
{
- sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
- sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
- sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
+ sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
+ sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
+ sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
}
void
-sched_fork_kse(struct kse *ke, struct kse *child)
+sched_fork_kse(struct thread *td, struct kse *child)
{
child->ke_sched->ske_cpticks = 0;
}
void
-sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
+sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
mtx_assert(&sched_lock, MA_OWNED);
- child->kg_estcpu = kg->kg_estcpu;
+ child->kg_estcpu = td->td_ksegrp->kg_estcpu;
}
void
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 602b611..a18fe40 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1256,21 +1256,23 @@ sched_wakeup(struct thread *td)
* priority.
*/
void
-sched_fork(struct proc *p, struct proc *p1)
+sched_fork(struct thread *td, struct proc *p1)
{
mtx_assert(&sched_lock, MA_OWNED);
- p1->p_nice = p->p_nice;
- sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
- sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
- sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
+ p1->p_nice = td->td_proc->p_nice;
+ sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
+ sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
+ sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
}
void
-sched_fork_kse(struct kse *ke, struct kse *child)
+sched_fork_kse(struct thread *td, struct kse *child)
{
+ struct kse *ke = td->td_kse;
+
child->ke_slice = 1; /* Attempt to quickly learn interactivity. */
child->ke_cpu = ke->ke_cpu;
child->ke_runq = NULL;
@@ -1282,8 +1284,9 @@ sched_fork_kse(struct kse *ke, struct kse *child)
}
void
-sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
+sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
+ struct ksegrp *kg = td->td_ksegrp;
PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
child->kg_slptime = kg->kg_slptime;
@@ -1357,24 +1360,24 @@ sched_class(struct ksegrp *kg, int class)
* Return some of the child's priority and interactivity to the parent.
*/
void
-sched_exit(struct proc *p, struct proc *child)
+sched_exit(struct proc *p, struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
- sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
- sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
+ sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
+ sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
}
void
-sched_exit_kse(struct kse *ke, struct kse *child)
+sched_exit_kse(struct kse *ke, struct thread *td)
{
- kseq_load_rem(KSEQ_CPU(child->ke_cpu), child);
+ kseq_load_rem(KSEQ_CPU(td->td_kse->ke_cpu), td->td_kse);
}
void
-sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
+sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
{
- /* kg->kg_slptime += child->kg_slptime; */
- kg->kg_runtime += child->kg_runtime;
+ /* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
+ kg->kg_runtime += td->td_ksegrp->kg_runtime;
sched_interact_update(kg);
}
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 461842a..6836fe0 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -45,16 +45,16 @@ int sched_runnable(void);
/*
* Proc related scheduling hooks.
*/
-void sched_exit(struct proc *p, struct proc *child);
-void sched_fork(struct proc *p, struct proc *child);
+void sched_exit(struct proc *p, struct thread *childtd);
+void sched_fork(struct thread *td, struct proc *child);
/*
* KSE Groups contain scheduling priority information. They record the
* behavior of groups of KSEs and threads.
*/
void sched_class(struct ksegrp *kg, int class);
-void sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child);
-void sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child);
+void sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd);
+void sched_fork_ksegrp(struct thread *td, struct ksegrp *child);
void sched_nice(struct proc *p, int nice);
/*
@@ -90,8 +90,8 @@ static __inline void sched_unpin(void);
/*
* These interfaces will eventually be removed.
*/
-void sched_exit_kse(struct kse *ke, struct kse *child);
-void sched_fork_kse(struct kse *ke, struct kse *child);
+void sched_exit_kse(struct kse *ke, struct thread *childtd);
+void sched_fork_kse(struct thread *td, struct kse *child);
/*
* These procedures tell the process data structure allocation code how
OpenPOWER on IntegriCloud