path: root/sys/kern
author    jhb <jhb@FreeBSD.org>    2004-07-16 21:04:55 +0000
committer jhb <jhb@FreeBSD.org>    2004-07-16 21:04:55 +0000
commit 0cb3276d57e68364c5cd8f83dd504672a5e5f5b0 (patch)
tree   40f1025728659f06a876858c57f12a51848d214c /sys/kern
parent 1115416b3b78799329cdb0c7addff7c1ad2fc348 (diff)
- Move TDF_OWEPREEMPT, TDF_OWEUPC, and TDF_USTATCLOCK over to td_pflags
  since they are only accessed by curthread and thus do not need any
  locking (see the sketch below).
- Move pr_addr and pr_ticks out of struct uprof (which is per-process)
  and directly into struct thread as td_profil_addr and td_profil_ticks,
  as these variables are really per-thread.  (They are used to defer an
  addupc_intr() that was too "hard" until ast(); see the sketch after
  the subr_prof.c hunk.)
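
Why moving these flags avoids the lock: td_flags can be written from other
contexts and is therefore protected by sched_lock, while td_pflags is read
and written only by its owning thread.  A minimal standalone sketch of that
distinction follows; every name in it is a hypothetical stand-in, not the
real kernel structures.

	#include <pthread.h>
	#include <stdio.h>

	struct fake_thread {
		pthread_mutex_t	lock;		/* plays the role of sched_lock */
		int		shared_flags;	/* like td_flags: others may set bits */
		int		private_flags;	/* like td_pflags: owner-only access */
	};

	#define	FLAG_ASTPENDING		0x01	/* shared: set from other contexts */
	#define	PFLAG_OWEUPC		0x01	/* private: owner sets and clears it */

	static struct fake_thread td = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	int
	main(void)
	{
		/* Shared word: lock around the read-modify-write. */
		pthread_mutex_lock(&td.lock);
		td.shared_flags |= FLAG_ASTPENDING;
		pthread_mutex_unlock(&td.lock);

		/*
		 * Private word: the owning thread is the only reader and
		 * writer, so the read-modify-write needs no lock at all.
		 */
		td.private_flags |= PFLAG_OWEUPC;

		printf("shared=%#x private=%#x\n", td.shared_flags,
		    td.private_flags);
		return (0);
	}

The diff below applies exactly this split: TDF_ASTPENDING stays in the
locked word, while the owed-work bits move to the private one.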
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_kse.c      9
-rw-r--r--  sys/kern/kern_switch.c   9
-rw-r--r--  sys/kern/kern_synch.c    3
-rw-r--r--  sys/kern/sched_4bsd.c    3
-rw-r--r--  sys/kern/sched_ule.c     3
-rw-r--r--  sys/kern/subr_prof.c     7
-rw-r--r--  sys/kern/subr_trap.c    10
7 files changed, 25 insertions, 19 deletions
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 7fa8ac7..29d83d3 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -838,8 +838,9 @@ thread_statclock(int user)
return (0);
if (user) {
/* Current always do via ast() */
+ td->td_pflags |= TDP_USTATCLOCK;
mtx_lock_spin(&sched_lock);
- td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
+ td->td_flags |= TDF_ASTPENDING;
mtx_unlock_spin(&sched_lock);
td->td_uuticks++;
} else if (td->td_mailbox != NULL)
@@ -1129,11 +1130,9 @@ thread_userret(struct thread *td, struct trapframe *frame)
* is returning from interrupt, charge thread's
* userland time for UTS.
*/
- if (td->td_flags & TDF_USTATCLOCK) {
+ if (td->td_pflags & TDP_USTATCLOCK) {
thread_update_usr_ticks(td, 1);
- mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_USTATCLOCK;
- mtx_unlock_spin(&sched_lock);
+ td->td_pflags &= ~TDP_USTATCLOCK;
}
/*
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 768084c..5183a35 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -451,7 +451,8 @@ critical_exit(void)
("critical_exit: td_critnest == 0"));
if (td->td_critnest == 1) {
#ifdef PREEMPTION
- if (td->td_flags & TDF_OWEPREEMPT) {
+ mtx_assert(&sched_lock, MA_NOTOWNED);
+ if (td->td_pflags & TDP_OWEPREEMPT) {
mtx_lock_spin(&sched_lock);
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
@@ -485,7 +486,9 @@ maybe_preempt(struct thread *td)
* The new thread should not preempt the current thread if any of the
* following conditions are true:
*
- * - The current thread has a higher (numerically lower) priority.
+ * - The current thread has a higher (numerically lower) or
+ * equivalent priority. Note that this prevents curthread from
+ * trying to preempt to itself.
* - It is too early in the boot for context switches (cold is set).
* - The current thread has an inhibitor set or is in the process of
* exiting. In this case, the current thread is about to switch
@@ -515,7 +518,7 @@ maybe_preempt(struct thread *td)
if (ctd->td_critnest > 1) {
CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
ctd->td_critnest);
- ctd->td_flags |= TDF_OWEPREEMPT;
+ ctd->td_pflags |= TDP_OWEPREEMPT;
return (0);
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 7016307..0cb0282 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -297,11 +297,12 @@ mi_switch(int flags, struct thread *newtd)
mtx_assert(&Giant, MA_NOTOWNED);
#endif
KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
- (td->td_flags & TDF_OWEPREEMPT) != 0 && (flags & SW_INVOL) != 0 &&
+ (td->td_pflags & TDP_OWEPREEMPT) != 0 && (flags & SW_INVOL) != 0 &&
newtd == NULL),
("mi_switch: switch in a critical section"));
KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
("mi_switch: switch must be voluntary or involuntary"));
+ KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));
if (flags & SW_VOL)
p->p_stats->p_ru.ru_nvcsw++;
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 2a51bc0..c80214e 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -654,7 +654,8 @@ sched_switch(struct thread *td, struct thread *newtd)
sched_tdcnt++;
td->td_lastcpu = td->td_oncpu;
td->td_last_kse = ke;
- td->td_flags &= ~(TDF_NEEDRESCHED | TDF_OWEPREEMPT);
+ td->td_flags &= ~TDF_NEEDRESCHED;
+ td->td_pflags &= ~TDP_OWEPREEMPT;
td->td_oncpu = NOCPU;
/*
* At the last moment, if this thread is still marked RUNNING,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 5ea303b..602b611 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1138,7 +1138,8 @@ sched_switch(struct thread *td, struct thread *newtd)
td->td_last_kse = ke;
td->td_lastcpu = td->td_oncpu;
td->td_oncpu = NOCPU;
- td->td_flags &= ~(TDF_NEEDRESCHED | TDF_OWEPREEMPT);
+ td->td_flags &= ~TDF_NEEDRESCHED;
+ td->td_pflags &= ~TDP_OWEPREEMPT;
/*
* If the KSE has been assigned it may be in the process of switching
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 442ca6d..ba36093 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -481,10 +481,11 @@ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
addr = prof->pr_base + i;
mtx_unlock_spin(&sched_lock);
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
- prof->pr_addr = pc;
- prof->pr_ticks = ticks;
+ td->td_profil_addr = pc;
+ td->td_profil_ticks = ticks;
+ td->td_pflags |= TDP_OWEUPC;
mtx_lock_spin(&sched_lock);
- td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
mtx_unlock_spin(&sched_lock);
}
}
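
The addupc_intr() hunk above and the ast() hunk below together form a
deferral: when the profiling buffer cannot be updated safely from interrupt
context (fuswintr()/suswintr() fail), the sample is parked in per-thread
fields plus a private flag and committed on the way back to user mode.  A
standalone sketch of that shape, with hypothetical names:

	/* Hypothetical stand-ins for the per-thread fields in the diff. */
	struct thr {
		unsigned long	profil_addr;	/* like td_profil_addr */
		unsigned int	profil_ticks;	/* like td_profil_ticks */
		int		pflags;		/* like td_pflags */
	};
	#define	PFLAG_OWEUPC	0x01

	/* Interrupt path: park the sample for later (cf. addupc_intr()). */
	static void
	defer_sample(struct thr *t, unsigned long pc, unsigned int ticks)
	{
		t->profil_addr = pc;
		t->profil_ticks = ticks;
		t->pflags |= PFLAG_OWEUPC;	/* owner-only flag: no lock */
	}

	/* Return-to-user path: commit and clear (cf. ast()/addupc_task()). */
	static void
	replay_sample(struct thr *t,
	    void (*commit)(unsigned long, unsigned int))
	{
		if (t->pflags & PFLAG_OWEUPC) {
			commit(t->profil_addr, t->profil_ticks);
			t->profil_ticks = 0;
			t->pflags &= ~PFLAG_OWEUPC;
		}
	}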
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index b7b461e..664fe63 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -178,7 +178,7 @@ ast(struct trapframe *framep)
p->p_sflag &= ~PS_MACPEND;
#endif
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
- TDF_NEEDRESCHED | TDF_OWEUPC | TDF_INTERRUPT);
+ TDF_NEEDRESCHED | TDF_INTERRUPT);
cnt.v_soft++;
mtx_unlock_spin(&sched_lock);
/*
@@ -191,10 +191,10 @@ ast(struct trapframe *framep)
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
- if (flags & TDF_OWEUPC && p->p_flag & P_PROFIL) {
- addupc_task(td, p->p_stats->p_prof.pr_addr,
- p->p_stats->p_prof.pr_ticks);
- p->p_stats->p_prof.pr_ticks = 0;
+ if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
+ addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
+ td->td_profil_ticks = 0;
+ td->td_pflags &= ~TDP_OWEUPC;
}
if (sflag & PS_ALRMPEND) {
PROC_LOCK(p);