author		jhb <jhb@FreeBSD.org>	2004-07-16 21:04:55 +0000
committer	jhb <jhb@FreeBSD.org>	2004-07-16 21:04:55 +0000
commit		0cb3276d57e68364c5cd8f83dd504672a5e5f5b0 (patch)
tree		40f1025728659f06a876858c57f12a51848d214c /sys
parent		1115416b3b78799329cdb0c7addff7c1ad2fc348 (diff)
- Move TDF_OWEPREEMPT, TDF_OWEUPC, and TDF_USTATCLOCK over to td_pflags
  since they are only accessed by curthread and thus do not need any
  locking.
- Move pr_addr and pr_ticks out of struct uprof (which is per-process)
  and directly into struct thread as td_profil_addr and td_profil_ticks,
  as these variables are really per-thread.  (They are used to defer an
  addupc_intr() that was too "hard" until ast().)
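The locking rule behind the first bullet, as a minimal C sketch (not the kernel source; the names mirror the kernel's, and the lock calls follow the sched_lock spin-mutex API visible in the diff below): td_flags can be written by other CPUs and so must be updated under sched_lock, while td_pflags is only ever read or written by curthread and can be updated with plain stores.

	/*
	 * Sketch only: shared vs. private per-thread flag words.
	 */
	struct thread {
		int	td_flags;	/* (j) shared; protected by sched_lock */
		int	td_pflags;	/* (k) private; written only by curthread */
	};

	static void
	request_ast(struct thread *td)	/* hypothetical helper; td == curthread */
	{
		td->td_pflags |= TDP_OWEUPC;		/* private: no lock needed */
		mtx_lock_spin(&sched_lock);		/* shared: lock required */
		td->td_flags |= TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
	}

This is the shape of the addupc_intr() and thread_statclock() hunks below: the private-flag store moves outside the locked region, and only the shared TDF_ASTPENDING update keeps the spin lock.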
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_kse.c	9
-rw-r--r--	sys/kern/kern_switch.c	9
-rw-r--r--	sys/kern/kern_synch.c	3
-rw-r--r--	sys/kern/sched_4bsd.c	3
-rw-r--r--	sys/kern/sched_ule.c	3
-rw-r--r--	sys/kern/subr_prof.c	7
-rw-r--r--	sys/kern/subr_trap.c	10
-rw-r--r--	sys/sys/proc.h	9
-rw-r--r--	sys/sys/resourcevar.h	2
9 files changed, 31 insertions, 24 deletions
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 7fa8ac7..29d83d3 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -838,8 +838,9 @@ thread_statclock(int user)
return (0);
if (user) {
/* Current always do via ast() */
+ td->td_pflags |= TDP_USTATCLOCK;
mtx_lock_spin(&sched_lock);
- td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
+ td->td_flags |= TDF_ASTPENDING;
mtx_unlock_spin(&sched_lock);
td->td_uuticks++;
} else if (td->td_mailbox != NULL)
@@ -1129,11 +1130,9 @@ thread_userret(struct thread *td, struct trapframe *frame)
* is returning from interrupt, charge thread's
* userland time for UTS.
*/
- if (td->td_flags & TDF_USTATCLOCK) {
+ if (td->td_pflags & TDP_USTATCLOCK) {
thread_update_usr_ticks(td, 1);
- mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_USTATCLOCK;
- mtx_unlock_spin(&sched_lock);
+ td->td_pflags &= ~TDP_USTATCLOCK;
}
/*
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 768084c..5183a35 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -451,7 +451,8 @@ critical_exit(void)
("critical_exit: td_critnest == 0"));
if (td->td_critnest == 1) {
#ifdef PREEMPTION
- if (td->td_flags & TDF_OWEPREEMPT) {
+ mtx_assert(&sched_lock, MA_NOTOWNED);
+ if (td->td_pflags & TDP_OWEPREEMPT) {
mtx_lock_spin(&sched_lock);
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
@@ -485,7 +486,9 @@ maybe_preempt(struct thread *td)
* The new thread should not preempt the current thread if any of the
* following conditions are true:
*
- * - The current thread has a higher (numerically lower) priority.
+ * - The current thread has a higher (numerically lower) or
+ * equivalent priority. Note that this prevents curthread from
+ * trying to preempt to itself.
* - It is too early in the boot for context switches (cold is set).
* - The current thread has an inhibitor set or is in the process of
* exiting. In this case, the current thread is about to switch
@@ -515,7 +518,7 @@ maybe_preempt(struct thread *td)
if (ctd->td_critnest > 1) {
CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
ctd->td_critnest);
- ctd->td_flags |= TDF_OWEPREEMPT;
+ ctd->td_pflags |= TDP_OWEPREEMPT;
return (0);
}
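The two kern_switch.c hunks above implement a deferral protocol that can be condensed into a short sketch (hedged: this is a summary of the hunks, with the KTR tracing and assertions omitted). maybe_preempt() must not switch while curthread is inside a critical section, so it records the debt in the lock-free td_pflags word; critical_exit() pays it off when the nesting count is back at its outermost level.

	/* Sketch: defer preemption while inside a critical section. */
	static int
	maybe_preempt_sketch(struct thread *ctd)	/* ctd == curthread */
	{
		if (ctd->td_critnest > 1) {
			ctd->td_pflags |= TDP_OWEPREEMPT;	/* private flag: no lock */
			return (0);				/* switch deferred */
		}
		/* ... otherwise preempt now via mi_switch() ... */
		return (1);
	}

	/* Sketch: pay the owed switch when leaving the outermost section. */
	static void
	critical_exit_sketch(struct thread *td)
	{
		if (td->td_critnest == 1 && (td->td_pflags & TDP_OWEPREEMPT) != 0) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);	/* sched_switch() clears TDP_OWEPREEMPT */
			mtx_unlock_spin(&sched_lock);
		}
		td->td_critnest--;
	}

Moving TDP_OWEPREEMPT out of the locked td_flags word is what lets the new mtx_assert(&sched_lock, MA_NOTOWNED) in critical_exit() hold: testing the flag no longer needs the lock that the subsequent mi_switch() is about to take.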
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 7016307..0cb0282 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -297,11 +297,12 @@ mi_switch(int flags, struct thread *newtd)
mtx_assert(&Giant, MA_NOTOWNED);
#endif
KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
- (td->td_flags & TDF_OWEPREEMPT) != 0 && (flags & SW_INVOL) != 0 &&
+ (td->td_pflags & TDP_OWEPREEMPT) != 0 && (flags & SW_INVOL) != 0 &&
newtd == NULL),
("mi_switch: switch in a critical section"));
KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
("mi_switch: switch must be voluntary or involuntary"));
+ KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));
if (flags & SW_VOL)
p->p_stats->p_ru.ru_nvcsw++;
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 2a51bc0..c80214e 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -654,7 +654,8 @@ sched_switch(struct thread *td, struct thread *newtd)
sched_tdcnt++;
td->td_lastcpu = td->td_oncpu;
td->td_last_kse = ke;
- td->td_flags &= ~(TDF_NEEDRESCHED | TDF_OWEPREEMPT);
+ td->td_flags &= ~TDF_NEEDRESCHED;
+ td->td_pflags &= ~TDP_OWEPREEMPT;
td->td_oncpu = NOCPU;
/*
* At the last moment, if this thread is still marked RUNNING,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 5ea303b..602b611 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1138,7 +1138,8 @@ sched_switch(struct thread *td, struct thread *newtd)
td->td_last_kse = ke;
td->td_lastcpu = td->td_oncpu;
td->td_oncpu = NOCPU;
- td->td_flags &= ~(TDF_NEEDRESCHED | TDF_OWEPREEMPT);
+ td->td_flags &= ~TDF_NEEDRESCHED;
+ td->td_pflags &= ~TDP_OWEPREEMPT;
/*
* If the KSE has been assigned it may be in the process of switching
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 442ca6d..ba36093 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -481,10 +481,11 @@ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
addr = prof->pr_base + i;
mtx_unlock_spin(&sched_lock);
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
- prof->pr_addr = pc;
- prof->pr_ticks = ticks;
+ td->td_profil_addr = pc;
+ td->td_profil_ticks = ticks;
+ td->td_pflags |= TDP_OWEUPC;
mtx_lock_spin(&sched_lock);
- td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING;
+ td->td_flags |= TDF_ASTPENDING;
mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index b7b461e..664fe63 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -178,7 +178,7 @@ ast(struct trapframe *framep)
p->p_sflag &= ~PS_MACPEND;
#endif
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
- TDF_NEEDRESCHED | TDF_OWEUPC | TDF_INTERRUPT);
+ TDF_NEEDRESCHED | TDF_INTERRUPT);
cnt.v_soft++;
mtx_unlock_spin(&sched_lock);
/*
@@ -191,10 +191,10 @@ ast(struct trapframe *framep)
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
- if (flags & TDF_OWEUPC && p->p_flag & P_PROFIL) {
- addupc_task(td, p->p_stats->p_prof.pr_addr,
- p->p_stats->p_prof.pr_ticks);
- p->p_stats->p_prof.pr_ticks = 0;
+ if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
+ addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
+ td->td_profil_ticks = 0;
+ td->td_pflags &= ~TDP_OWEUPC;
}
if (sflag & PS_ALRMPEND) {
PROC_LOCK(p);
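Taken together, the subr_prof.c and subr_trap.c hunks split profiling-tick bookkeeping into an interrupt half and an AST half. A minimal sketch of the pattern follows, with a hypothetical helper try_update_profile_nofault() standing in for the fuswintr()/suswintr() pair in the real addupc_intr():

	/* Interrupt half: may not fault, so park the sample on failure. */
	static void
	addupc_intr_sketch(struct thread *td, uintptr_t pc, u_int ticks)
	{
		if (try_update_profile_nofault(pc, ticks) == -1) {	/* hypothetical */
			td->td_profil_addr = pc;	/* per-thread scratch: no lock */
			td->td_profil_ticks = ticks;
			td->td_pflags |= TDP_OWEUPC;
			mtx_lock_spin(&sched_lock);
			td->td_flags |= TDF_ASTPENDING;	/* force a trip through ast() */
			mtx_unlock_spin(&sched_lock);
		}
	}

	/* AST half: faulting is safe here, so finish via addupc_task(). */
	static void
	ast_oweupc_sketch(struct thread *td, struct proc *p)
	{
		if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
			addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
			td->td_profil_ticks = 0;
			td->td_pflags &= ~TDP_OWEUPC;
		}
	}

Because td_profil_addr, td_profil_ticks, and TDP_OWEUPC all live in the thread and are touched only by curthread, the deferred sample no longer shares state with other threads of the process the way the old per-process pr_addr/pr_ticks did.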
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 0a15daf..4a8da78 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -304,6 +304,9 @@ struct thread {
stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */
int td_kflags; /* (c) Flags for KSE threading. */
int td_xsig; /* (c) Signal for ptrace */
+ u_long td_profil_addr; /* (k) Temporary addr until AST. */
+ u_int td_profil_ticks; /* (k) Temporary ticks until AST. */
+
#define td_endzero td_base_pri
/* Copied during fork1() or thread_sched_upcall(). */
@@ -346,12 +349,9 @@ struct thread {
#define TDF_IDLETD 0x000020 /* This is one of the per-CPU idle threads. */
#define TDF_SELECT 0x000040 /* Selecting; wakeup/waiting danger. */
#define TDF_TSNOBLOCK 0x000100 /* Don't block on a turnstile due to race. */
-#define TDF_OWEPREEMPT 0x000200 /* Thread has a pending preemption. */
#define TDF_ASTPENDING 0x000800 /* Thread has some asynchronous events. */
#define TDF_TIMOFAIL 0x001000 /* Timeout from sleep after we were awake. */
#define TDF_INTERRUPT 0x002000 /* Thread is marked as interrupted. */
-#define TDF_USTATCLOCK 0x004000 /* Finish user statclock hit at next AST. */
-#define TDF_OWEUPC 0x008000 /* Owe thread an addupc() call at next AST. */
#define TDF_NEEDRESCHED 0x010000 /* Thread needs to yield. */
#define TDF_NEEDSIGCHK 0x020000 /* Thread may need signal delivery. */
#define TDF_XSIG 0x040000 /* Thread is exchanging signal under traced */
@@ -368,6 +368,9 @@ struct thread {
#define TDP_ALTSTACK 0x0020 /* Have alternate signal stack. */
#define TDP_DEADLKTREAT 0x0040 /* Lock aquisition - deadlock treatment. */
#define TDP_SA 0x0080 /* A scheduler activation based thread. */
+#define TDP_OWEPREEMPT 0x0100 /* Thread has a pending preemption. */
+#define TDP_OWEUPC 0x0200 /* Owe thread an addupc() call at next AST. */
+#define TDP_USTATCLOCK 0x0400 /* Finish user statclock hit at next AST. */
#define TDI_SUSPENDED 0x0001 /* On suspension queue. */
#define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */
diff --git a/sys/sys/resourcevar.h b/sys/sys/resourcevar.h
index b61b276..9242a59 100644
--- a/sys/sys/resourcevar.h
+++ b/sys/sys/resourcevar.h
@@ -63,8 +63,6 @@ struct pstats {
u_long pr_size; /* (c + j) Buffer size. */
u_long pr_off; /* (c + j) PC offset. */
u_long pr_scale; /* (c + j) PC scaling. */
- u_long pr_addr; /* (k) Temporary addr until AST. */
- u_int pr_ticks; /* (k) Temporary ticks until AST. */
} p_prof;
#define pstat_endcopy p_start
struct timeval p_start; /* (b) Starting time. */