summary | refs | log | tree | commit | diff | stats
path: root/sys/kern
diff options
context:
space:
mode:
author    tjr <tjr@FreeBSD.org>  2004-06-02 07:52:36 +0000
committer tjr <tjr@FreeBSD.org>  2004-06-02 07:52:36 +0000
commit    80d36400edd04b536fd6cab0dae30c9e678e6598 (patch)
tree      dc1941eeb3f510a239f3f234b0c5aef4b268c958 /sys/kern
parent    3bf088071bbb46981c2cb1cf2ad63132bf04048d (diff)
download  FreeBSD-src-80d36400edd04b536fd6cab0dae30c9e678e6598.zip
download  FreeBSD-src-80d36400edd04b536fd6cab0dae30c9e678e6598.tar.gz
Move TDF_SA from td_flags to td_pflags (and rename it accordingly)
so that it is no longer necessary to hold sched_lock while manipulating it.

Reviewed by: davidxu
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_exec.c    |  4
-rw-r--r--  sys/kern/kern_exit.c    |  4
-rw-r--r--  sys/kern/kern_kse.c     | 20
-rw-r--r--  sys/kern/kern_sig.c     | 10
-rw-r--r--  sys/kern/kern_thread.c  | 20
5 files changed, 27 insertions(+), 31 deletions(-)
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 44f5e75..0c7a6d8 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -266,9 +266,7 @@ kern_execve(td, fname, argv, envv, mac_p)
*/
p->p_flag &= ~P_SA;
td->td_mailbox = NULL;
- mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_SA;
- mtx_unlock_spin(&sched_lock);
+ td->td_pflags &= ~TDP_SA;
thread_single_end();
}
p->p_flag |= P_INEXEC;
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 4d10f84..4a9517b 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -162,9 +162,7 @@ exit1(struct thread *td, int rv)
* Turn off threading support.
*/
p->p_flag &= ~P_SA;
- mtx_lock_spin(&sched_lock);
- td->td_flags &= ~TDF_SA;
- mtx_unlock_spin(&sched_lock);
+ td->td_pflags &= ~TDP_SA;
thread_single_end(); /* Don't need this any more. */
}
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 88b2689..fc6a77c 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -609,7 +609,7 @@ kse_release(struct thread *td, struct kse_release_args *uap)
return (error);
TIMESPEC_TO_TIMEVAL(&tv, &timeout);
}
- if (td->td_flags & TDF_SA)
+ if (td->td_pflags & TDP_SA)
td->td_pflags |= TDP_UPCALLING;
else {
ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
@@ -748,7 +748,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
if (virtual_cpu != 0)
ncpus = virtual_cpu;
if (!(mbx.km_flags & KMF_BOUND))
- sa = TDF_SA;
+ sa = TDP_SA;
else
ncpus = 1;
PROC_LOCK(p);
@@ -787,7 +787,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
- if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
+ if (!first && ((td->td_pflags & TDP_SA) ^ sa) != 0)
return (EINVAL);
newkg = kg;
}
@@ -891,14 +891,14 @@ kse_create(struct thread *td, struct kse_create_args *uap)
}
if (!sa) {
newtd->td_mailbox = mbx.km_curthread;
- newtd->td_flags &= ~TDF_SA;
+ newtd->td_pflags &= ~TDP_SA;
if (newtd != td) {
mtx_unlock_spin(&sched_lock);
cpu_set_upcall_kse(newtd, newku);
mtx_lock_spin(&sched_lock);
}
} else {
- newtd->td_flags |= TDF_SA;
+ newtd->td_pflags |= TDP_SA;
}
if (newtd != td)
setrunqueue(newtd);
@@ -1263,7 +1263,7 @@ thread_statclock(int user)
struct thread *td = curthread;
struct ksegrp *kg = td->td_ksegrp;
- if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
+ if (kg->kg_numupcalls == 0 || !(td->td_pflags & TDP_SA))
return (0);
if (user) {
/* Current always do via ast() */
@@ -1621,8 +1621,8 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
/* Let the new thread become owner of the upcall */
ku->ku_owner = td2;
td2->td_upcall = ku;
- td2->td_flags = TDF_SA;
- td2->td_pflags = TDP_UPCALLING;
+ td2->td_flags = 0;
+ td2->td_pflags = TDP_SA|TDP_UPCALLING;
td2->td_kse = NULL;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
@@ -1729,7 +1729,7 @@ thread_user_enter(struct proc *p, struct thread *td)
* but for now do it every time.
*/
kg = td->td_ksegrp;
- if (td->td_flags & TDF_SA) {
+ if (td->td_pflags & TDP_SA) {
ku = td->td_upcall;
KASSERT(ku, ("%s: no upcall owned", __func__));
KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
@@ -1788,7 +1788,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
ku = td->td_upcall;
/* Nothing to do with bound thread */
- if (!(td->td_flags & TDF_SA))
+ if (!(td->td_pflags & TDP_SA))
return (0);
/*
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 27ea15f..adda9bd 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1490,7 +1490,7 @@ trapsignal(struct thread *td, int sig, u_long code)
int error;
p = td->td_proc;
- if (td->td_flags & TDF_SA) {
+ if (td->td_pflags & TDP_SA) {
if (td->td_mailbox == NULL)
thread_user_enter(p, td);
PROC_LOCK(p);
@@ -1524,7 +1524,7 @@ trapsignal(struct thread *td, int sig, u_long code)
ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
&td->td_sigmask, code);
#endif
- if (!(td->td_flags & TDF_SA))
+ if (!(td->td_pflags & TDP_SA))
(*p->p_sysent->sv_sendsig)(
ps->ps_sigact[_SIG_IDX(sig)], sig,
&td->td_sigmask, code);
@@ -2291,7 +2291,7 @@ postsig(sig)
mtx_lock(&ps->ps_mtx);
}
- if (!(td->td_flags & TDF_SA && td->td_mailbox) &&
+ if (!(td->td_pflags & TDP_SA && td->td_mailbox) &&
action == SIG_DFL) {
/*
* Default action, where the default is to kill
@@ -2301,7 +2301,7 @@ postsig(sig)
sigexit(td, sig);
/* NOTREACHED */
} else {
- if (td->td_flags & TDF_SA && td->td_mailbox) {
+ if (td->td_pflags & TDP_SA && td->td_mailbox) {
if (sig == SIGKILL) {
mtx_unlock(&ps->ps_mtx);
sigexit(td, sig);
@@ -2350,7 +2350,7 @@ postsig(sig)
p->p_code = 0;
p->p_sig = 0;
}
- if (td->td_flags & TDF_SA && td->td_mailbox)
+ if (td->td_pflags & TDP_SA && td->td_mailbox)
thread_signal_add(curthread, sig);
else
(*p->p_sysent->sv_sendsig)(action, sig,
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 88b2689..fc6a77c 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -609,7 +609,7 @@ kse_release(struct thread *td, struct kse_release_args *uap)
return (error);
TIMESPEC_TO_TIMEVAL(&tv, &timeout);
}
- if (td->td_flags & TDF_SA)
+ if (td->td_pflags & TDP_SA)
td->td_pflags |= TDP_UPCALLING;
else {
ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
@@ -748,7 +748,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
if (virtual_cpu != 0)
ncpus = virtual_cpu;
if (!(mbx.km_flags & KMF_BOUND))
- sa = TDF_SA;
+ sa = TDP_SA;
else
ncpus = 1;
PROC_LOCK(p);
@@ -787,7 +787,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
- if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
+ if (!first && ((td->td_pflags & TDP_SA) ^ sa) != 0)
return (EINVAL);
newkg = kg;
}
@@ -891,14 +891,14 @@ kse_create(struct thread *td, struct kse_create_args *uap)
}
if (!sa) {
newtd->td_mailbox = mbx.km_curthread;
- newtd->td_flags &= ~TDF_SA;
+ newtd->td_pflags &= ~TDP_SA;
if (newtd != td) {
mtx_unlock_spin(&sched_lock);
cpu_set_upcall_kse(newtd, newku);
mtx_lock_spin(&sched_lock);
}
} else {
- newtd->td_flags |= TDF_SA;
+ newtd->td_pflags |= TDP_SA;
}
if (newtd != td)
setrunqueue(newtd);
@@ -1263,7 +1263,7 @@ thread_statclock(int user)
struct thread *td = curthread;
struct ksegrp *kg = td->td_ksegrp;
- if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
+ if (kg->kg_numupcalls == 0 || !(td->td_pflags & TDP_SA))
return (0);
if (user) {
/* Current always do via ast() */
@@ -1621,8 +1621,8 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
/* Let the new thread become owner of the upcall */
ku->ku_owner = td2;
td2->td_upcall = ku;
- td2->td_flags = TDF_SA;
- td2->td_pflags = TDP_UPCALLING;
+ td2->td_flags = 0;
+ td2->td_pflags = TDP_SA|TDP_UPCALLING;
td2->td_kse = NULL;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
@@ -1729,7 +1729,7 @@ thread_user_enter(struct proc *p, struct thread *td)
* but for now do it every time.
*/
kg = td->td_ksegrp;
- if (td->td_flags & TDF_SA) {
+ if (td->td_pflags & TDP_SA) {
ku = td->td_upcall;
KASSERT(ku, ("%s: no upcall owned", __func__));
KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
@@ -1788,7 +1788,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
ku = td->td_upcall;
/* Nothing to do with bound thread */
- if (!(td->td_flags & TDF_SA))
+ if (!(td->td_pflags & TDP_SA))
return (0);
/*
OpenPOWER on IntegriCloud