diff options
-rw-r--r-- | sys/amd64/amd64/trap.c | 2
-rw-r--r-- | sys/i386/i386/trap.c | 2
-rw-r--r-- | sys/kern/kern_exec.c | 4
-rw-r--r-- | sys/kern/kern_exit.c | 4
-rw-r--r-- | sys/kern/kern_kse.c | 20
-rw-r--r-- | sys/kern/kern_sig.c | 10
-rw-r--r-- | sys/kern/kern_thread.c | 20
-rw-r--r-- | sys/sys/proc.h | 2
8 files changed, 30 insertions(+), 34 deletions(-)
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c index 53c3b5a..bcd9bc5 100644 --- a/sys/amd64/amd64/trap.c +++ b/sys/amd64/amd64/trap.c @@ -262,7 +262,7 @@ trap(frame) break; case T_PAGEFLT: /* page fault */ - if (td->td_flags & TDF_SA) + if (td->td_pflags & TDP_SA) thread_user_enter(p, td); i = trap_pfault(&frame, TRUE); if (i == -1) diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c index c72f515..c6489f4 100644 --- a/sys/i386/i386/trap.c +++ b/sys/i386/i386/trap.c @@ -313,7 +313,7 @@ trap(frame) break; case T_PAGEFLT: /* page fault */ - if (td->td_flags & TDF_SA) + if (td->td_pflags & TDP_SA) thread_user_enter(p, td); i = trap_pfault(&frame, TRUE, eva); diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index 44f5e75..0c7a6d8 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -266,9 +266,7 @@ kern_execve(td, fname, argv, envv, mac_p) */ p->p_flag &= ~P_SA; td->td_mailbox = NULL; - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_SA; - mtx_unlock_spin(&sched_lock); + td->td_pflags &= ~TDP_SA; thread_single_end(); } p->p_flag |= P_INEXEC; diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index 4d10f84..4a9517b 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -162,9 +162,7 @@ exit1(struct thread *td, int rv) * Turn off threading support. */ p->p_flag &= ~P_SA; - mtx_lock_spin(&sched_lock); - td->td_flags &= ~TDF_SA; - mtx_unlock_spin(&sched_lock); + td->td_pflags &= ~TDP_SA; thread_single_end(); /* Don't need this any more. 
*/ } diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c index 88b2689..fc6a77c 100644 --- a/sys/kern/kern_kse.c +++ b/sys/kern/kern_kse.c @@ -609,7 +609,7 @@ kse_release(struct thread *td, struct kse_release_args *uap) return (error); TIMESPEC_TO_TIMEVAL(&tv, &timeout); } - if (td->td_flags & TDF_SA) + if (td->td_pflags & TDP_SA) td->td_pflags |= TDP_UPCALLING; else { ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags); @@ -748,7 +748,7 @@ kse_create(struct thread *td, struct kse_create_args *uap) if (virtual_cpu != 0) ncpus = virtual_cpu; if (!(mbx.km_flags & KMF_BOUND)) - sa = TDF_SA; + sa = TDP_SA; else ncpus = 1; PROC_LOCK(p); @@ -787,7 +787,7 @@ kse_create(struct thread *td, struct kse_create_args *uap) mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } else { - if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0) + if (!first && ((td->td_pflags & TDP_SA) ^ sa) != 0) return (EINVAL); newkg = kg; } @@ -891,14 +891,14 @@ kse_create(struct thread *td, struct kse_create_args *uap) } if (!sa) { newtd->td_mailbox = mbx.km_curthread; - newtd->td_flags &= ~TDF_SA; + newtd->td_pflags &= ~TDP_SA; if (newtd != td) { mtx_unlock_spin(&sched_lock); cpu_set_upcall_kse(newtd, newku); mtx_lock_spin(&sched_lock); } } else { - newtd->td_flags |= TDF_SA; + newtd->td_pflags |= TDP_SA; } if (newtd != td) setrunqueue(newtd); @@ -1263,7 +1263,7 @@ thread_statclock(int user) struct thread *td = curthread; struct ksegrp *kg = td->td_ksegrp; - if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA)) + if (kg->kg_numupcalls == 0 || !(td->td_pflags & TDP_SA)) return (0); if (user) { /* Current always do via ast() */ @@ -1621,8 +1621,8 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) /* Let the new thread become owner of the upcall */ ku->ku_owner = td2; td2->td_upcall = ku; - td2->td_flags = TDF_SA; - td2->td_pflags = TDP_UPCALLING; + td2->td_flags = 0; + td2->td_pflags = TDP_SA|TDP_UPCALLING; td2->td_kse = NULL; td2->td_state = TDS_CAN_RUN; td2->td_inhibitors = 0; @@ 
-1729,7 +1729,7 @@ thread_user_enter(struct proc *p, struct thread *td) * but for now do it every time. */ kg = td->td_ksegrp; - if (td->td_flags & TDF_SA) { + if (td->td_pflags & TDP_SA) { ku = td->td_upcall; KASSERT(ku, ("%s: no upcall owned", __func__)); KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__)); @@ -1788,7 +1788,7 @@ thread_userret(struct thread *td, struct trapframe *frame) ku = td->td_upcall; /* Nothing to do with bound thread */ - if (!(td->td_flags & TDF_SA)) + if (!(td->td_pflags & TDP_SA)) return (0); /* diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 27ea15f..adda9bd 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -1490,7 +1490,7 @@ trapsignal(struct thread *td, int sig, u_long code) int error; p = td->td_proc; - if (td->td_flags & TDF_SA) { + if (td->td_pflags & TDP_SA) { if (td->td_mailbox == NULL) thread_user_enter(p, td); PROC_LOCK(p); @@ -1524,7 +1524,7 @@ trapsignal(struct thread *td, int sig, u_long code) ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)], &td->td_sigmask, code); #endif - if (!(td->td_flags & TDF_SA)) + if (!(td->td_pflags & TDP_SA)) (*p->p_sysent->sv_sendsig)( ps->ps_sigact[_SIG_IDX(sig)], sig, &td->td_sigmask, code); @@ -2291,7 +2291,7 @@ postsig(sig) mtx_lock(&ps->ps_mtx); } - if (!(td->td_flags & TDF_SA && td->td_mailbox) && + if (!(td->td_pflags & TDP_SA && td->td_mailbox) && action == SIG_DFL) { /* * Default action, where the default is to kill @@ -2301,7 +2301,7 @@ postsig(sig) sigexit(td, sig); /* NOTREACHED */ } else { - if (td->td_flags & TDF_SA && td->td_mailbox) { + if (td->td_pflags & TDP_SA && td->td_mailbox) { if (sig == SIGKILL) { mtx_unlock(&ps->ps_mtx); sigexit(td, sig); @@ -2350,7 +2350,7 @@ postsig(sig) p->p_code = 0; p->p_sig = 0; } - if (td->td_flags & TDF_SA && td->td_mailbox) + if (td->td_pflags & TDP_SA && td->td_mailbox) thread_signal_add(curthread, sig); else (*p->p_sysent->sv_sendsig)(action, sig, diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 
88b2689..fc6a77c 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -609,7 +609,7 @@ kse_release(struct thread *td, struct kse_release_args *uap) return (error); TIMESPEC_TO_TIMEVAL(&tv, &timeout); } - if (td->td_flags & TDF_SA) + if (td->td_pflags & TDP_SA) td->td_pflags |= TDP_UPCALLING; else { ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags); @@ -748,7 +748,7 @@ kse_create(struct thread *td, struct kse_create_args *uap) if (virtual_cpu != 0) ncpus = virtual_cpu; if (!(mbx.km_flags & KMF_BOUND)) - sa = TDF_SA; + sa = TDP_SA; else ncpus = 1; PROC_LOCK(p); @@ -787,7 +787,7 @@ kse_create(struct thread *td, struct kse_create_args *uap) mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } else { - if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0) + if (!first && ((td->td_pflags & TDP_SA) ^ sa) != 0) return (EINVAL); newkg = kg; } @@ -891,14 +891,14 @@ kse_create(struct thread *td, struct kse_create_args *uap) } if (!sa) { newtd->td_mailbox = mbx.km_curthread; - newtd->td_flags &= ~TDF_SA; + newtd->td_pflags &= ~TDP_SA; if (newtd != td) { mtx_unlock_spin(&sched_lock); cpu_set_upcall_kse(newtd, newku); mtx_lock_spin(&sched_lock); } } else { - newtd->td_flags |= TDF_SA; + newtd->td_pflags |= TDP_SA; } if (newtd != td) setrunqueue(newtd); @@ -1263,7 +1263,7 @@ thread_statclock(int user) struct thread *td = curthread; struct ksegrp *kg = td->td_ksegrp; - if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA)) + if (kg->kg_numupcalls == 0 || !(td->td_pflags & TDP_SA)) return (0); if (user) { /* Current always do via ast() */ @@ -1621,8 +1621,8 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) /* Let the new thread become owner of the upcall */ ku->ku_owner = td2; td2->td_upcall = ku; - td2->td_flags = TDF_SA; - td2->td_pflags = TDP_UPCALLING; + td2->td_flags = 0; + td2->td_pflags = TDP_SA|TDP_UPCALLING; td2->td_kse = NULL; td2->td_state = TDS_CAN_RUN; td2->td_inhibitors = 0; @@ -1729,7 +1729,7 @@ thread_user_enter(struct proc *p, struct 
thread *td) * but for now do it every time. */ kg = td->td_ksegrp; - if (td->td_flags & TDF_SA) { + if (td->td_pflags & TDP_SA) { ku = td->td_upcall; KASSERT(ku, ("%s: no upcall owned", __func__)); KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__)); @@ -1788,7 +1788,7 @@ thread_userret(struct thread *td, struct trapframe *frame) ku = td->td_upcall; /* Nothing to do with bound thread */ - if (!(td->td_flags & TDF_SA)) + if (!(td->td_pflags & TDP_SA)) return (0); /* diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 1437106..3a074dd 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -353,7 +353,6 @@ struct thread { #define TDF_OWEUPC 0x008000 /* Owe thread an addupc() call at next AST. */ #define TDF_NEEDRESCHED 0x010000 /* Thread needs to yield. */ #define TDF_NEEDSIGCHK 0x020000 /* Thread may need signal delivery. */ -#define TDF_SA 0x040000 /* A scheduler activation based thread. */ #define TDF_UMTXWAKEUP 0x080000 /* Libthr thread must not sleep on a umtx. */ #define TDF_THRWAKEUP 0x100000 /* Libthr thread must not suspend itself. */ #define TDF_DEADLKTREAT 0x800000 /* Lock aquisition - deadlock treatment. */ @@ -365,6 +364,7 @@ struct thread { #define TDP_UPCALLING 0x0008 /* This thread is doing an upcall. */ #define TDP_COWINPROGRESS 0x0010 /* Snapshot copy-on-write in progress. */ #define TDP_ALTSTACK 0x0020 /* Have alternate signal stack. */ +#define TDP_SA 0x0080 /* A scheduler activation based thread. */ #define TDI_SUSPENDED 0x0001 /* On suspension queue. */ #define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */ |