Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_clock.c   |  6
-rw-r--r--  sys/kern/kern_exec.c    |  4
-rw-r--r--  sys/kern/kern_exit.c    |  4
-rw-r--r--  sys/kern/kern_fork.c    |  6
-rw-r--r--  sys/kern/kern_kse.c     | 12
-rw-r--r--  sys/kern/kern_sig.c     |  2
-rw-r--r--  sys/kern/kern_switch.c  | 10
-rw-r--r--  sys/kern/kern_synch.c   |  4
-rw-r--r--  sys/kern/kern_thread.c  | 12
-rw-r--r--  sys/kern/sched_4bsd.c   |  2
-rw-r--r--  sys/kern/sched_ule.c    |  2
-rw-r--r--  sys/kern/subr_trap.c    |  4
-rw-r--r--  sys/kern/tty.c          |  2
13 files changed, 35 insertions(+), 35 deletions(-)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 15626c4..50a48b1 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -164,7 +164,7 @@ hardclock_process(frame)
 	 * Run current process's virtual and profile time, as needed.
 	 */
 	mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
-	if (p->p_flag & P_THREADED) {
+	if (p->p_flag & P_SA) {
 		/* XXXKSE What to do? */
 	} else {
 		pstats = p->p_stats;
@@ -370,7 +370,7 @@ statclock(frame)
 		/*
 		 * Charge the time as appropriate.
 		 */
-		if (p->p_flag & P_THREADED)
+		if (p->p_flag & P_SA)
 			thread_statclock(1);
 		p->p_uticks++;
 		if (ke->ke_ksegrp->kg_nice > NZERO)
@@ -394,7 +394,7 @@ statclock(frame)
 			p->p_iticks++;
 			cp_time[CP_INTR]++;
 		} else {
-			if (p->p_flag & P_THREADED)
+			if (p->p_flag & P_SA)
 				thread_statclock(0);
 			td->td_sticks++;
 			p->p_sticks++;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 9ef4c02..ed97677 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -190,7 +190,7 @@ kern_execve(td, fname, argv, envv, mac_p)
 	PROC_LOCK(p);
 	KASSERT((p->p_flag & P_INEXEC) == 0,
 	    ("%s(): process already has P_INEXEC flag", __func__));
-	if (p->p_flag & P_THREADED || p->p_numthreads > 1) {
+	if (p->p_flag & P_SA || p->p_numthreads > 1) {
 		if (thread_single(SINGLE_EXIT)) {
 			PROC_UNLOCK(p);
 			return (ERESTART);	/* Try again later. */
@@ -199,7 +199,7 @@ kern_execve(td, fname, argv, envv, mac_p)
 		 * If we get here all other threads are dead,
 		 * so unset the associated flags and lose KSE mode.
 		 */
-		p->p_flag &= ~P_THREADED;
+		p->p_flag &= ~P_SA;
		td->td_mailbox = NULL;
 		thread_single_end();
 	}
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 283a8bd..c65dfeb 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -134,7 +134,7 @@ exit1(struct thread *td, int rv)
 	 * MUST abort all other threads before proceeding past here.
 	 */
 	PROC_LOCK(p);
-	if (p->p_flag & P_THREADED || p->p_numthreads > 1) {
+	if (p->p_flag & P_SA || p->p_numthreads > 1) {
 		/*
 		 * First check if some other thread got here before us..
 		 * if so, act apropriatly, (exit or suspend);
@@ -164,7 +164,7 @@ exit1(struct thread *td, int rv)
 		 * ...
 		 * Turn off threading support.
 		 */
-		p->p_flag &= ~P_THREADED;
+		p->p_flag &= ~P_SA;
 		thread_single_end();	/* Don't need this any more. */
 	}
 	/*
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 4c514df..4045feb 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -259,7 +259,7 @@ fork1(td, flags, pages, procp)
 	 * other side with the expectation that the process is about to
 	 * exec.
 	 */
-	if (p1->p_flag & P_THREADED) {
+	if (p1->p_flag & P_SA) {
 		/*
 		 * Idle the other threads for a second.
 		 * Since the user space is copied, it must remain stable.
@@ -717,7 +717,7 @@ again:
 	/*
	 * If other threads are waiting, let them continue now
 	 */
-	if (p1->p_flag & P_THREADED) {
+	if (p1->p_flag & P_SA) {
 		PROC_LOCK(p1);
 		thread_single_end();
 		PROC_UNLOCK(p1);
@@ -732,7 +732,7 @@ again:
 fail:
 	sx_xunlock(&allproc_lock);
 	uma_zfree(proc_zone, newproc);
-	if (p1->p_flag & P_THREADED) {
+	if (p1->p_flag & P_SA) {
 		PROC_LOCK(p1);
 		thread_single_end();
 		PROC_UNLOCK(p1);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 32c755a..2080493 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -393,7 +393,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
 	struct thread *td2;
 
 	p = td->td_proc;
-	if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
+	if (!(p->p_flag & P_SA) || (uap->tmbx == NULL))
 		return (EINVAL);
 	mtx_lock_spin(&sched_lock);
 	FOREACH_THREAD_IN_PROC(p, td2) {
@@ -456,7 +456,7 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
 	ke = td->td_kse;
 	if (p->p_numthreads == 1) {
 		kse_purge(p, td);
-		p->p_flag &= ~P_THREADED;
+		p->p_flag &= ~P_SA;
 		mtx_unlock_spin(&sched_lock);
 		PROC_UNLOCK(p);
 	} else {
@@ -545,7 +545,7 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
 	td2 = NULL;
 	ku = NULL;
 	/* KSE-enabled processes only, please. */
-	if (!(p->p_flag & P_THREADED))
+	if (!(p->p_flag & P_SA))
 		return (EINVAL);
 	PROC_LOCK(p);
 	mtx_lock_spin(&sched_lock);
@@ -620,7 +620,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
 
 	/* Easier to just set it than to test and set */
 	PROC_LOCK(p);
-	p->p_flag |= P_THREADED;
+	p->p_flag |= P_SA;
 	PROC_UNLOCK(p);
 	kg = td->td_ksegrp;
 	if (uap->newgroup) {
@@ -1764,7 +1764,7 @@ thread_single(int force_exit)
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	KASSERT((td != NULL), ("curthread is NULL"));
 
-	if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
+	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
 		return (0);
 
 	/* Is someone already single threading? */
@@ -1906,7 +1906,7 @@ thread_suspend_check(int return_instead)
 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
 			while (mtx_owned(&Giant))
 				mtx_unlock(&Giant);
-			if (p->p_flag & P_THREADED)
+			if (p->p_flag & P_SA)
 				thread_exit();
 			else
 				thr_exit1();
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 4a71567..4d67050 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -2190,7 +2190,7 @@ postsig(sig)
 			p->p_code = 0;
 			p->p_sig = 0;
 		}
-		if (p->p_flag & P_THREADED)
+		if (p->p_flag & P_SA)
 			thread_signal_add(curthread, sig);
 		else
 			(*p->p_sysent->sv_sendsig)(action, sig,
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index f25a8da..306ad63 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -142,7 +142,7 @@ retry:
 		td = ke->ke_thread;
 		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
 		kg = ke->ke_ksegrp;
-		if (td->td_proc->p_flag & P_THREADED) {
+		if (td->td_proc->p_flag & P_SA) {
 			if (kg->kg_last_assigned == td) {
 				kg->kg_last_assigned = TAILQ_PREV(td,
 				    threadqueue, td_runq);
@@ -247,7 +247,7 @@ remrunqueue(struct thread *td)
 	/*
 	 * If it is not a threaded process, take the shortcut.
 	 */
-	if ((td->td_proc->p_flag & P_THREADED) == 0) {
+	if ((td->td_proc->p_flag & P_SA) == 0) {
 		/* Bring its kse with it, leave the thread attached */
 		sched_rem(ke);
 		ke->ke_state = KES_THREAD;
@@ -290,7 +290,7 @@ adjustrunqueue( struct thread *td, int newpri)
 	/*
 	 * If it is not a threaded process, take the shortcut.
 	 */
-	if ((td->td_proc->p_flag & P_THREADED) == 0) {
+	if ((td->td_proc->p_flag & P_SA) == 0) {
 		/* We only care about the kse in the run queue. */
 		td->td_priority = newpri;
 		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
@@ -331,7 +331,7 @@ setrunqueue(struct thread *td)
 	TD_SET_RUNQ(td);
 	kg = td->td_ksegrp;
 	kg->kg_runnable++;
-	if ((td->td_proc->p_flag & P_THREADED) == 0) {
+	if ((td->td_proc->p_flag & P_SA) == 0) {
 		/*
 		 * Common path optimisation: Only one of everything
 		 * and the KSE is always already attached.
@@ -651,7 +651,7 @@ thread_sanity_check(struct thread *td, char *string)
 		}
 	}
 
-	if ((p->p_flag & P_THREADED) == 0) {
+	if ((p->p_flag & P_SA) == 0) {
 		if (ke == NULL) {
 			panc(string, "non KSE thread lost kse");
 		}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 0142fad..be11257 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -170,7 +170,7 @@ msleep(ident, mtx, priority, wmesg, timo)
 	 * the thread (recursion here might be bad).
 	 */
 	mtx_lock_spin(&sched_lock);
-	if (p->p_flag & P_THREADED || p->p_numthreads > 1) {
+	if (p->p_flag & P_SA || p->p_numthreads > 1) {
 		/*
 		 * Just don't bother if we are exiting
 		 * and not the exiting thread or thread was marked as
@@ -517,7 +517,7 @@ mi_switch(void)
 	CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
 	    p->p_comm);
 	sched_nest = sched_lock.mtx_recurse;
-	if (td->td_proc->p_flag & P_THREADED)
+	if (td->td_proc->p_flag & P_SA)
 		thread_switchout(td);
 	sched_switchout(td);
 
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 32c755a..2080493 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -393,7 +393,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
 	struct thread *td2;
 
 	p = td->td_proc;
-	if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
+	if (!(p->p_flag & P_SA) || (uap->tmbx == NULL))
 		return (EINVAL);
 	mtx_lock_spin(&sched_lock);
 	FOREACH_THREAD_IN_PROC(p, td2) {
@@ -456,7 +456,7 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
 	ke = td->td_kse;
 	if (p->p_numthreads == 1) {
 		kse_purge(p, td);
-		p->p_flag &= ~P_THREADED;
+		p->p_flag &= ~P_SA;
 		mtx_unlock_spin(&sched_lock);
 		PROC_UNLOCK(p);
 	} else {
@@ -545,7 +545,7 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
 	td2 = NULL;
 	ku = NULL;
 	/* KSE-enabled processes only, please. */
-	if (!(p->p_flag & P_THREADED))
+	if (!(p->p_flag & P_SA))
 		return (EINVAL);
 	PROC_LOCK(p);
 	mtx_lock_spin(&sched_lock);
@@ -620,7 +620,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
 
 	/* Easier to just set it than to test and set */
 	PROC_LOCK(p);
-	p->p_flag |= P_THREADED;
+	p->p_flag |= P_SA;
 	PROC_UNLOCK(p);
 	kg = td->td_ksegrp;
 	if (uap->newgroup) {
@@ -1764,7 +1764,7 @@ thread_single(int force_exit)
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	KASSERT((td != NULL), ("curthread is NULL"));
 
-	if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
+	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
 		return (0);
 
 	/* Is someone already single threading? */
@@ -1906,7 +1906,7 @@ thread_suspend_check(int return_instead)
 		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
 			while (mtx_owned(&Giant))
 				mtx_unlock(&Giant);
-			if (p->p_flag & P_THREADED)
+			if (p->p_flag & P_SA)
 				thread_exit();
 			else
 				thr_exit1();
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index fa8f627..ec5ea5f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -590,7 +590,7 @@ sched_switchout(struct thread *td)
 	if (TD_IS_RUNNING(td)) {
 		/* Put us back on the run queue (kse and all). */
 		setrunqueue(td);
-	} else if (p->p_flag & P_THREADED) {
+	} else if (p->p_flag & P_SA) {
 		/*
 		 * We will not be on the run queue. So we must be
 		 * sleeping or similar. As it's available,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 72bf3d3..bb34516 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -771,7 +771,7 @@ sched_switchout(struct thread *td)
 	 * We will not be on the run queue. So we must be
 	 * sleeping or similar.
 	 */
-	if (td->td_proc->p_flag & P_THREADED)
+	if (td->td_proc->p_flag & P_SA)
 		kse_reassign(ke);
 }
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 15bc58f..396afa7 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -109,7 +109,7 @@ userret(td, frame, oticks)
 	/*
 	 * Do special thread processing, e.g. upcall tweaking and such.
 	 */
-	if (p->p_flag & P_THREADED) {
+	if (p->p_flag & P_SA) {
 		thread_userret(td, frame);
 	}
 
@@ -254,7 +254,7 @@ ast(struct trapframe *framep)
 		}
 		mtx_unlock(&p->p_sigacts->ps_mtx);
 		PROC_UNLOCK(p);
-		if (p->p_flag & P_THREADED && sigs) {
+		if (p->p_flag & P_SA && sigs) {
 			struct kse_upcall *ku = td->td_upcall;
 			if ((void *)TRAPF_PC(framep) != ku->ku_func) {
 				mtx_lock_spin(&sched_lock);
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index 685c13d..1c8aa6b 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -2419,7 +2419,7 @@ ttyinfo(struct tty *tp)
 
 	td = FIRST_THREAD_IN_PROC(pick);
 	sprefix = "";
-	if (pick->p_flag & P_THREADED) {
+	if (pick->p_flag & P_SA) {
 		stmp = "KSE" ;	/* XXXKSE */
 	} else {
 		if (td) {