summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordavidxu <davidxu@FreeBSD.org>2003-06-15 00:31:24 +0000
committerdavidxu <davidxu@FreeBSD.org>2003-06-15 00:31:24 +0000
commitabb4420bbe55ce526dd8d7eb8ac3e1b33969d3fa (patch)
treef1ad1d6af931a255911ca3bbb68ac9677e8e30c0
parent11be88d0438fcd4af13ed8006b4e293451dc780c (diff)
downloadFreeBSD-src-abb4420bbe55ce526dd8d7eb8ac3e1b33969d3fa.zip
FreeBSD-src-abb4420bbe55ce526dd8d7eb8ac3e1b33969d3fa.tar.gz
Rename P_THREADED to P_SA. P_SA means a process is using scheduler
activations.
-rw-r--r--sys/alpha/alpha/trap.c2
-rw-r--r--sys/alpha/linux/linux_sysvec.c2
-rw-r--r--sys/amd64/amd64/pmap.c2
-rw-r--r--sys/amd64/amd64/trap.c2
-rw-r--r--sys/compat/svr4/svr4_sysvec.c2
-rw-r--r--sys/ddb/db_ps.c6
-rw-r--r--sys/fs/procfs/procfs_status.c2
-rw-r--r--sys/i386/i386/pmap.c2
-rw-r--r--sys/i386/i386/sys_machdep.c2
-rw-r--r--sys/i386/i386/trap.c2
-rw-r--r--sys/i386/linux/linux_sysvec.c2
-rw-r--r--sys/ia64/ia64/trap.c2
-rw-r--r--sys/kern/kern_clock.c6
-rw-r--r--sys/kern/kern_exec.c4
-rw-r--r--sys/kern/kern_exit.c4
-rw-r--r--sys/kern/kern_fork.c6
-rw-r--r--sys/kern/kern_kse.c12
-rw-r--r--sys/kern/kern_sig.c2
-rw-r--r--sys/kern/kern_switch.c10
-rw-r--r--sys/kern/kern_synch.c4
-rw-r--r--sys/kern/kern_thread.c12
-rw-r--r--sys/kern/sched_4bsd.c2
-rw-r--r--sys/kern/sched_ule.c2
-rw-r--r--sys/kern/subr_trap.c4
-rw-r--r--sys/kern/tty.c2
-rw-r--r--sys/sparc64/sparc64/trap.c2
-rw-r--r--sys/sys/proc.h2
27 files changed, 51 insertions, 51 deletions
diff --git a/sys/alpha/alpha/trap.c b/sys/alpha/alpha/trap.c
index 86311e4..0bb6cae 100644
--- a/sys/alpha/alpha/trap.c
+++ b/sys/alpha/alpha/trap.c
@@ -658,7 +658,7 @@ syscall(code, framep)
sticks = td->td_sticks;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_user_enter(p, td);
#ifdef DIAGNOSTIC
alpha_fpstate_check(td);
diff --git a/sys/alpha/linux/linux_sysvec.c b/sys/alpha/linux/linux_sysvec.c
index 6730c89..278014b 100644
--- a/sys/alpha/linux/linux_sysvec.c
+++ b/sys/alpha/linux/linux_sysvec.c
@@ -94,7 +94,7 @@ elf_linux_fixup(register_t **stack_base, struct image_params *imgp)
register_t *pos;
KASSERT(curthread->td_proc == imgp->proc &&
- (curthread->td_proc->p_flag & P_THREADED) == 0,
+ (curthread->td_proc->p_flag & P_SA) == 0,
("unsafe elf_linux_fixup(), should be curproc"));
args = (Elf64_Auxargs *)imgp->auxargs;
pos = *stack_base + (imgp->argc + imgp->envc + 2);
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 3e26cfa..8a3226b 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2979,7 +2979,7 @@ pmap_activate(struct thread *td)
/* XXXKSE this is wrong.
* pmap_activate is for the current thread on the current cpu
*/
- if (p->p_flag & P_THREADED) {
+ if (p->p_flag & P_SA) {
/* Make sure all other cr3 entries are updated. */
/* what if they are running? XXXKSE (maybe abort them) */
FOREACH_THREAD_IN_PROC(p, td) {
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index c4e7f16..e16f0ec 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -673,7 +673,7 @@ syscall(frame)
td->td_frame = &frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_user_enter(p, td);
params = (caddr_t)frame.tf_rsp + sizeof(register_t);
code = frame.tf_rax;
diff --git a/sys/compat/svr4/svr4_sysvec.c b/sys/compat/svr4/svr4_sysvec.c
index 9b4588c..125f460 100644
--- a/sys/compat/svr4/svr4_sysvec.c
+++ b/sys/compat/svr4/svr4_sysvec.c
@@ -210,7 +210,7 @@ svr4_fixup(register_t **stack_base, struct image_params *imgp)
register_t *pos;
KASSERT(curthread->td_proc == imgp->proc &&
- (curthread->td_proc->p_flag & P_THREADED) == 0,
+ (curthread->td_proc->p_flag & P_SA) == 0,
("unsafe svr4_fixup(), should be curproc"));
args = (Elf32_Auxargs *)imgp->auxargs;
pos = *stack_base + (imgp->argc + imgp->envc + 2);
diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c
index 0b8ae9e..1917c66 100644
--- a/sys/ddb/db_ps.c
+++ b/sys/ddb/db_ps.c
@@ -127,7 +127,7 @@ db_ps(dummy1, dummy2, dummy3, dummy4)
p->p_ucred != NULL ? p->p_ucred->cr_ruid : 0, pp->p_pid,
p->p_pgrp != NULL ? p->p_pgrp->pg_id : 0, p->p_flag,
state);
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
db_printf("(threaded) %s\n", p->p_comm);
FOREACH_THREAD_IN_PROC(p, td) {
dumpthread(p, td);
@@ -145,7 +145,7 @@ db_ps(dummy1, dummy2, dummy3, dummy4)
static void
dumpthread(volatile struct proc *p, volatile struct thread *td)
{
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
db_printf( " thread %p ksegrp %p ", td, td->td_ksegrp);
if (TD_ON_SLEEPQ(td)) {
if (td->td_flags & TDF_CVWAITQ)
@@ -198,7 +198,7 @@ dumpthread(volatile struct proc *p, volatile struct thread *td)
default:
db_printf("[UNK: %#x]", td->td_state);
}
- if (p->p_flag & P_THREADED) {
+ if (p->p_flag & P_SA) {
if (td->td_kse)
db_printf("[kse %p]", td->td_kse);
db_printf("\n");
diff --git a/sys/fs/procfs/procfs_status.c b/sys/fs/procfs/procfs_status.c
index feb830a..69a0ae4 100644
--- a/sys/fs/procfs/procfs_status.c
+++ b/sys/fs/procfs/procfs_status.c
@@ -117,7 +117,7 @@ procfs_doprocstatus(PFS_FILL_ARGS)
}
mtx_lock_spin(&sched_lock);
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
wmesg = "-kse- ";
else {
tdfirst = FIRST_THREAD_IN_PROC(p);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index c9dc0af..a2401c1 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3258,7 +3258,7 @@ pmap_activate(struct thread *td)
/* XXXKSE this is wrong.
* pmap_activate is for the current thread on the current cpu
*/
- if (p->p_flag & P_THREADED) {
+ if (p->p_flag & P_SA) {
/* Make sure all other cr3 entries are updated. */
/* what if they are running? XXXKSE (maybe abort them) */
FOREACH_THREAD_IN_PROC(p, td) {
diff --git a/sys/i386/i386/sys_machdep.c b/sys/i386/i386/sys_machdep.c
index fe99f79..2e4cd26 100644
--- a/sys/i386/i386/sys_machdep.c
+++ b/sys/i386/i386/sys_machdep.c
@@ -135,7 +135,7 @@ i386_extend_pcb(struct thread *td)
0 /* granularity */
};
- if (td->td_proc->p_flag & P_THREADED)
+ if (td->td_proc->p_flag & P_SA)
return (EINVAL); /* XXXKSE */
/* XXXKSE All the code below only works in 1:1 needs changing */
ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1));
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index 524bb7e..1f165f2 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -951,7 +951,7 @@ syscall(frame)
td->td_frame = &frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_user_enter(p, td);
params = (caddr_t)frame.tf_esp + sizeof(int);
code = frame.tf_eax;
diff --git a/sys/i386/linux/linux_sysvec.c b/sys/i386/linux/linux_sysvec.c
index 541b7a9..0e92296 100644
--- a/sys/i386/linux/linux_sysvec.c
+++ b/sys/i386/linux/linux_sysvec.c
@@ -236,7 +236,7 @@ elf_linux_fixup(register_t **stack_base, struct image_params *imgp)
register_t *pos;
KASSERT(curthread->td_proc == imgp->proc &&
- (curthread->td_proc->p_flag & P_THREADED) == 0,
+ (curthread->td_proc->p_flag & P_SA) == 0,
("unsafe elf_linux_fixup(), should be curproc"));
args = (Elf32_Auxargs *)imgp->auxargs;
pos = *stack_base + (imgp->argc + imgp->envc + 2);
diff --git a/sys/ia64/ia64/trap.c b/sys/ia64/ia64/trap.c
index d22b8ff0..9b57912 100644
--- a/sys/ia64/ia64/trap.c
+++ b/sys/ia64/ia64/trap.c
@@ -944,7 +944,7 @@ syscall(struct trapframe *tf)
sticks = td->td_sticks;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_user_enter(p, td);
if (p->p_sysent->sv_prepsyscall) {
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 15626c4..50a48b1 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -164,7 +164,7 @@ hardclock_process(frame)
* Run current process's virtual and profile time, as needed.
*/
mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
- if (p->p_flag & P_THREADED) {
+ if (p->p_flag & P_SA) {
/* XXXKSE What to do? */
} else {
pstats = p->p_stats;
@@ -370,7 +370,7 @@ statclock(frame)
/*
* Charge the time as appropriate.
*/
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_statclock(1);
p->p_uticks++;
if (ke->ke_ksegrp->kg_nice > NZERO)
@@ -394,7 +394,7 @@ statclock(frame)
p->p_iticks++;
cp_time[CP_INTR]++;
} else {
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_statclock(0);
td->td_sticks++;
p->p_sticks++;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 9ef4c02..ed97677 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -190,7 +190,7 @@ kern_execve(td, fname, argv, envv, mac_p)
PROC_LOCK(p);
KASSERT((p->p_flag & P_INEXEC) == 0,
("%s(): process already has P_INEXEC flag", __func__));
- if (p->p_flag & P_THREADED || p->p_numthreads > 1) {
+ if (p->p_flag & P_SA || p->p_numthreads > 1) {
if (thread_single(SINGLE_EXIT)) {
PROC_UNLOCK(p);
return (ERESTART); /* Try again later. */
@@ -199,7 +199,7 @@ kern_execve(td, fname, argv, envv, mac_p)
* If we get here all other threads are dead,
* so unset the associated flags and lose KSE mode.
*/
- p->p_flag &= ~P_THREADED;
+ p->p_flag &= ~P_SA;
td->td_mailbox = NULL;
thread_single_end();
}
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 283a8bd..c65dfeb 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -134,7 +134,7 @@ exit1(struct thread *td, int rv)
* MUST abort all other threads before proceeding past here.
*/
PROC_LOCK(p);
- if (p->p_flag & P_THREADED || p->p_numthreads > 1) {
+ if (p->p_flag & P_SA || p->p_numthreads > 1) {
/*
* First check if some other thread got here before us..
* if so, act appropriately, (exit or suspend);
@@ -164,7 +164,7 @@ exit1(struct thread *td, int rv)
* ...
* Turn off threading support.
*/
- p->p_flag &= ~P_THREADED;
+ p->p_flag &= ~P_SA;
thread_single_end(); /* Don't need this any more. */
}
/*
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 4c514df..4045feb 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -259,7 +259,7 @@ fork1(td, flags, pages, procp)
* other side with the expectation that the process is about to
* exec.
*/
- if (p1->p_flag & P_THREADED) {
+ if (p1->p_flag & P_SA) {
/*
* Idle the other threads for a second.
* Since the user space is copied, it must remain stable.
@@ -717,7 +717,7 @@ again:
/*
* If other threads are waiting, let them continue now
*/
- if (p1->p_flag & P_THREADED) {
+ if (p1->p_flag & P_SA) {
PROC_LOCK(p1);
thread_single_end();
PROC_UNLOCK(p1);
@@ -732,7 +732,7 @@ again:
fail:
sx_xunlock(&allproc_lock);
uma_zfree(proc_zone, newproc);
- if (p1->p_flag & P_THREADED) {
+ if (p1->p_flag & P_SA) {
PROC_LOCK(p1);
thread_single_end();
PROC_UNLOCK(p1);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 32c755a..2080493 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -393,7 +393,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
struct thread *td2;
p = td->td_proc;
- if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
+ if (!(p->p_flag & P_SA) || (uap->tmbx == NULL))
return (EINVAL);
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td2) {
@@ -456,7 +456,7 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
ke = td->td_kse;
if (p->p_numthreads == 1) {
kse_purge(p, td);
- p->p_flag &= ~P_THREADED;
+ p->p_flag &= ~P_SA;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
@@ -545,7 +545,7 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
td2 = NULL;
ku = NULL;
/* KSE-enabled processes only, please. */
- if (!(p->p_flag & P_THREADED))
+ if (!(p->p_flag & P_SA))
return (EINVAL);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
@@ -620,7 +620,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
/* Easier to just set it than to test and set */
PROC_LOCK(p);
- p->p_flag |= P_THREADED;
+ p->p_flag |= P_SA;
PROC_UNLOCK(p);
kg = td->td_ksegrp;
if (uap->newgroup) {
@@ -1764,7 +1764,7 @@ thread_single(int force_exit)
PROC_LOCK_ASSERT(p, MA_OWNED);
KASSERT((td != NULL), ("curthread is NULL"));
- if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
+ if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
return (0);
/* Is someone already single threading? */
@@ -1906,7 +1906,7 @@ thread_suspend_check(int return_instead)
if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
while (mtx_owned(&Giant))
mtx_unlock(&Giant);
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_exit();
else
thr_exit1();
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 4a71567..4d67050 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -2190,7 +2190,7 @@ postsig(sig)
p->p_code = 0;
p->p_sig = 0;
}
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_signal_add(curthread, sig);
else
(*p->p_sysent->sv_sendsig)(action, sig,
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index f25a8da..306ad63 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -142,7 +142,7 @@ retry:
td = ke->ke_thread;
KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
kg = ke->ke_ksegrp;
- if (td->td_proc->p_flag & P_THREADED) {
+ if (td->td_proc->p_flag & P_SA) {
if (kg->kg_last_assigned == td) {
kg->kg_last_assigned = TAILQ_PREV(td,
threadqueue, td_runq);
@@ -247,7 +247,7 @@ remrunqueue(struct thread *td)
/*
* If it is not a threaded process, take the shortcut.
*/
- if ((td->td_proc->p_flag & P_THREADED) == 0) {
+ if ((td->td_proc->p_flag & P_SA) == 0) {
/* Bring its kse with it, leave the thread attached */
sched_rem(ke);
ke->ke_state = KES_THREAD;
@@ -290,7 +290,7 @@ adjustrunqueue( struct thread *td, int newpri)
/*
* If it is not a threaded process, take the shortcut.
*/
- if ((td->td_proc->p_flag & P_THREADED) == 0) {
+ if ((td->td_proc->p_flag & P_SA) == 0) {
/* We only care about the kse in the run queue. */
td->td_priority = newpri;
if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
@@ -331,7 +331,7 @@ setrunqueue(struct thread *td)
TD_SET_RUNQ(td);
kg = td->td_ksegrp;
kg->kg_runnable++;
- if ((td->td_proc->p_flag & P_THREADED) == 0) {
+ if ((td->td_proc->p_flag & P_SA) == 0) {
/*
* Common path optimisation: Only one of everything
* and the KSE is always already attached.
@@ -651,7 +651,7 @@ thread_sanity_check(struct thread *td, char *string)
}
}
- if ((p->p_flag & P_THREADED) == 0) {
+ if ((p->p_flag & P_SA) == 0) {
if (ke == NULL) {
panc(string, "non KSE thread lost kse");
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 0142fad..be11257 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -170,7 +170,7 @@ msleep(ident, mtx, priority, wmesg, timo)
* the thread (recursion here might be bad).
*/
mtx_lock_spin(&sched_lock);
- if (p->p_flag & P_THREADED || p->p_numthreads > 1) {
+ if (p->p_flag & P_SA || p->p_numthreads > 1) {
/*
* Just don't bother if we are exiting
* and not the exiting thread or thread was marked as
@@ -517,7 +517,7 @@ mi_switch(void)
CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
sched_nest = sched_lock.mtx_recurse;
- if (td->td_proc->p_flag & P_THREADED)
+ if (td->td_proc->p_flag & P_SA)
thread_switchout(td);
sched_switchout(td);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 32c755a..2080493 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -393,7 +393,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
struct thread *td2;
p = td->td_proc;
- if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
+ if (!(p->p_flag & P_SA) || (uap->tmbx == NULL))
return (EINVAL);
mtx_lock_spin(&sched_lock);
FOREACH_THREAD_IN_PROC(p, td2) {
@@ -456,7 +456,7 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
ke = td->td_kse;
if (p->p_numthreads == 1) {
kse_purge(p, td);
- p->p_flag &= ~P_THREADED;
+ p->p_flag &= ~P_SA;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
@@ -545,7 +545,7 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
td2 = NULL;
ku = NULL;
/* KSE-enabled processes only, please. */
- if (!(p->p_flag & P_THREADED))
+ if (!(p->p_flag & P_SA))
return (EINVAL);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
@@ -620,7 +620,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
/* Easier to just set it than to test and set */
PROC_LOCK(p);
- p->p_flag |= P_THREADED;
+ p->p_flag |= P_SA;
PROC_UNLOCK(p);
kg = td->td_ksegrp;
if (uap->newgroup) {
@@ -1764,7 +1764,7 @@ thread_single(int force_exit)
PROC_LOCK_ASSERT(p, MA_OWNED);
KASSERT((td != NULL), ("curthread is NULL"));
- if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
+ if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
return (0);
/* Is someone already single threading? */
@@ -1906,7 +1906,7 @@ thread_suspend_check(int return_instead)
if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
while (mtx_owned(&Giant))
mtx_unlock(&Giant);
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_exit();
else
thr_exit1();
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index fa8f627..ec5ea5f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -590,7 +590,7 @@ sched_switchout(struct thread *td)
if (TD_IS_RUNNING(td)) {
/* Put us back on the run queue (kse and all). */
setrunqueue(td);
- } else if (p->p_flag & P_THREADED) {
+ } else if (p->p_flag & P_SA) {
/*
* We will not be on the run queue. So we must be
* sleeping or similar. As it's available,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 72bf3d3..bb34516 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -771,7 +771,7 @@ sched_switchout(struct thread *td)
* We will not be on the run queue. So we must be
* sleeping or similar.
*/
- if (td->td_proc->p_flag & P_THREADED)
+ if (td->td_proc->p_flag & P_SA)
kse_reassign(ke);
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 15bc58f..396afa7 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -109,7 +109,7 @@ userret(td, frame, oticks)
/*
* Do special thread processing, e.g. upcall tweaking and such.
*/
- if (p->p_flag & P_THREADED) {
+ if (p->p_flag & P_SA) {
thread_userret(td, frame);
}
@@ -254,7 +254,7 @@ ast(struct trapframe *framep)
}
mtx_unlock(&p->p_sigacts->ps_mtx);
PROC_UNLOCK(p);
- if (p->p_flag & P_THREADED && sigs) {
+ if (p->p_flag & P_SA && sigs) {
struct kse_upcall *ku = td->td_upcall;
if ((void *)TRAPF_PC(framep) != ku->ku_func) {
mtx_lock_spin(&sched_lock);
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index 685c13d..1c8aa6b 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -2419,7 +2419,7 @@ ttyinfo(struct tty *tp)
td = FIRST_THREAD_IN_PROC(pick);
sprefix = "";
- if (pick->p_flag & P_THREADED) {
+ if (pick->p_flag & P_SA) {
stmp = "KSE" ; /* XXXKSE */
} else {
if (td) {
diff --git a/sys/sparc64/sparc64/trap.c b/sys/sparc64/sparc64/trap.c
index 4ea297b..6f800f6 100644
--- a/sys/sparc64/sparc64/trap.c
+++ b/sys/sparc64/sparc64/trap.c
@@ -490,7 +490,7 @@ syscall(struct trapframe *tf)
td->td_frame = tf;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
- if (p->p_flag & P_THREADED)
+ if (p->p_flag & P_SA)
thread_user_enter(p, td);
code = tf->tf_global[1];
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 5ab0d0c..699bb08 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -629,7 +629,7 @@ struct proc {
#define P_WAITED 0x01000 /* Someone is waiting for us */
#define P_WEXIT 0x02000 /* Working on exiting. */
#define P_EXEC 0x04000 /* Process called exec. */
-#define P_THREADED 0x08000 /* Process is using threads. */
+#define P_SA 0x08000 /* Using scheduler activations. */
#define P_CONTINUED 0x10000 /* Proc has continued from a stopped state. */
#define P_STOPPED_SIG 0x20000 /* Stopped due to SIGSTOP/SIGTSTP */
#define P_STOPPED_TRACE 0x40000 /* Stopped because of tracing */
OpenPOWER on IntegriCloud