author      jeff <jeff@FreeBSD.org>   2008-03-19 06:19:01 +0000
committer   jeff <jeff@FreeBSD.org>   2008-03-19 06:19:01 +0000
commit      46f09d5bc3f6480ce5db48b9d76f3e83299f588c (patch)
tree        720fece23c8482c35e341e275a0f4068ee43126f /sys/kern
parent      f8600f40e7fcf241985d8a978c63889de0969bfb (diff)
- Relax requirements for p_numthreads, p_threads, p_swtick, and p_nice from
requiring the per-process spinlock to only requiring the process lock.
- Reflect these changes in the proc.h documentation and consumers throughout
the kernel. This is a substantial reduction in locking cost for these
fields and was made possible by recent changes to threading support.
Diffstat (limited to 'sys/kern')

 sys/kern/kern_cpuset.c   | 12
 sys/kern/kern_exit.c     |  2
 sys/kern/kern_kthread.c  |  2
 sys/kern/kern_lockf.c    |  6
 sys/kern/kern_proc.c     | 18
 sys/kern/kern_resource.c | 20
 sys/kern/kern_sig.c      | 46
 sys/kern/kern_thr.c      |  2
 sys/kern/kern_thread.c   | 12
 sys/kern/sched_4bsd.c    |  7
 sys/kern/sched_ule.c     |  3
 sys/kern/sys_generic.c   |  7
 sys/kern/sys_process.c   |  6
 sys/kern/tty.c           | 12

 14 files changed, 42 insertions(+), 113 deletions(-)
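
Two shapes recur in the kern_sig.c hunks below: tdsigwakeup() now takes the locks it needs itself, and the spinlock is acquired only around the suspension state it still protects. A minimal sketch of that resulting shape, written as if it were a helper inside kern_sig.c (deliver_and_unsuspend() is hypothetical; tdsigwakeup() and thread_unsuspend() are the real routines touched by the diff):

/*
 * Sketch only -- not a hunk from this diff.  The caller holds the
 * process mutex; the spinlock is now scoped to p_suspcount and the
 * suspension machinery rather than the entire signal path.
 */
static void
deliver_and_unsuspend(struct proc *p, struct thread *td, int sig,
    sig_t action, int intrval)
{
    PROC_LOCK_ASSERT(p, MA_OWNED);
    tdsigwakeup(td, sig, action, intrval);  /* takes its own locks now */
    PROC_SLOCK(p);                          /* spinlock only for this */
    thread_unsuspend(p);
    PROC_SUNLOCK(p);
}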
diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c
index e22cc8f..0ccb33a 100644
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -382,11 +382,9 @@ cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
 		sx_slock(&allproc_lock);
 		FOREACH_PROC_IN_SYSTEM(p) {
 			PROC_LOCK(p);
-			PROC_SLOCK(p);
 			FOREACH_THREAD_IN_PROC(p, td)
 				if (td->td_tid == id)
 					break;
-			PROC_SUNLOCK(p);
 			if (td != NULL)
 				break;
 			PROC_UNLOCK(p);
@@ -480,11 +478,9 @@ cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
 		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
 		if (error)
 			goto out;
-		PROC_SLOCK(p);
 		if (nfree >= p->p_numthreads)
 			break;
 		threads = p->p_numthreads;
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 		for (; nfree < threads; nfree++) {
 			nset = uma_zalloc(cpuset_zone, M_WAITOK);
@@ -492,7 +488,6 @@ cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
 		}
 	}
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
 	/*
 	 * Now that the appropriate locks are held and we have enough cpusets,
 	 * make sure the operation will succeed before applying changes.  The
@@ -526,8 +521,8 @@ cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
 	}
 	/*
 	 * Replace each thread's cpuset while using deferred release.  We
-	 * must do this because the PROC_SLOCK has to be held while traversing
-	 * the thread list and this limits the type of operations allowed.
+	 * must do this because the thread lock must be held while operating
+	 * on the thread and this limits the type of operations allowed.
 	 */
 	FOREACH_THREAD_IN_PROC(p, td) {
 		thread_lock(td);
@@ -561,7 +556,6 @@ cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
 		thread_unlock(td);
 	}
 unlock_out:
-	PROC_SUNLOCK(p);
 	PROC_UNLOCK(p);
 out:
 	while ((nset = LIST_FIRST(&droplist)) != NULL)
@@ -833,13 +827,11 @@ cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
 			thread_unlock(ttd);
 			break;
 		case CPU_WHICH_PID:
-			PROC_SLOCK(p);
 			FOREACH_THREAD_IN_PROC(p, ttd) {
 				thread_lock(ttd);
 				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
 				thread_unlock(ttd);
 			}
-			PROC_SUNLOCK(p);
 			break;
 		case CPU_WHICH_CPUSET:
 			CPU_COPY(&set->cs_mask, mask);
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 910f742..d2e22e9 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -510,9 +510,7 @@ exit1(struct thread *td, int rv)
 	 * proc lock.
 	 */
 	wakeup(p->p_pptr);
-	PROC_SLOCK(p->p_pptr);
 	sched_exit(p->p_pptr, td);
-	PROC_SUNLOCK(p->p_pptr);
 	PROC_SLOCK(p);
 	p->p_state = PRS_ZOMBIE;
 	PROC_UNLOCK(p->p_pptr);
diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index ed74774..18f1672 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -292,14 +292,12 @@ kthread_add(void (*func)(void *), void *arg, struct proc *p,
 	PROC_LOCK(p);
 	p->p_flag |= P_HADTHREADS;
 	newtd->td_sigmask = oldtd->td_sigmask; /* XXX dubious */
-	PROC_SLOCK(p);
 	thread_link(newtd, p);
 	thread_lock(oldtd);
 	/* let the scheduler know about these things.
 	 */
 	sched_fork_thread(oldtd, newtd);
 	TD_SET_CAN_RUN(newtd);
 	thread_unlock(oldtd);
-	PROC_SUNLOCK(p);
 	PROC_UNLOCK(p);
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c
index 51fbc30..6cce5aa 100644
--- a/sys/kern/kern_lockf.c
+++ b/sys/kern/kern_lockf.c
@@ -283,7 +283,7 @@ lf_setlock(lock, vp, clean)
 		wproc = (struct proc *)block->lf_id;
 	restart:
 		nproc = NULL;
-		PROC_SLOCK(wproc);
+		PROC_LOCK(wproc);
 		FOREACH_THREAD_IN_PROC(wproc, td) {
 			thread_lock(td);
 			while (td->td_wchan &&
@@ -296,8 +296,8 @@ restart:
 					break;
 				nproc = (struct proc *)waitblock->lf_id;
 				if (nproc == (struct proc *)lock->lf_id) {
-					PROC_SUNLOCK(wproc);
 					thread_unlock(td);
+					PROC_UNLOCK(wproc);
 					lock->lf_next = *clean;
 					*clean = lock;
 					return (EDEADLK);
@@ -305,7 +305,7 @@ restart:
 			}
 			thread_unlock(td);
 		}
-		PROC_SUNLOCK(wproc);
+		PROC_UNLOCK(wproc);
 		wproc = nproc;
 		if (wproc)
 			goto restart;
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 5ca1e60..29975d1 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -640,11 +640,11 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
 	struct ucred *cred;
 	struct sigacts *ps;
 
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	bzero(kp, sizeof(*kp));
 	kp->ki_structsize = sizeof(*kp);
 	kp->ki_paddr = p;
-	PROC_LOCK_ASSERT(p, MA_OWNED);
 	kp->ki_addr =/* p->p_addr; */0; /* XXX */
 	kp->ki_args = p->p_args;
 	kp->ki_textvp = p->p_textvp;
@@ -776,7 +776,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
 	struct proc *p;
 
 	p = td->td_proc;
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 
 	thread_lock(td);
 	if (td->td_wmesg != NULL)
@@ -851,10 +851,8 @@ fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
 {
 
 	fill_kinfo_proc_only(p, kp);
-	PROC_SLOCK(p);
 	if (FIRST_THREAD_IN_PROC(p) != NULL)
 		fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
-	PROC_SUNLOCK(p);
 }
 
 struct pstats *
@@ -921,15 +919,12 @@ sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
 	fill_kinfo_proc_only(p, &kinfo_proc);
 	if (flags & KERN_PROC_NOTHREADS) {
-		PROC_SLOCK(p);
 		if (FIRST_THREAD_IN_PROC(p) != NULL)
 			fill_kinfo_thread(FIRST_THREAD_IN_PROC(p),
 			    &kinfo_proc, 0);
-		PROC_SUNLOCK(p);
 		error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
 		    sizeof(kinfo_proc));
 	} else {
-		PROC_SLOCK(p);
 		if (FIRST_THREAD_IN_PROC(p) != NULL)
 			FOREACH_THREAD_IN_PROC(p, td) {
 				fill_kinfo_thread(td, &kinfo_proc, 1);
@@ -941,7 +936,6 @@ sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
 		else
 			error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
 			    sizeof(kinfo_proc));
-		PROC_SUNLOCK(p);
 	}
 	PROC_UNLOCK(p);
 	if (error)
@@ -1483,7 +1477,7 @@ sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS)
 	lwpidarray = NULL;
 	numthreads = 0;
 
-	PROC_SLOCK(p);
+	PROC_LOCK(p);
 repeat:
 	if (numthreads < p->p_numthreads) {
 		if (lwpidarray != NULL) {
@@ -1491,13 +1485,12 @@ repeat:
 			lwpidarray = NULL;
 		}
 		numthreads = p->p_numthreads;
-		PROC_SUNLOCK(p);
+		PROC_UNLOCK(p);
 		lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,
 		    M_WAITOK | M_ZERO);
-		PROC_SLOCK(p);
+		PROC_LOCK(p);
 		goto repeat;
 	}
-	PROC_SUNLOCK(p);
 	i = 0;
 
 	/*
@@ -1509,7 +1502,6 @@ repeat:
 	 * have changed, in which case the right to extract debug info might
 	 * no longer be assured.
 	 */
-	PROC_LOCK(p);
 	FOREACH_THREAD_IN_PROC(p, td) {
 		KASSERT(i < numthreads,
 		    ("sysctl_kern_proc_kstack: numthreads"));
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 6a867a6..e34d97f 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -266,9 +266,7 @@ donice(struct thread *td, struct proc *p, int n)
 		n = PRIO_MIN;
 	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
 		return (EACCES);
-	PROC_SLOCK(p);
 	sched_nice(p, n);
-	PROC_SUNLOCK(p);
 	return (0);
 }
 
@@ -307,7 +305,6 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
 	case RTP_LOOKUP:
 		if ((error = p_cansee(td, p)))
 			break;
-		PROC_SLOCK(p);
 		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
 			td1 = td;
 		else
@@ -316,7 +313,6 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
 			pri_to_rtp(td1, &rtp);
 		else
 			error = ESRCH;
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
 	case RTP_SET:
@@ -341,7 +337,6 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
 			break;
 		}
 
-		PROC_SLOCK(p);
 		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
 			td1 = td;
 		else
@@ -350,7 +345,6 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
 			error = rtp_to_pri(&rtp, td1);
 		else
 			error = ESRCH;
-		PROC_SUNLOCK(p);
 		break;
 	default:
 		error = EINVAL;
@@ -399,7 +393,6 @@ rtprio(td, uap)
 	case RTP_LOOKUP:
 		if ((error = p_cansee(td, p)))
 			break;
-		PROC_SLOCK(p);
 		/*
 		 * Return OUR priority if no pid specified,
 		 * or if one is, report the highest priority
@@ -425,7 +418,6 @@ rtprio(td, uap)
 				}
 			}
 		}
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
 	case RTP_SET:
@@ -456,7 +448,6 @@ rtprio(td, uap)
 		 * do all the threads on that process. If we
 		 * specify our own pid we do the latter.
 		 */
-		PROC_SLOCK(p);
 		if (uap->pid == 0) {
 			error = rtp_to_pri(&rtp, td);
 		} else {
@@ -465,7 +456,6 @@ rtprio(td, uap)
 			FOREACH_THREAD_IN_PROC(p, td1) {
 				if ((error = rtp_to_pri(&rtp, td1)) != 0)
 					break;
 			}
 		}
-		PROC_SUNLOCK(p);
 		break;
 	default:
 		error = EINVAL;
@@ -698,9 +688,7 @@ kern_setrlimit(td, which, limp)
 		if (limp->rlim_cur != RLIM_INFINITY &&
 		    p->p_cpulimit == RLIM_INFINITY)
 			callout_reset(&p->p_limco, hz, lim_cb, p);
-		PROC_SLOCK(p);
 		p->p_cpulimit = limp->rlim_cur;
-		PROC_SUNLOCK(p);
 		break;
 	case RLIMIT_DATA:
 		if (limp->rlim_cur > maxdsiz)
@@ -956,11 +944,12 @@ kern_getrusage(td, who, rup)
 	struct rusage *rup;
 {
 	struct proc *p;
+	int error;
 
+	error = 0;
 	p = td->td_proc;
 	PROC_LOCK(p);
 	switch (who) {
-
 	case RUSAGE_SELF:
 		rufetchcalc(p, rup, &rup->ru_utime,
 		    &rup->ru_stime);
@@ -972,11 +961,10 @@ kern_getrusage(td, who, rup)
 		break;
 
 	default:
-		PROC_UNLOCK(p);
-		return (EINVAL);
+		error = EINVAL;
 	}
 	PROC_UNLOCK(p);
-	return (0);
+	return (error);
 }
 
 void
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 0e1c493..eee71f1 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -508,10 +508,8 @@ sigqueue_delete_set_proc(struct proc *p, sigset_t *set)
 	sigqueue_init(&worklist, NULL);
 	sigqueue_move_set(&p->p_sigqueue, &worklist, set);
 
-	PROC_SLOCK(p);
 	FOREACH_THREAD_IN_PROC(p, td0)
 		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
-	PROC_SUNLOCK(p);
 
 	sigqueue_flush(&worklist);
 }
@@ -734,9 +732,7 @@ kern_sigaction(td, sig, act, oact, flags)
 		    (sigprop(sig) & SA_IGNORE &&
 		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
 			/* never to be seen again */
-			PROC_SLOCK(p);
 			sigqueue_delete_proc(p, sig);
-			PROC_SUNLOCK(p);
 			if (sig != SIGCONT)
 				/* easier in psignal */
 				SIGADDSET(ps->ps_sigignore, sig);
@@ -932,9 +928,7 @@ execsigs(struct proc *p)
 		if (sigprop(sig) & SA_IGNORE) {
 			if (sig != SIGCONT)
 				SIGADDSET(ps->ps_sigignore, sig);
-			PROC_SLOCK(p);
 			sigqueue_delete_proc(p, sig);
-			PROC_SUNLOCK(p);
 		}
 		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
 	}
@@ -1879,7 +1873,6 @@ sigtd(struct proc *p, int sig, int prop)
 	if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
 		return (curthread);
 	signal_td = NULL;
-	PROC_SLOCK(p);
 	FOREACH_THREAD_IN_PROC(p, td) {
 		if (!SIGISMEMBER(td->td_sigmask, sig)) {
 			signal_td = td;
@@ -1888,7 +1881,6 @@ sigtd(struct proc *p, int sig, int prop)
 	}
 	if (signal_td == NULL)
 		signal_td = FIRST_THREAD_IN_PROC(p);
-	PROC_SUNLOCK(p);
 	return (signal_td);
 }
 
@@ -2026,9 +2018,7 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 			ksiginfo_tryfree(ksi);
 		return (ret);
 	}
-	PROC_SLOCK(p);
 	sigqueue_delete_proc(p, SIGCONT);
-	PROC_SUNLOCK(p);
 	if (p->p_flag & P_CONTINUED) {
 		p->p_flag &= ~P_CONTINUED;
 		PROC_LOCK(p->p_pptr);
@@ -2066,7 +2056,6 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 	 * waking up threads so that they can cross the user boundary.
 	 * We try do the per-process part here.
 	 */
-	PROC_SLOCK(p);
 	if (P_SHOULDSTOP(p)) {
 		/*
 		 * The process is in stopped mode. All the threads should be
@@ -2078,7 +2067,6 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 		 * so no further action is necessary.
 		 * No signal can restart us.
 		 */
-		PROC_SUNLOCK(p);
 		goto out;
 	}
 
@@ -2104,6 +2092,7 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 		 * Otherwise, process goes back to sleep state.
 		 */
 		p->p_flag &= ~P_STOPPED_SIG;
+		PROC_SLOCK(p);
 		if (p->p_numthreads == p->p_suspcount) {
 			PROC_SUNLOCK(p);
 			p->p_flag |= P_CONTINUED;
@@ -2124,6 +2113,7 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 			 * The process wants to catch it so it needs
 			 * to run at least one thread, but which one?
 			 */
+			PROC_SUNLOCK(p);
 			goto runfast;
 		}
 		/*
@@ -2140,7 +2130,6 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 		 * (If we did the shell could get confused).
 		 * Just make sure the signal STOP bit set.
 		 */
-		PROC_SUNLOCK(p);
 		p->p_flag |= P_STOPPED_SIG;
 		sigqueue_delete(sigqueue, sig);
 		goto out;
@@ -2154,6 +2143,7 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 		 * the PROCESS runnable, leave it stopped.
 		 * It may run a bit until it hits a thread_suspend_check().
 		 */
+		PROC_SLOCK(p);
 		thread_lock(td);
 		if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
 			sleepq_abort(td, intrval);
@@ -2166,22 +2156,18 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 		 */
 	} else if (p->p_state == PRS_NORMAL) {
 		if (p->p_flag & P_TRACED || action == SIG_CATCH) {
-			thread_lock(td);
 			tdsigwakeup(td, sig, action, intrval);
-			thread_unlock(td);
-			PROC_SUNLOCK(p);
 			goto out;
 		}
 
 		MPASS(action == SIG_DFL);
 
 		if (prop & SA_STOP) {
-			if (p->p_flag & P_PPWAIT) {
-				PROC_SUNLOCK(p);
+			if (p->p_flag & P_PPWAIT)
 				goto out;
-			}
 			p->p_flag |= P_STOPPED_SIG;
 			p->p_xstat = sig;
+			PROC_SLOCK(p);
 			sig_suspend_threads(td, p, 1);
 			if (p->p_numthreads == p->p_suspcount) {
 				/*
@@ -2197,13 +2183,9 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 			} else
 				PROC_SUNLOCK(p);
 			goto out;
-		}
-		else
-			goto runfast;
-		/* NOTREACHED */
+		}
 	} else {
 		/* Not in "NORMAL" state. discard the signal. */
-		PROC_SUNLOCK(p);
 		sigqueue_delete(sigqueue, sig);
 		goto out;
 	}
@@ -2212,11 +2194,9 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 	 * The process is not stopped so we need to apply the signal to all the
 	 * running threads.
 	 */
-
 runfast:
-	thread_lock(td);
 	tdsigwakeup(td, sig, action, intrval);
-	thread_unlock(td);
+	PROC_SLOCK(p);
 	thread_unsuspend(p);
 	PROC_SUNLOCK(p);
 out:
@@ -2237,17 +2217,16 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
 	register int prop;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	prop = sigprop(sig);
 
+	PROC_SLOCK(p);
+	thread_lock(td);
 	/*
 	 * Bring the priority of a thread up if we want it to get
 	 * killed in this lifetime.
 	 */
 	if (action == SIG_DFL && (prop & SA_KILL) && td->td_priority > PUSER)
 		sched_prio(td, PUSER);
-
 	if (TD_ON_SLEEPQ(td)) {
 		/*
 		 * If thread is sleeping uninterruptibly
@@ -2256,7 +2235,7 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
 		 * trap() or syscall().
 		 */
 		if ((td->td_flags & TDF_SINTR) == 0)
-			return;
+			goto out;
 		/*
 		 * If SIGCONT is default (or ignored) and process is
 		 * asleep, we are finished; the process should not
@@ -2271,8 +2250,6 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
 			 * Remove from both for now.
 			 */
 			sigqueue_delete(&td->td_sigqueue, sig);
-			PROC_SLOCK(p);
-			thread_lock(td);
 			return;
 		}
 
@@ -2294,6 +2271,9 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
 			forward_signal(td);
 #endif
 	}
+out:
+	PROC_SUNLOCK(p);
+	thread_unlock(td);
 }
 
 static void
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 25bf8d1..3d410b5 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -229,14 +229,12 @@ create_thread(struct thread *td, mcontext_t *ctx,
 	PROC_LOCK(td->td_proc);
 	td->td_proc->p_flag |= P_HADTHREADS;
 	newtd->td_sigmask = td->td_sigmask;
-	PROC_SLOCK(p);
 	thread_link(newtd, p);
 	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
 	thread_lock(td);
 	/* let the scheduler know about these things.
 	 */
 	sched_fork_thread(td, newtd);
 	thread_unlock(td);
-	PROC_SUNLOCK(p);
 	PROC_UNLOCK(p);
 	thread_lock(newtd);
 	if (rtp != NULL) {
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index ec63f89..91d8c55 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -345,9 +345,7 @@ thread_exit(void)
 #ifdef AUDIT
 	AUDIT_SYSCALL_EXIT(0, td);
 #endif
-	umtx_thread_exit(td);
-
 	/*
 	 * drop FPU & debug register state storage, or any other
 	 * architecture specific resources that
@@ -374,9 +372,7 @@ thread_exit(void)
 	 */
 	if (p->p_flag & P_HADTHREADS) {
 		if (p->p_numthreads > 1) {
-			thread_lock(td);
 			thread_unlink(td);
-			thread_unlock(td);
 			td2 = FIRST_THREAD_IN_PROC(p);
 			sched_exit_thread(td2, td);
@@ -450,8 +446,8 @@ thread_link(struct thread *td, struct proc *p)
 	/*
 	 * XXX This can't be enabled because it's called for proc0 before
-	 * it's spinlock has been created.
-	 * PROC_SLOCK_ASSERT(p, MA_OWNED);
+	 * its lock has been created.
+	 * PROC_LOCK_ASSERT(p, MA_OWNED);
 	 */
 	td->td_state = TDS_INACTIVE;
 	td->td_proc = p;
@@ -487,7 +483,7 @@ thread_unlink(struct thread *td)
 	struct proc *p = td->td_proc;
 
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	TAILQ_REMOVE(&p->p_threads, td, td_plist);
 	p->p_numthreads--;
 	/* could clear a few other things here */
@@ -863,11 +859,9 @@ thread_find(struct proc *p, lwpid_t tid)
 	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK(p);
 	FOREACH_THREAD_IN_PROC(p, td) {
 		if (td->td_tid == tid)
 			break;
 	}
-	PROC_SUNLOCK(p);
 	return (td);
 }
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index ed906b6..691e3b4 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -357,7 +357,7 @@ schedcpu(void)
 	realstathz = stathz ? stathz : hz;
 	sx_slock(&allproc_lock);
 	FOREACH_PROC_IN_SYSTEM(p) {
-		PROC_SLOCK(p);
+		PROC_LOCK(p);
 		FOREACH_THREAD_IN_PROC(p, td) {
 			awake = 0;
 			thread_lock(td);
@@ -436,7 +436,7 @@ XXX this is broken
 				resetpriority_thread(td);
 			thread_unlock(td);
 		} /* end of thread loop */
-		PROC_SUNLOCK(p);
+		PROC_UNLOCK(p);
 	} /* end of process loop */
 	sx_sunlock(&allproc_lock);
 }
@@ -616,7 +616,7 @@ sched_exit(struct proc *p, struct thread *td)
 	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
 	    td, td->td_name, td->td_priority);
 
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
 }
 
@@ -656,7 +656,6 @@ sched_nice(struct proc *p, int nice)
 	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
 	p->p_nice = nice;
 	FOREACH_THREAD_IN_PROC(p, td) {
 		thread_lock(td);
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 8bfec8d..67239ba 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1843,7 +1843,6 @@ sched_nice(struct proc *p, int nice)
 	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
 
 	p->p_nice = nice;
 	FOREACH_THREAD_IN_PROC(p, td) {
@@ -1996,7 +1995,7 @@ sched_exit(struct proc *p, struct thread *child)
 	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
 	    child, child->td_name, child->td_priority);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	td = FIRST_THREAD_IN_PROC(p);
 	sched_exit_thread(td, child);
 }
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index 90946ea..209e5a9 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -1013,13 +1013,8 @@ poll(td, uap)
 	 * least enough for the current limits. We want to be reasonably
 	 * safe, but not overly restrictive.
 	 */
-	PROC_LOCK(td->td_proc);
-	if ((nfds > lim_cur(td->td_proc, RLIMIT_NOFILE)) &&
-	    (nfds > FD_SETSIZE)) {
-		PROC_UNLOCK(td->td_proc);
+	if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
 		return (EINVAL);
-	}
-	PROC_UNLOCK(td->td_proc);
 	ni = nfds * sizeof(struct pollfd);
 	if (ni > sizeof(smallbits))
 		bits = malloc(ni, M_TEMP, M_WAITOK);
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 77ddee5..dbdd30f 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -528,12 +528,10 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
 		sx_slock(&allproc_lock);
 		FOREACH_PROC_IN_SYSTEM(p) {
 			PROC_LOCK(p);
-			PROC_SLOCK(p);
 			FOREACH_THREAD_IN_PROC(p, td2) {
 				if (td2->td_tid == pid)
 					break;
 			}
-			PROC_SUNLOCK(p);
 			if (td2 != NULL)
 				break;	/* proc lock held */
 			PROC_UNLOCK(p);
@@ -789,7 +787,6 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
 		thread_unlock(td2);
 		td2->td_xsig = data;
 
-		PROC_SLOCK(p);
 		if (req == PT_DETACH) {
 			struct thread *td3;
 			FOREACH_THREAD_IN_PROC(p, td3) {
@@ -803,6 +800,7 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
 		 * you should use PT_SUSPEND to suspend it before
 		 * continuing process.
 		 */
+		PROC_SLOCK(p);
 		p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
 		thread_unsuspend(p);
 		PROC_SUNLOCK(p);
@@ -957,13 +955,11 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
 		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
 		tmp = 0;
 		PROC_LOCK(p);
-		PROC_SLOCK(p);
 		FOREACH_THREAD_IN_PROC(p, td2) {
 			if (tmp >= num)
 				break;
 			buf[tmp++] = td2->td_tid;
 		}
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
 		free(buf, M_TEMP);
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index 932f87a..076a35c 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -2581,7 +2581,7 @@ ttyinfo(struct tty *tp)
 		if (proc_compare(pick, p))
 			pick = p;
 
-	PROC_SLOCK(pick);
+	PROC_LOCK(pick);
 	picktd = NULL;
 	td = FIRST_THREAD_IN_PROC(pick);
 	FOREACH_THREAD_IN_PROC(pick, td)
@@ -2615,7 +2615,7 @@ ttyinfo(struct tty *tp)
 		rss = 0;
 	else
 		rss = pgtok(vmspace_resident_count(pick->p_vmspace));
-	PROC_SUNLOCK(pick);
+	PROC_UNLOCK(pick);
 	PROC_LOCK(pick);
 	PGRP_UNLOCK(tp->t_pgrp);
 	rufetchcalc(pick, &ru, &utime, &stime);
@@ -2744,12 +2744,12 @@ proc_compare(struct proc *p1, struct proc *p2)
 	 * Fetch various stats about these processes. After we drop the
 	 * lock the information could be stale but the race is unimportant.
 	 */
-	PROC_SLOCK(p1);
+	PROC_LOCK(p1);
 	runa = proc_sum(p1, &esta);
-	PROC_SUNLOCK(p1);
-	PROC_SLOCK(p2);
+	PROC_UNLOCK(p1);
+	PROC_LOCK(p2);
 	runb = proc_sum(p2, &estb);
-	PROC_SUNLOCK(p2);
+	PROC_UNLOCK(p2);
 
 	/*
 	 * see if at least one of them is runnable
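
For reference, the tid-scan pattern seen in the cpuset and ptrace hunks reduces, after this commit, to a proc-lock-only walk. A minimal sketch under the assumption that no locks are held on entry (tid_lookup() is hypothetical; thread_find() is the real helper changed above):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>

/*
 * Hypothetical helper, for illustration only.  With this commit,
 * walking each process's thread list requires only PROC_LOCK();
 * previously the scan also needed the per-process spinlock.
 */
static struct thread *
tid_lookup(lwpid_t tid)
{
    struct proc *p;
    struct thread *td;

    sx_slock(&allproc_lock);
    FOREACH_PROC_IN_SYSTEM(p) {
        PROC_LOCK(p);
        td = thread_find(p, tid);   /* asserts the proc lock itself */
        if (td != NULL) {
            sx_sunlock(&allproc_lock);
            return (td);            /* returned with the proc lock held */
        }
        PROC_UNLOCK(p);
    }
    sx_sunlock(&allproc_lock);
    return (NULL);
}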