author    dchagin <dchagin@FreeBSD.org>  2016-01-09 14:08:10 +0000
committer dchagin <dchagin@FreeBSD.org>  2016-01-09 14:08:10 +0000
commit    939deb433656eadcd7e74873a32a38411367cdc8 (patch)
tree      098f09b40a7709202e8662e68db796b52cf431ce /sys/kern
parent    8120f17059427acef6857f4445f402c4646a266a (diff)
To facilitate an upcoming Linuxulator merge, partially MFC r275121 (by kib). Only the syntax changes from r275121 are merged; the PROC_*LOCK() macros still lock the same proc spinlock.

The process spin lock currently has the following distinct uses:

- Thread lifetime cycle, in particular counting of the threads in the process, and interlocking with the process mutex and thread lock. The main reason for this is that turnstile locks come after thread locks, so you e.g. cannot unlock a blockable mutex (think process mutex) while owning a thread lock.

- Virtual and profiling itimers, since timer activation is done from the clock interrupt context. Replace p_slock with p_itimmtx and PROC_ITIMLOCK().

- Profiling code (profil(2)), for a similar reason. Replace p_slock with p_profmtx and PROC_PROFLOCK().

- Resource usage accounting. The need for the spinlock there is subtle: my understanding is that the spinlock blocks context switching for the current thread, which prevents td_runtime and similar fields from changing (updates are done at mi_switch()). Replace p_slock with p_statmtx and PROC_STATLOCK().

Discussed with: kib
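For orientation, here is a minimal sketch of the macro layering this partial merge implies. Because only the syntax of r275121 is merged, the new per-use macros are assumed to simply alias the existing process spinlock; the dedicated p_itimmtx, p_profmtx, and p_statmtx mutexes named above take effect only with the full change in head. The definitions below are illustrative, not the actual sys/sys/proc.h text.

/*
 * Illustrative sketch only -- not copied from sys/sys/proc.h.
 * In this partial MFC each new lock class is assumed to resolve to
 * the single process spinlock; in head after r275121 each class
 * gets its own mutex, as described in the commit message.
 */
#define	PROC_ITIMLOCK(p)	PROC_SLOCK(p)	/* virtual/profiling itimers */
#define	PROC_ITIMUNLOCK(p)	PROC_SUNLOCK(p)
#define	PROC_PROFLOCK(p)	PROC_SLOCK(p)	/* profil(2) sample buffer */
#define	PROC_PROFUNLOCK(p)	PROC_SUNLOCK(p)
#define	PROC_STATLOCK(p)	PROC_SLOCK(p)	/* rusage/runtime accounting */
#define	PROC_STATUNLOCK(p)	PROC_SUNLOCK(p)
#define	PROC_STATLOCK_ASSERT(p, ma)	PROC_SLOCK_ASSERT((p), (ma))

With this layering, callers name the state they protect rather than the underlying lock, so a hunk such as the rufetchcalc() change in kern_resource.c below stays correct both before and after the locks are actually split.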
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/init_main.c      4
-rw-r--r--  sys/kern/kern_clock.c    16
-rw-r--r--  sys/kern/kern_exit.c      8
-rw-r--r--  sys/kern/kern_proc.c      8
-rw-r--r--  sys/kern/kern_racct.c     4
-rw-r--r--  sys/kern/kern_resource.c 20
-rw-r--r--  sys/kern/kern_thread.c    6
-rw-r--r--  sys/kern/kern_time.c     20
-rw-r--r--  sys/kern/subr_prof.c     16
9 files changed, 53 insertions, 49 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 23b4a6d..a014988 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -608,9 +608,9 @@ proc0_post(void *dummy __unused)
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
microuptime(&p->p_stats->p_start);
- PROC_SLOCK(p);
+ PROC_STATLOCK(p);
rufetch(p, &ru); /* Clears thread stats */
- PROC_SUNLOCK(p);
+ PROC_STATUNLOCK(p);
p->p_rux.rux_runtime = 0;
p->p_rux.rux_uticks = 0;
p->p_rux.rux_sticks = 0;
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 08c58be..c62c10f 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -433,16 +433,16 @@ hardclock_cpu(int usermode)
flags = 0;
if (usermode &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
- PROC_SLOCK(p);
+ PROC_ITIMLOCK(p);
if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
flags |= TDF_ALRMPEND | TDF_ASTPENDING;
- PROC_SUNLOCK(p);
+ PROC_ITIMUNLOCK(p);
}
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
- PROC_SLOCK(p);
+ PROC_ITIMLOCK(p);
if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
flags |= TDF_PROFPEND | TDF_ASTPENDING;
- PROC_SUNLOCK(p);
+ PROC_ITIMUNLOCK(p);
}
thread_lock(td);
sched_tick(1);
@@ -521,18 +521,18 @@ hardclock_cnt(int cnt, int usermode)
flags = 0;
if (usermode &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
- PROC_SLOCK(p);
+ PROC_ITIMLOCK(p);
if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
tick * cnt) == 0)
flags |= TDF_ALRMPEND | TDF_ASTPENDING;
- PROC_SUNLOCK(p);
+ PROC_ITIMUNLOCK(p);
}
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
- PROC_SLOCK(p);
+ PROC_ITIMLOCK(p);
if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
tick * cnt) == 0)
flags |= TDF_PROFPEND | TDF_ASTPENDING;
- PROC_SUNLOCK(p);
+ PROC_ITIMUNLOCK(p);
}
thread_lock(td);
sched_tick(cnt);
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index d00a554..76b4427 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -660,7 +660,9 @@ exit1(struct thread *td, int rv)
/*
* Save our children's rusage information in our exit rusage.
*/
+ PROC_STATLOCK(p);
ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
+ PROC_STATUNLOCK(p);
/*
* Make sure the scheduler takes this thread out of its tables etc.
@@ -1044,8 +1046,6 @@ proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
return (0);
}
- PROC_SLOCK(p);
-
if (siginfo != NULL) {
bzero(siginfo, sizeof(*siginfo));
siginfo->si_errno = 0;
@@ -1092,7 +1092,9 @@ proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
if (wrusage != NULL) {
rup = &wrusage->wru_self;
*rup = p->p_ru;
+ PROC_STATLOCK(p);
calcru(p, &rup->ru_utime, &rup->ru_stime);
+ PROC_STATUNLOCK(p);
rup = &wrusage->wru_children;
*rup = p->p_stats->p_cru;
@@ -1100,10 +1102,10 @@ proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
}
if (p->p_state == PRS_ZOMBIE && !check_only) {
+ PROC_SLOCK(p);
proc_reap(td, p, status, options);
return (-1);
}
- PROC_SUNLOCK(p);
PROC_UNLOCK(p);
return (1);
}
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index ac0152a..c9b7ca3 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -872,11 +872,11 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
kp->ki_fibnum = p->p_fibnum;
kp->ki_start = p->p_stats->p_start;
timevaladd(&kp->ki_start, &boottime);
- PROC_SLOCK(p);
+ PROC_STATLOCK(p);
rufetch(p, &kp->ki_rusage);
kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
- PROC_SUNLOCK(p);
+ PROC_STATUNLOCK(p);
calccru(p, &kp->ki_childutime, &kp->ki_childstime);
/* Some callers want child times in a single value. */
kp->ki_childtime = kp->ki_childstime;
@@ -941,7 +941,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
PROC_LOCK_ASSERT(p, MA_OWNED);
if (preferthread)
- PROC_SLOCK(p);
+ PROC_STATLOCK(p);
thread_lock(td);
if (td->td_wmesg != NULL)
strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
@@ -1008,7 +1008,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
kp->ki_sigmask = td->td_sigmask;
thread_unlock(td);
if (preferthread)
- PROC_SUNLOCK(p);
+ PROC_STATUNLOCK(p);
}
/*
diff --git a/sys/kern/kern_racct.c b/sys/kern/kern_racct.c
index 448922a..9e67034 100644
--- a/sys/kern/kern_racct.c
+++ b/sys/kern/kern_racct.c
@@ -1214,11 +1214,11 @@ racctd(void)
microuptime(&wallclock);
timevalsub(&wallclock, &p->p_stats->p_start);
- PROC_SLOCK(p);
+ PROC_STATLOCK(p);
FOREACH_THREAD_IN_PROC(p, td)
ruxagg(p, td);
runtime = cputick2usec(p->p_rux.rux_runtime);
- PROC_SUNLOCK(p);
+ PROC_STATUNLOCK(p);
#ifdef notyet
KASSERT(runtime >= p->p_prev_runtime,
("runtime < p_prev_runtime"));
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index b8b8fee..3a5d575 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -631,11 +631,11 @@ lim_cb(void *arg)
*/
if (p->p_cpulimit == RLIM_INFINITY)
return;
- PROC_SLOCK(p);
+ PROC_STATLOCK(p);
FOREACH_THREAD_IN_PROC(p, td) {
ruxagg(p, td);
}
- PROC_SUNLOCK(p);
+ PROC_STATUNLOCK(p);
if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
lim_rlimit(p, RLIMIT_CPU, &rlim);
if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
@@ -847,7 +847,7 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp)
uint64_t runtime, u;
PROC_LOCK_ASSERT(p, MA_OWNED);
- PROC_SLOCK_ASSERT(p, MA_OWNED);
+ PROC_STATLOCK_ASSERT(p, MA_OWNED);
/*
* If we are getting stats for the current process, then add in the
* stats that this thread has accumulated in its current time slice.
@@ -879,7 +879,7 @@ rufetchtd(struct thread *td, struct rusage *ru)
uint64_t runtime, u;
p = td->td_proc;
- PROC_SLOCK_ASSERT(p, MA_OWNED);
+ PROC_STATLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
/*
* If we are getting stats for the current thread, then add in the
@@ -1015,11 +1015,11 @@ kern_getrusage(struct thread *td, int who, struct rusage *rup)
break;
case RUSAGE_THREAD:
- PROC_SLOCK(p);
+ PROC_STATLOCK(p);
thread_lock(td);
rufetchtd(td, rup);
thread_unlock(td);
- PROC_SUNLOCK(p);
+ PROC_STATUNLOCK(p);
break;
default:
@@ -1066,7 +1066,7 @@ ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{
THREAD_LOCK_ASSERT(td, MA_OWNED);
- PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
+ PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);
rux->rux_runtime += td->td_incruntime;
rux->rux_uticks += td->td_uticks;
rux->rux_sticks += td->td_sticks;
@@ -1096,7 +1096,7 @@ rufetch(struct proc *p, struct rusage *ru)
{
struct thread *td;
- PROC_SLOCK_ASSERT(p, MA_OWNED);
+ PROC_STATLOCK_ASSERT(p, MA_OWNED);
*ru = p->p_ru;
if (p->p_numthreads > 0) {
@@ -1117,10 +1117,10 @@ rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
struct timeval *sp)
{
- PROC_SLOCK(p);
+ PROC_STATLOCK(p);
rufetch(p, ru);
calcru(p, up, sp);
- PROC_SUNLOCK(p);
+ PROC_STATUNLOCK(p);
}
/*
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 5dc2bb0..e5ec1fb 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -472,6 +472,9 @@ thread_exit(void)
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
PROC_UNLOCK(p);
+ PROC_STATLOCK(p);
+ thread_lock(td);
+ PROC_SUNLOCK(p);
/* Do the same timestamp bookkeeping that mi_switch() would do. */
new_switchtime = cpu_ticks();
@@ -486,9 +489,8 @@ thread_exit(void)
td->td_ru.ru_nvcsw++;
ruxagg(p, td);
rucollect(&p->p_ru, &td->td_ru);
+ PROC_STATUNLOCK(p);
- thread_lock(td);
- PROC_SUNLOCK(p);
td->td_state = TDS_INACTIVE;
#ifdef WITNESS
witness_thread_exit(td);
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index 73d5e15..6ae0fb1 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -273,10 +273,10 @@ get_process_cputime(struct proc *targetp, struct timespec *ats)
uint64_t runtime;
struct rusage ru;
- PROC_SLOCK(targetp);
+ PROC_STATLOCK(targetp);
rufetch(targetp, &ru);
runtime = targetp->p_rux.rux_runtime;
- PROC_SUNLOCK(targetp);
+ PROC_STATUNLOCK(targetp);
cputick2timespec(runtime, ats);
}
@@ -325,17 +325,17 @@ kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
break;
case CLOCK_VIRTUAL:
PROC_LOCK(p);
- PROC_SLOCK(p);
+ PROC_STATLOCK(p);
calcru(p, &user, &sys);
- PROC_SUNLOCK(p);
+ PROC_STATUNLOCK(p);
PROC_UNLOCK(p);
TIMEVAL_TO_TIMESPEC(&user, ats);
break;
case CLOCK_PROF:
PROC_LOCK(p);
- PROC_SLOCK(p);
+ PROC_STATLOCK(p);
calcru(p, &user, &sys);
- PROC_SUNLOCK(p);
+ PROC_STATUNLOCK(p);
PROC_UNLOCK(p);
timevaladd(&user, &sys);
TIMEVAL_TO_TIMESPEC(&user, ats);
@@ -695,9 +695,9 @@ kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
timevalsub(&aitv->it_value, &ctv);
}
} else {
- PROC_SLOCK(p);
+ PROC_ITIMLOCK(p);
*aitv = p->p_stats->p_timer[which];
- PROC_SUNLOCK(p);
+ PROC_ITIMUNLOCK(p);
}
return (0);
}
@@ -779,10 +779,10 @@ kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
aitv->it_value.tv_usec != 0 &&
aitv->it_value.tv_usec < tick)
aitv->it_value.tv_usec = tick;
- PROC_SLOCK(p);
+ PROC_ITIMLOCK(p);
*oitv = p->p_stats->p_timer[which];
p->p_stats->p_timer[which] = *aitv;
- PROC_SUNLOCK(p);
+ PROC_ITIMUNLOCK(p);
}
return (0);
}
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index fcfd748..3d4e86a 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -421,12 +421,12 @@ sys_profil(struct thread *td, struct profil_args *uap)
}
PROC_LOCK(p);
upp = &td->td_proc->p_stats->p_prof;
- PROC_SLOCK(p);
+ PROC_PROFLOCK(p);
upp->pr_off = uap->offset;
upp->pr_scale = uap->scale;
upp->pr_base = uap->samples;
upp->pr_size = uap->size;
- PROC_SUNLOCK(p);
+ PROC_PROFUNLOCK(p);
startprofclock(p);
PROC_UNLOCK(p);
@@ -466,15 +466,15 @@ addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks)
if (ticks == 0)
return;
prof = &td->td_proc->p_stats->p_prof;
- PROC_SLOCK(td->td_proc);
+ PROC_PROFLOCK(td->td_proc);
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
- PROC_SUNLOCK(td->td_proc);
+ PROC_PROFUNLOCK(td->td_proc);
return; /* out of range; ignore */
}
addr = prof->pr_base + i;
- PROC_SUNLOCK(td->td_proc);
+ PROC_PROFUNLOCK(td->td_proc);
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
td->td_profil_addr = pc;
td->td_profil_ticks = ticks;
@@ -509,15 +509,15 @@ addupc_task(struct thread *td, uintfptr_t pc, u_int ticks)
}
p->p_profthreads++;
prof = &p->p_stats->p_prof;
- PROC_SLOCK(p);
+ PROC_PROFLOCK(p);
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
- PROC_SUNLOCK(p);
+ PROC_PROFUNLOCK(p);
goto out;
}
addr = prof->pr_base + i;
- PROC_SUNLOCK(p);
+ PROC_PROFUNLOCK(p);
PROC_UNLOCK(p);
if (copyin(addr, &v, sizeof(v)) == 0) {
v += ticks;