summary refs log tree commit diff stats
path: root/sys/kern
diff options
context:
space:
mode:
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/kern_clock.c42
-rw-r--r--sys/kern/kern_synch.c14
-rw-r--r--sys/kern/subr_smp.c9
-rw-r--r--sys/kern/subr_trap.c12
4 files changed, 44 insertions, 33 deletions
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index bc6bf06..6b3153f 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -58,7 +58,9 @@
#include <sys/sysctl.h>
#include <machine/cpu.h>
+#include <machine/ipl.h>
#include <machine/limits.h>
+#include <machine/mutex.h>
#include <machine/smp.h>
#ifdef GPROF
@@ -161,11 +163,15 @@ hardclock(frame)
pstats = p->p_stats;
if (CLKF_USERMODE(frame) &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
- psignal(p, SIGVTALRM);
+ itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
+ p->p_flag |= P_ALRMPEND;
+ aston();
+ }
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
- psignal(p, SIGPROF);
+ itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
+ p->p_flag |= P_PROFPEND;
+ aston();
+ }
}
#if defined(SMP) && defined(BETTER_CLOCK)
@@ -186,15 +192,7 @@ hardclock(frame)
* relatively high clock interrupt priority any longer than necessary.
*/
if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
- if (CLKF_BASEPRI(frame)) {
- /*
- * Save the overhead of a software interrupt;
- * it will happen as soon as we return, so do it now.
- */
- (void)splsoftclock();
- softclock();
- } else
- setsoftclock();
+ setsoftclock();
} else if (softticks + 1 == ticks)
++softticks;
}
@@ -321,20 +319,24 @@ statclock(frame)
struct rusage *ru;
struct vmspace *vm;
+ mtx_enter(&sched_lock, MTX_SPIN);
+
if (CLKF_USERMODE(frame)) {
/*
* Came from user mode; CPU was in user state.
* If this process is being profiled, record the tick.
*/
- p = prevproc;
+ p = curproc;
if (p->p_flag & P_PROFIL)
addupc_intr(p, CLKF_PC(frame), 1);
#if defined(SMP) && defined(BETTER_CLOCK)
if (stathz != 0)
forward_statclock(pscnt);
#endif
- if (--pscnt > 0)
+ if (--pscnt > 0) {
+ mtx_exit(&sched_lock, MTX_SPIN);
return;
+ }
/*
* Charge the time as appropriate.
*/
@@ -361,8 +363,10 @@ statclock(frame)
if (stathz != 0)
forward_statclock(pscnt);
#endif
- if (--pscnt > 0)
+ if (--pscnt > 0) {
+ mtx_exit(&sched_lock, MTX_SPIN);
return;
+ }
/*
* Came from kernel mode, so we were:
* - handling an interrupt,
@@ -375,8 +379,8 @@ statclock(frame)
* so that we know how much of its real time was spent
* in ``non-process'' (i.e., interrupt) work.
*/
- p = prevproc;
- if (p->p_ithd) {
+ p = curproc;
+ if ((p->p_ithd != NULL) || CLKF_INTR(frame)) {
p->p_iticks++;
cp_time[CP_INTR]++;
} else {
@@ -402,6 +406,8 @@ statclock(frame)
if (ru->ru_maxrss < rss)
ru->ru_maxrss = rss;
}
+
+ mtx_exit(&sched_lock, MTX_SPIN);
}
/*
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index eeddeb6..3031334 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -289,6 +289,7 @@ schedcpu(arg)
if (p->p_stat == SWAIT)
continue;
*/
+ mtx_enter(&sched_lock, MTX_SPIN);
p->p_swtime++;
if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
p->p_slptime++;
@@ -297,13 +298,15 @@ schedcpu(arg)
* If the process has slept the entire second,
* stop recalculating its priority until it wakes up.
*/
- if (p->p_slptime > 1)
+ if (p->p_slptime > 1) {
+ mtx_exit(&sched_lock, MTX_SPIN);
continue;
+ }
+
/*
* prevent state changes and protect run queue
*/
s = splhigh();
- mtx_enter(&sched_lock, MTX_SPIN);
/*
* p_pctcpu is only for ps.
@@ -451,9 +454,6 @@ msleep(ident, mtx, priority, wmesg, timo)
* in case this is the idle process and already asleep.
*/
mtx_exit(&sched_lock, MTX_SPIN);
-#if 0
- splx(safepri);
-#endif
splx(s);
return (0);
}
@@ -994,7 +994,6 @@ setrunnable(p)
p->p_stat = SRUN;
if (p->p_flag & P_INMEM)
setrunqueue(p);
- mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
if (p->p_slptime > 1)
updatepri(p);
@@ -1005,6 +1004,7 @@ setrunnable(p)
}
else
maybe_resched(p);
+ mtx_exit(&sched_lock, MTX_SPIN);
}
/*
@@ -1018,6 +1018,7 @@ resetpriority(p)
{
register unsigned int newpriority;
+ mtx_enter(&sched_lock, MTX_SPIN);
if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT +
NICE_WEIGHT * (p->p_nice - PRIO_MIN);
@@ -1025,6 +1026,7 @@ resetpriority(p)
p->p_usrpri = newpriority;
}
maybe_resched(p);
+ mtx_exit(&sched_lock, MTX_SPIN);
}
/* ARGSUSED */
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index ea9aee8..5ef95b3 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -1900,11 +1900,6 @@ struct simplelock mcount_lock;
struct simplelock com_lock;
#endif /* USE_COMLOCK */
-#ifdef USE_CLOCKLOCK
-/* lock regions around the clock hardware */
-struct simplelock clock_lock;
-#endif /* USE_CLOCKLOCK */
-
/* lock around the MP rendezvous */
static struct simplelock smp_rv_lock;
@@ -1930,9 +1925,6 @@ init_locks(void)
#ifdef USE_COMLOCK
s_lock_init((struct simplelock*)&com_lock);
#endif /* USE_COMLOCK */
-#ifdef USE_CLOCKLOCK
- s_lock_init((struct simplelock*)&clock_lock);
-#endif /* USE_CLOCKLOCK */
s_lock_init(&ap_boot_lock);
}
@@ -2425,7 +2417,6 @@ ap_init(void)
* something unique to lock with.
*/
PCPU_SET(curproc,idleproc);
- PCPU_SET(prevproc,idleproc);
microuptime(&switchtime);
switchticks = ticks;
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 1cbfcfa..b9ebb60 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -1244,6 +1244,18 @@ ast(frame)
addupc_task(p, p->p_stats->p_prof.pr_addr,
p->p_stats->p_prof.pr_ticks);
}
+ if (p->p_flag & P_ALRMPEND) {
+ if (!mtx_owned(&Giant))
+ mtx_enter(&Giant, MTX_DEF);
+ p->p_flag &= ~P_ALRMPEND;
+ psignal(p, SIGVTALRM);
+ }
+ if (p->p_flag & P_PROFPEND) {
+ if (!mtx_owned(&Giant))
+ mtx_enter(&Giant, MTX_DEF);
+ p->p_flag &= ~P_PROFPEND;
+ psignal(p, SIGPROF);
+ }
if (userret(p, &frame, sticks, mtx_owned(&Giant)) != 0)
mtx_exit(&Giant, MTX_DEF);
}
OpenPOWER on IntegriCloud