Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_clock.c      |  2
-rw-r--r--  sys/kern/kern_condvar.c    | 18
-rw-r--r--  sys/kern/kern_exit.c       |  4
-rw-r--r--  sys/kern/kern_intr.c       | 13
-rw-r--r--  sys/kern/kern_mutex.c      | 14
-rw-r--r--  sys/kern/kern_shutdown.c   |  2
-rw-r--r--  sys/kern/kern_sig.c        |  8
-rw-r--r--  sys/kern/kern_subr.c       |  2
-rw-r--r--  sys/kern/kern_synch.c      | 12
-rw-r--r--  sys/kern/subr_taskqueue.c  |  2
-rw-r--r--  sys/kern/subr_trap.c       |  2
-rw-r--r--  sys/kern/subr_turnstile.c  | 14
-rw-r--r--  sys/kern/subr_witness.c    | 16
13 files changed, 41 insertions(+), 68 deletions(-)
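
The change repeated across all thirteen files: the MTX_NOSWITCH, SWI_NOSWITCH, and LOP_NOSWITCH flags, and the _NOSWITCH macro variants built on them, are retired. Whether a lock release or SWI schedule may preempt is now inferred from the current thread's critical-section nesting count, td_critnest, instead of being asserted flag by flag at every call site. A minimal before/after sketch of the calling convention, using a hypothetical mutex m:

    /* Before: each caller suppressed preemption explicitly. */
    mtx_unlock_flags(&m, MTX_NOSWITCH);

    /*
     * After: callers just unlock; the release path itself declines to
     * switch unless td_critnest == 1 (no enclosing critical section).
     */
    mtx_unlock(&m);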
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 6aa36ef..7591f11 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -242,7 +242,7 @@ hardclock(frame)
* callout_lock held; incorrect locking order.
*/
if (need_softclock)
- swi_sched(softclock_ih, SWI_NOSWITCH);
+ swi_sched(softclock_ih, 0);
}
/*
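
With SWI_NOSWITCH gone, the only flag swi_sched() still honors is SWI_DELAY; passing 0 requests an immediate schedule of the handler thread, as hardclock() now does for softclock. A hedged usage sketch, where my_ih stands for a hypothetical cookie returned by swi_add():

    swi_sched(my_ih, 0);            /* mark handler needed and schedule it */
    swi_sched(my_ih, SWI_DELAY);    /* mark needed only; scheduled later */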
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 14ed213..fccd59b 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -145,7 +145,7 @@ cv_switch_catch(struct thread *td)
PROC_LOCK(p);
sig = CURSIG(p); /* XXXKSE */
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
if (sig != 0) {
if (td->td_wchan != NULL)
cv_waitq_remove(td);
@@ -218,8 +218,8 @@ cv_wait(struct cv *cvp, struct mtx *mp)
}
CV_WAIT_VALIDATE(cvp, mp);
- DROP_GIANT_NOSWITCH();
- mtx_unlock_flags(mp, MTX_NOSWITCH);
+ DROP_GIANT();
+ mtx_unlock(mp);
cv_waitq_add(cvp, td);
cv_switch(td);
@@ -273,8 +273,8 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
}
CV_WAIT_VALIDATE(cvp, mp);
- DROP_GIANT_NOSWITCH();
- mtx_unlock_flags(mp, MTX_NOSWITCH);
+ DROP_GIANT();
+ mtx_unlock(mp);
cv_waitq_add(cvp, td);
sig = cv_switch_catch(td);
@@ -339,8 +339,8 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
}
CV_WAIT_VALIDATE(cvp, mp);
- DROP_GIANT_NOSWITCH();
- mtx_unlock_flags(mp, MTX_NOSWITCH);
+ DROP_GIANT();
+ mtx_unlock(mp);
cv_waitq_add(cvp, td);
callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
@@ -412,8 +412,8 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
}
CV_WAIT_VALIDATE(cvp, mp);
- DROP_GIANT_NOSWITCH();
- mtx_unlock_flags(mp, MTX_NOSWITCH);
+ DROP_GIANT();
+ mtx_unlock(mp);
cv_waitq_add(cvp, td);
callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
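
All four wait routines change the same way: DROP_GIANT() and a plain mtx_unlock() replace the _NOSWITCH variants before the thread is queued on the condition variable. The consumer-visible contract is untouched: the interlock is released atomically with enqueuing and re-acquired before cv_wait() returns. A hedged usage sketch with hypothetical softc fields sc_mtx, sc_cv, and sc_done:

    mtx_lock(&sc->sc_mtx);
    while (!sc->sc_done)            /* re-check: wakeups may be spurious */
            cv_wait(&sc->sc_cv, &sc->sc_mtx);
    mtx_unlock(&sc->sc_mtx);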
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index d772be6..b9e1641 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -402,7 +402,7 @@ exit1(td, rv)
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
- mtx_unlock_flags(&Giant, MTX_NOSWITCH);
+ mtx_unlock(&Giant);
/*
* We have to wait until after releasing all locks before
@@ -413,7 +413,7 @@ exit1(td, rv)
p->p_stat = SZOMB;
wakeup(p->p_pptr);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
cnt.v_swtch++;
cpu_throw();
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 88868dc..4bd6837 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -381,9 +381,9 @@ ithread_schedule(struct ithd *ithread, int do_switch)
* Set it_need to tell the thread to keep running if it is already
* running. Then, grab sched_lock and see if we actually need to
* put this thread on the runqueue. If so and the do_switch flag is
- * true, then switch to the ithread immediately. Otherwise, set the
- * needresched flag to guarantee that this ithread will run before any
- * userland processes.
+ * true and it is safe to switch, then switch to the ithread
+ * immediately. Otherwise, set the needresched flag to guarantee
+ * that this ithread will run before any userland processes.
*/
ithread->it_need = 1;
mtx_lock_spin(&sched_lock);
@@ -391,7 +391,8 @@ ithread_schedule(struct ithd *ithread, int do_switch)
CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
p->p_stat = SRUN;
setrunqueue(td); /* XXXKSE */
- if (do_switch && curthread->td_proc->p_stat == SRUN) {
+ if (do_switch && curthread->td_critnest == 1 &&
+ curthread->td_proc->p_stat == SRUN) {
if (curthread != PCPU_GET(idlethread))
setrunqueue(curthread);
curthread->td_proc->p_stats->p_ru.ru_nivcsw++;
@@ -458,7 +459,7 @@ swi_sched(void *cookie, int flags)
*/
atomic_store_rel_int(&ih->ih_need, 1);
if (!(flags & SWI_DELAY)) {
- error = ithread_schedule(it, !cold && flags & SWI_SWITCH);
+ error = ithread_schedule(it, !cold);
KASSERT(error == 0, ("stray software interrupt"));
}
}
@@ -580,7 +581,7 @@ SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
void
legacy_setsoftnet(void)
{
- swi_sched(net_ih, SWI_NOSWITCH);
+ swi_sched(net_ih, 0);
}
/*
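
The reworded comment above is the heart of the change: whether to switch to a freshly runnable ithread is decided by context, not by a caller-passed flag. ithread_schedule() has itself taken sched_lock (a spin lock) a few lines earlier, and a spin-lock acquisition enters a critical section, so that acquisition accounts for one level of td_critnest; a count of exactly 1 therefore means the caller held no spin locks and was in no critical section of its own. A hedged restatement as a hypothetical helper, not part of the commit:

    /*
     * Immediate preemption to the ithread is permissible only if the
     * current thread is still runnable and the sole critnest level is
     * the one from sched_lock held by ithread_schedule() itself.
     */
    static int
    can_switch_now(struct thread *td)
    {
            return (td->td_critnest == 1 && td->td_proc->p_stat == SRUN);
    }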
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 852b570..2c5217b 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -209,8 +209,6 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{
MPASS(curthread != NULL);
- KASSERT((opts & MTX_NOSWITCH) == 0,
- ("MTX_NOSWITCH used at %s:%d", file, line));
_get_sleep_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
@@ -264,12 +262,6 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
- /*
- * _mtx_trylock does not accept MTX_NOSWITCH option.
- */
- KASSERT((opts & MTX_NOSWITCH) == 0,
- ("mtx_trylock() called with invalid option flag(s) %d", opts));
-
rval = _obtain_lock(m, curthread);
LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
@@ -524,7 +516,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
td1->td_proc->p_stat = SRUN;
setrunqueue(td1);
- if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
+ if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
if (td->td_ithd != NULL) {
struct ithd *it = td->td_ithd;
@@ -691,8 +683,8 @@ mtx_destroy(struct mtx *m)
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
/* Tell witness this isn't locked to make it happy. */
- WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
- __FILE__, __LINE__);
+ WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
+ __LINE__);
}
WITNESS_DESTROY(&m->mtx_object);
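
_mtx_unlock_sleep() applies the same rule when a contested release wakes a higher-priority waiter. The function takes sched_lock itself, which contributes one critnest level, so the new td_critnest == 1 test is the context-derived equivalent of the old per-call-site flag; annotated side by side, where td1 is the awakened waiter and waiter_pri_higher abbreviates the priority comparison from the hunk above:

    /* Old: switch unless this particular caller said not to. */
    if ((opts & MTX_NOSWITCH) == 0 && waiter_pri_higher)
            ;       /* preempt in favor of td1 */

    /* New: switch only when no enclosing critical section forbids it. */
    if (td->td_critnest == 1 && waiter_pri_higher)
            ;       /* preempt in favor of td1 */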
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 648e438..256d7bc 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -268,7 +268,7 @@ boot(int howto)
pbusy = nbusy;
sync(thread0, NULL);
if (curthread != NULL) {
- DROP_GIANT_NOSWITCH();
+ DROP_GIANT();
for (subiter = 0; subiter < 50 * iter; subiter++) {
mtx_lock_spin(&sched_lock);
setrunqueue(curthread);
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index d3f01e6..cc989e0 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1560,8 +1560,8 @@ issignal(p)
do {
mtx_lock_spin(&sched_lock);
stop(p);
- PROC_UNLOCK_NOSWITCH(p);
- DROP_GIANT_NOSWITCH();
+ PROC_UNLOCK(p);
+ DROP_GIANT();
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
@@ -1639,8 +1639,8 @@ issignal(p)
}
mtx_lock_spin(&sched_lock);
stop(p);
- PROC_UNLOCK_NOSWITCH(p);
- DROP_GIANT_NOSWITCH();
+ PROC_UNLOCK(p);
+ DROP_GIANT();
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index cab78c2..9449192 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -386,7 +386,7 @@ uio_yield()
td = curthread;
mtx_lock_spin(&sched_lock);
- DROP_GIANT_NOSWITCH();
+ DROP_GIANT();
td->td_ksegrp->kg_pri.pri_level = td->td_ksegrp->kg_pri.pri_user;
setrunqueue(td);
td->td_proc->p_stats->p_ru.ru_nivcsw++;
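
uio_yield() is the voluntary-yield idiom in miniature: sched_lock is held across the Giant drop, the requeue, and the switch. DROP_GIANT() no longer needs a _NOSWITCH spelling because, if Giant is contested, its release path recurses on sched_lock, sees td_critnest above 1, and refrains from switching; the one intended switch is the explicit mi_switch(). The tail of the function, not shown in the hunk, would in this era read roughly as follows (a hedged reconstruction, not part of the diff):

    mi_switch();                    /* the deliberate context switch */
    mtx_unlock_spin(&sched_lock);
    PICKUP_GIANT();                 /* re-take Giant if it was held */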
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index fce470f..9f3ba01 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -437,17 +437,17 @@ msleep(ident, mtx, priority, wmesg, timo)
* in case this is the idle process and already asleep.
*/
if (mtx != NULL && priority & PDROP)
- mtx_unlock_flags(mtx, MTX_NOSWITCH);
+ mtx_unlock(mtx);
mtx_unlock_spin(&sched_lock);
return (0);
}
- DROP_GIANT_NOSWITCH();
+ DROP_GIANT();
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
WITNESS_SAVE(&mtx->mtx_object, mtx);
- mtx_unlock_flags(mtx, MTX_NOSWITCH);
+ mtx_unlock(mtx);
if (priority & PDROP)
mtx = NULL;
}
@@ -482,7 +482,7 @@ msleep(ident, mtx, priority, wmesg, timo)
PROC_LOCK(p);
sig = CURSIG(p);
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
if (sig != 0) {
if (td->td_wchan != NULL)
unsleep(td);
@@ -750,13 +750,13 @@ mi_switch()
PROC_LOCK(p);
killproc(p, "exceeded maximum CPU limit");
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
} else {
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
psignal(p, SIGXCPU);
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
if (rlim->rlim_cur < rlim->rlim_max) {
/* XXX: we should make a private copy */
rlim->rlim_cur += 5;
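
msleep() keeps its documented contract throughout: the interlock is dropped atomically with going to sleep and re-acquired before return, unless PDROP asks msleep() to leave it unlocked. A hedged consumer sketch with hypothetical names sc_mtx and sc_ready:

    mtx_lock(&sc->sc_mtx);
    while (!sc->sc_ready)           /* guard against spurious wakeups */
            msleep(&sc->sc_ready, &sc->sc_mtx, PRIBIO, "hypwt", 0);
    mtx_unlock(&sc->sc_mtx);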
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
index 6052aea..72afa6b 100644
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -209,7 +209,7 @@ taskqueue_run(struct taskqueue *queue)
static void
taskqueue_swi_enqueue(void *context)
{
- swi_sched(taskqueue_ih, SWI_NOSWITCH);
+ swi_sched(taskqueue_ih, 0);
}
static void
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 5d7edec..46a19b7 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -82,7 +82,7 @@ userret(td, frame, oticks)
mtx_lock_spin(&sched_lock);
kg->kg_pri.pri_level = kg->kg_pri.pri_user;
if (ke->ke_flags & KEF_NEEDRESCHED) {
- DROP_GIANT_NOSWITCH();
+ DROP_GIANT();
setrunqueue(td);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 852b570..2c5217b 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -209,8 +209,6 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{
MPASS(curthread != NULL);
- KASSERT((opts & MTX_NOSWITCH) == 0,
- ("MTX_NOSWITCH used at %s:%d", file, line));
_get_sleep_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
@@ -264,12 +262,6 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
- /*
- * _mtx_trylock does not accept MTX_NOSWITCH option.
- */
- KASSERT((opts & MTX_NOSWITCH) == 0,
- ("mtx_trylock() called with invalid option flag(s) %d", opts));
-
rval = _obtain_lock(m, curthread);
LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
@@ -524,7 +516,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
td1->td_proc->p_stat = SRUN;
setrunqueue(td1);
- if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
+ if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
if (td->td_ithd != NULL) {
struct ithd *it = td->td_ithd;
@@ -691,8 +683,8 @@ mtx_destroy(struct mtx *m)
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
/* Tell witness this isn't locked to make it happy. */
- WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
- __FILE__, __LINE__);
+ WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
+ __LINE__);
}
WITNESS_DESTROY(&m->mtx_object);
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index b798579..37dc369 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -818,7 +818,7 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
instance->li_lock->lo_name,
instance->li_flags);
instance->li_flags--;
- goto out;
+ return;
}
s = cpu_critical_enter();
CTR4(KTR_WITNESS,
@@ -839,23 +839,11 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
td->td_proc->p_pid, lle);
witness_lock_list_free(lle);
}
- goto out;
+ return;
}
}
panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
file, line);
-out:
- /*
- * We don't need to protect this PCPU_GET() here against preemption
- * because if we hold any spinlocks then we are already protected,
- * and if we don't we will get NULL if we hold no spinlocks even if
- * we switch CPU's while reading it.
- */
- if (class->lc_flags & LC_SLEEPLOCK) {
- if ((flags & LOP_NOSWITCH) == 0 && PCPU_GET(spinlocks) != NULL)
- panic("switchable sleep unlock (%s) %s @ %s:%d",
- class->lc_name, lock->lo_name, file, line);
- }
}
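
The deleted out: block was the old scheme's backstop: witness panicked on a "switchable sleep unlock" performed while spin locks were held, catching callers that forgot LOP_NOSWITCH. With the no-switch decision now derived from td_critnest inside the release path itself, that misuse can no longer occur, so the check (and the goto-out indirection above) is dropped. A hedged illustration with hypothetical locks, not code from the commit:

    mtx_lock_spin(&my_spin_mtx);    /* spin acquire enters a critical
                                       section: td_critnest goes up */
    mtx_unlock(&my_sleep_mtx);      /* release path sees td_critnest > 1
                                       and declines to preempt */
    mtx_unlock_spin(&my_spin_mtx);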
/*