Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/mp_machdep.c      | 27
-rw-r--r--  sys/i386/i386/mp_machdep.c        | 26
-rw-r--r--  sys/ia64/ia64/mp_machdep.c        | 17
-rw-r--r--  sys/sparc64/sparc64/mp_machdep.c  |  8
-rw-r--r--  sys/sun4v/sun4v/mp_machdep.c      | 14
5 files changed, 16 insertions(+), 76 deletions(-)
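What follows is the interesting part of the change: every MD mp_machdep.c used to open-code the "enter the scheduler for the first time" sequence on an AP, and each preemption IPI handler took the global sched_lock spin mutex. The commit replaces the former with a single MI call, sched_throw(NULL), and the latter with the per-thread thread_lock()/thread_unlock() pair. As a sketch only, assembled from the removed lines below and not the committed MI implementation, sched_throw(NULL) has to cover roughly this for an AP's first switch:

    void
    sched_throw(struct thread *td)
    {

        if (td == NULL) {
            /*
             * First switch on this CPU: take over the spinlock-
             * nesting correction and switch-time stamping that
             * the removed MD code performed by hand.
             */
            mtx_lock_spin(&sched_lock);
            spinlock_exit();
            PCPU_SET(switchtime, cpu_ticks());
            PCPU_SET(switchticks, ticks);
        }
        cpu_throw(td, choosethread());  /* doesn't return */
    }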
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 4066866..b64bc76 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
+#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
@@ -590,25 +591,7 @@ init_secondary(void)
while (smp_started == 0)
ia32_pause();
- /* ok, now grab sched_lock and enter the scheduler */
- mtx_lock_spin(&sched_lock);
-
- /*
- * Correct spinlock nesting. The idle thread context that we are
- * borrowing was created so that it would start out with a single
- * spin lock (sched_lock) held in fork_trampoline(). Since we've
- * explicitly acquired locks in this function, the nesting count
- * is now 2 rather than 1. Since we are nested, calling
- * spinlock_exit() will simply adjust the counts without allowing
- * spin lock using code to interrupt us.
- */
- spinlock_exit();
- KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
-
- PCPU_SET(switchtime, cpu_ticks());
- PCPU_SET(switchticks, ticks);
-
- cpu_throw(NULL, choosethread()); /* doesn't return */
+ sched_throw(NULL);
panic("scheduler returned us to %s", __func__);
/* NOTREACHED */
@@ -988,12 +971,12 @@ ipi_bitmap_handler(struct trapframe frame)
if (ipi_bitmap & (1 << IPI_PREEMPT)) {
struct thread *running_thread = curthread;
- mtx_lock_spin(&sched_lock);
+ thread_lock(running_thread);
if (running_thread->td_critnest > 1)
running_thread->td_owepreempt = 1;
else
mi_switch(SW_INVOL | SW_PREEMPT, NULL);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(running_thread);
}
/* Nothing to do for AST */
@@ -1177,11 +1160,9 @@ release_aps(void *dummy __unused)
if (mp_ncpus == 1)
return;
- mtx_lock_spin(&sched_lock);
atomic_store_rel_int(&aps_ready, 1);
while (smp_started == 0)
ia32_pause();
- mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
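Two amd64 details above deserve a note. ipi_bitmap_handler() now serializes the preemption decision on the target thread's own lock rather than on the global sched_lock; the decision itself is unchanged. And release_aps() loses its sched_lock bracket entirely, presumably because atomic_store_rel_int() already publishes aps_ready with release semantics, so the lock contributed nothing. Below is a minimal user-space model of the preemption decision, using a stand-in struct thread rather than the kernel's:

    #include <stdio.h>

    struct thread {             /* stand-in, not the kernel's */
        int td_critnest;        /* critical-section nesting depth */
        int td_owepreempt;      /* preemption owed on section exit */
    };

    static void
    ipi_preempt(struct thread *td)
    {
        /* thread_lock(td) would bracket this in the kernel. */
        if (td->td_critnest > 1)
            td->td_owepreempt = 1;  /* defer: in a critical section */
        else
            printf("mi_switch(SW_INVOL | SW_PREEMPT)\n");
        /* thread_unlock(td) */
    }

    int
    main(void)
    {
        struct thread td = { .td_critnest = 2, .td_owepreempt = 0 };

        ipi_preempt(&td);
        printf("owepreempt = %d\n", td.td_owepreempt);  /* 1: deferred */
        return (0);
    }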
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index e67a99d..e6d136e 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
+#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
@@ -642,25 +643,8 @@ init_secondary(void)
while (smp_started == 0)
ia32_pause();
- /* ok, now grab sched_lock and enter the scheduler */
- mtx_lock_spin(&sched_lock);
-
- /*
- * Correct spinlock nesting. The idle thread context that we are
- * borrowing was created so that it would start out with a single
- * spin lock (sched_lock) held in fork_trampoline(). Since we've
- * explicitly acquired locks in this function, the nesting count
- * is now 2 rather than 1. Since we are nested, calling
- * spinlock_exit() will simply adjust the counts without allowing
- * spin lock using code to interrupt us.
- */
- spinlock_exit();
- KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
-
- PCPU_SET(switchtime, cpu_ticks());
- PCPU_SET(switchticks, ticks);
-
- cpu_throw(NULL, choosethread()); /* doesn't return */
+ /* enter the scheduler */
+ sched_throw(NULL);
panic("scheduler returned us to %s", __func__);
/* NOTREACHED */
@@ -1194,12 +1178,12 @@ ipi_bitmap_handler(struct trapframe frame)
#ifdef COUNT_IPIS
(*ipi_preempt_counts[cpu])++;
#endif
- mtx_lock_spin(&sched_lock);
+ thread_lock(running_thread);
if (running_thread->td_critnest > 1)
running_thread->td_owepreempt = 1;
else
mi_switch(SW_INVOL | SW_PREEMPT, NULL);
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(running_thread);
}
if (ipi_bitmap & (1 << IPI_AST)) {
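The i386 conversion mirrors the amd64 one line for line; the visible differences are the COUNT_IPIS instrumentation around the preemption hunk and the /* enter the scheduler */ comment retained at the sched_throw() call.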
diff --git a/sys/ia64/ia64/mp_machdep.c b/sys/ia64/ia64/mp_machdep.c
index 0746437..2da3ce6 100644
--- a/sys/ia64/ia64/mp_machdep.c
+++ b/sys/ia64/ia64/mp_machdep.c
@@ -111,16 +111,6 @@ ia64_ap_startup(void)
PCPU_SET(curthread, PCPU_GET(idlethread));
/*
- * Correct spinlock nesting. The idle thread context that we are
- * borrowing was created so that it would start out with a single
- * spin lock (sched_lock) held in fork_trampoline(). Since we
- * don't have any locks and explicitly acquire locks when we need
- * to, the nesting count will be off by 1.
- */
- curthread->td_md.md_spinlock_count = 0;
- critical_exit();
-
- /*
* Get and save the CPU specific MCA records. Should we get the
* MCA state for each processor, or just the CMC state?
*/
@@ -133,17 +123,12 @@ ia64_ap_startup(void)
CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));
- mtx_lock_spin(&sched_lock);
-
- PCPU_SET(switchtime, cpu_ticks());
- PCPU_SET(switchticks, ticks);
-
ia64_set_tpr(0);
/* kick off the clock on this AP */
pcpu_initclock();
- cpu_throw(NULL, choosethread());
+ sched_throw(NULL);
/* NOTREACHED */
}
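The ia64 startup path was slightly different: instead of acquiring sched_lock and calling spinlock_exit(), it zeroed td_md.md_spinlock_count and ran critical_exit(), and it stamped switchtime/switchticks in a separate hunk. Both fixups vanish here; the nesting correction for the borrowed idle context is now sched_throw()'s responsibility, leaving the MD code with just TPR and clock setup before the throw.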
diff --git a/sys/sparc64/sparc64/mp_machdep.c b/sys/sparc64/sparc64/mp_machdep.c
index 5a4b3da..4c604f4 100644
--- a/sys/sparc64/sparc64/mp_machdep.c
+++ b/sys/sparc64/sparc64/mp_machdep.c
@@ -364,12 +364,8 @@ cpu_mp_bootstrap(struct pcpu *pc)
while (csa->csa_count != 0)
;
- /* ok, now grab sched_lock and enter the scheduler */
- mtx_lock_spin(&sched_lock);
- spinlock_exit();
- PCPU_SET(switchtime, cpu_ticks());
- PCPU_SET(switchticks, ticks);
- cpu_throw(NULL, choosethread()); /* doesn't return */
+ /* ok, now enter the scheduler */
+ sched_throw(NULL);
}
void
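sparc64 carried the same five-line sequence without the long explanatory comment, so it collapses to the one-line call with the comment trimmed to match.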
diff --git a/sys/sun4v/sun4v/mp_machdep.c b/sys/sun4v/sun4v/mp_machdep.c
index e9c7d43..acf91c3 100644
--- a/sys/sun4v/sun4v/mp_machdep.c
+++ b/sys/sun4v/sun4v/mp_machdep.c
@@ -403,13 +403,8 @@ cpu_mp_bootstrap(struct pcpu *pc)
while (csa->csa_count != 0)
;
- /* ok, now grab sched_lock and enter the scheduler */
- mtx_lock_spin(&sched_lock);
- spinlock_exit();
- PCPU_SET(switchtime, cpu_ticks());
- PCPU_SET(switchticks, ticks);
-
- cpu_throw(NULL, choosethread()); /* doesn't return */
+ /* ok, now enter the scheduler */
+ sched_throw(NULL);
}
void
@@ -460,13 +455,12 @@ cpu_ipi_preempt(struct trapframe *tf)
{
struct thread *running_thread = curthread;
- mtx_lock_spin(&sched_lock);
+ thread_lock(running_thread);
if (running_thread->td_critnest > 1)
running_thread->td_owepreempt = 1;
else
mi_switch(SW_INVOL | SW_PREEMPT, NULL);
- mtx_unlock_spin(&sched_lock);
-
+ thread_unlock(running_thread);
}
void
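Net effect: 16 insertions replace 76 deleted lines of duplicated scheduler-entry boilerplate, and sched_lock disappears from every MD file touched. The thread_lock(td) idiom locks whatever spin lock currently protects td (its td_lock), which a scheduler may point at something finer-grained than one global lock; that indirection is the point of the conversion.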