summaryrefslogtreecommitdiffstats
path: root/sys/x86
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2016-05-14 18:22:52 +0000
committerjhb <jhb@FreeBSD.org>2016-05-14 18:22:52 +0000
commitbcc5b0c55d8b271169672e8a6ef07f362e25873b (patch)
tree0f443e2c4c306207e1367f675cf582d2e838e142 /sys/x86
parent2b8cd1e8042150817a7433ab3b6e09f40ae9b0e2 (diff)
downloadFreeBSD-src-bcc5b0c55d8b271169672e8a6ef07f362e25873b.zip
FreeBSD-src-bcc5b0c55d8b271169672e8a6ef07f362e25873b.tar.gz
Add an EARLY_AP_STARTUP option to start APs earlier during boot.
Currently, Application Processors (non-boot CPUs) are started by MD code at SI_SUB_CPU, but they are kept waiting in a "pen" until SI_SUB_SMP at which point they are released to run kernel threads. SI_SUB_SMP is one of the last SYSINIT levels, so APs don't enter the scheduler and start running threads until fairly late in the boot. This change moves SI_SUB_SMP up to just before software interrupt threads are created allowing the APs to start executing kernel threads much sooner (before any devices are probed). This allows several initialization routines that need to perform initialization on all CPUs to now perform that initialization in one step rather than having to defer the AP initialization to a second SYSINIT run at SI_SUB_SMP. It also permits all CPUs to be available for handling interrupts before any devices are probed. This last feature fixes a problem with interrupt vector exhaustion. Specifically, in the old model all device interrupts were routed onto the boot CPU during boot. Later after the APs were released at SI_SUB_SMP, interrupts were redistributed across all CPUs. However, several drivers for multiqueue hardware allocate N interrupts per CPU in the system. In a system with many CPUs, just a few drivers doing this could exhaust the available pool of interrupt vectors on the boot CPU as each driver was allocating N * mp_ncpu vectors on the boot CPU. Now, drivers will allocate interrupts on their desired CPUs during boot meaning that only N interrupts are allocated from the boot CPU instead of N * mp_ncpu. Some other bits of code can also be simplified as smp_started is now true much earlier and will now always be true for these bits of code. This removes the need to treat the single-CPU boot environment as a special case. As a transition aid, the new behavior is available under a new kernel option (EARLY_AP_STARTUP). This will allow the option to be turned off if need be during initial testing. 
I plan to enable this on x86 by default in a followup commit in the next few days and to have all platforms moved over before 11.0. Once the transition is complete, the option will be removed along with the !EARLY_AP_STARTUP code. These changes have only been tested on x86. Other platform maintainers are encouraged to port their architectures over as well. The main things to check for are any uses of smp_started in MD code that can be simplified and SI_SUB_SMP SYSINITs in MD code that can be removed in the EARLY_AP_STARTUP case (e.g. the interrupt shuffling). PR: kern/199321 Reviewed by: markj, gnn, kib Sponsored by: Netflix
Diffstat (limited to 'sys/x86')
-rw-r--r--sys/x86/isa/clock.c19
-rw-r--r--sys/x86/x86/intr_machdep.c13
-rw-r--r--sys/x86/x86/local_apic.c5
-rw-r--r--sys/x86/x86/mca.c4
-rw-r--r--sys/x86/x86/mp_x86.c2
5 files changed, 42 insertions, 1 deletions
diff --git a/sys/x86/isa/clock.c b/sys/x86/isa/clock.c
index cced5e34..fa4ca5b 100644
--- a/sys/x86/isa/clock.c
+++ b/sys/x86/isa/clock.c
@@ -475,8 +475,27 @@ startrtclock()
void
cpu_initclocks(void)
{
+#ifdef EARLY_AP_STARTUP
+ struct thread *td;
+ int i;
+ td = curthread;
cpu_initclocks_bsp();
+ CPU_FOREACH(i) {
+ if (i == 0)
+ continue;
+ thread_lock(td);
+ sched_bind(td, i);
+ thread_unlock(td);
+ cpu_initclocks_ap();
+ }
+ thread_lock(td);
+ if (sched_is_bound(td))
+ sched_unbind(td);
+ thread_unlock(td);
+#else
+ cpu_initclocks_bsp();
+#endif
}
static int
diff --git a/sys/x86/x86/intr_machdep.c b/sys/x86/x86/intr_machdep.c
index 36a3441..1bc3038 100644
--- a/sys/x86/x86/intr_machdep.c
+++ b/sys/x86/x86/intr_machdep.c
@@ -77,7 +77,7 @@ static struct mtx intr_table_lock;
static struct mtx intrcnt_lock;
static TAILQ_HEAD(pics_head, pic) pics;
-#ifdef SMP
+#if defined(SMP) && !defined(EARLY_AP_STARTUP)
static int assign_cpu;
#endif
@@ -320,11 +320,16 @@ intr_assign_cpu(void *arg, int cpu)
struct intsrc *isrc;
int error;
+#ifdef EARLY_AP_STARTUP
+ MPASS(mp_ncpus == 1 || smp_started);
+ if (cpu != NOCPU) {
+#else
/*
* Don't do anything during early boot. We will pick up the
* assignment once the APs are started.
*/
if (assign_cpu && cpu != NOCPU) {
+#endif
isrc = arg;
mtx_lock(&intr_table_lock);
error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
@@ -502,9 +507,13 @@ intr_next_cpu(void)
{
u_int apic_id;
+#ifdef EARLY_AP_STARTUP
+ MPASS(mp_ncpus == 1 || smp_started);
+#else
/* Leave all interrupts on the BSP during boot. */
if (!assign_cpu)
return (PCPU_GET(apic_id));
+#endif
mtx_lock_spin(&icu_lock);
apic_id = cpu_apic_ids[current_cpu];
@@ -546,6 +555,7 @@ intr_add_cpu(u_int cpu)
CPU_SET(cpu, &intr_cpus);
}
+#ifndef EARLY_AP_STARTUP
/*
* Distribute all the interrupt sources among the available CPUs once the
* AP's have been launched.
@@ -586,6 +596,7 @@ intr_shuffle_irqs(void *arg __unused)
}
SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
NULL);
+#endif
#else
/*
* Always route interrupts to the current processor in the UP case.
diff --git a/sys/x86/x86/local_apic.c b/sys/x86/x86/local_apic.c
index 1a02626..0a77679 100644
--- a/sys/x86/x86/local_apic.c
+++ b/sys/x86/x86/local_apic.c
@@ -749,6 +749,10 @@ native_lapic_enable_pmc(void)
lvts[APIC_LVT_PMC].lvt_masked = 0;
+#ifdef EARLY_AP_STARTUP
+ MPASS(mp_ncpus == 1 || smp_started);
+ smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
+#else
#ifdef SMP
/*
* If hwpmc was loaded at boot time then the APs may not be
@@ -760,6 +764,7 @@ native_lapic_enable_pmc(void)
else
#endif
lapic_update_pmc(NULL);
+#endif
return (1);
#else
return (0);
diff --git a/sys/x86/x86/mca.c b/sys/x86/x86/mca.c
index b3d6066..d005180 100644
--- a/sys/x86/x86/mca.c
+++ b/sys/x86/x86/mca.c
@@ -726,7 +726,11 @@ mca_startup(void *dummy)
callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
+#ifdef EARLY_AP_STARTUP
+SYSINIT(mca_startup, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, mca_startup, NULL);
+#else
SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);
+#endif
#ifdef DEV_APIC
static void
diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c
index 7ad9b96..d2eb2e9 100644
--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c
@@ -933,8 +933,10 @@ init_secondary_tail(void)
while (atomic_load_acq_int(&smp_started) == 0)
ia32_pause();
+#ifndef EARLY_AP_STARTUP
/* Start per-CPU event timers. */
cpu_initclocks_ap();
+#endif
sched_throw(NULL);
OpenPOWER on IntegriCloud