path: root/sys/amd64/amd64/mp_machdep.c
author		avg <avg@FreeBSD.org>	2011-06-08 08:12:15 +0000
committer	avg <avg@FreeBSD.org>	2011-06-08 08:12:15 +0000
commit		74204e61b2bf85d1e0d861efb6350869bef8d5ba (patch)
tree		76b1205a2910e2296d889196ecc53a537b4fc56c /sys/amd64/amd64/mp_machdep.c
parent		9c3130fd8beafdb49ce265e2ed95dde63492dac1 (diff)
remove code for dynamic offlining/onlining of CPUs on x86
The code has definitely been broken for SCHED_ULE, which is the default
scheduler. It may have been broken for SCHED_4BSD in more subtle ways,
e.g. with manually configured CPU affinities and for interrupt delivery
purposes.

We still provide a way to disable individual CPUs or all hyperthreading
"twin" CPUs before SMP startup. See the UPDATING entry for details.

The interaction between building the CPU topology and disabling CPUs
remains fuzzy: the topology is first built using all available CPUs and
the disabled CPUs are then "subtracted" from it. That doesn't work well
if the resulting topology becomes non-uniform.

This work was done in cooperation with Attilio Rao, who in addition to
reviewing also provided parts of the code.

PR:		kern/145385
Discussed with:	gcooper, ambrisko, mdf, sbruno
Reviewed by:	attilio
Tested by:	pho, pluknet
X-MFC after:	never
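[Editor's note: the boot-time knobs the message refers to are loader tunables consulted in assign_cpu_ids() before the APs are started, not the run-time sysctls this commit removes. A minimal /boot/loader.conf sketch follows; the local APIC ID "2" is a placeholder only — consult the UPDATING entry and the CPU/APIC IDs reported in dmesg for a given machine:

	# Do not start hyperthreading "twin" CPUs at boot.
	machdep.hyperthreading_allowed="0"

	# Disable one CPU by its local APIC ID (placeholder ID).
	hint.lapic.2.disabled="1"

Both settings take effect only at boot; after this commit there is no supported way to offline a running CPU.]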
Diffstat (limited to 'sys/amd64/amd64/mp_machdep.c')
-rw-r--r--	sys/amd64/amd64/mp_machdep.c	171
1 file changed, 6 insertions, 165 deletions
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index d72afd6..53988e9 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -29,7 +29,6 @@ __FBSDID("$FreeBSD$");
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
-#include "opt_mp_watchdog.h"
#include "opt_sched.h"
#include "opt_smp.h"
@@ -64,7 +63,6 @@ __FBSDID("$FreeBSD$");
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
-#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
@@ -160,11 +158,8 @@ static int start_all_aps(void);
static int start_ap(int apic_id);
static void release_aps(void *dummy);
-static int hlt_logical_cpus;
static u_int hyperthreading_cpus; /* logical cpus sharing L1 cache */
-static cpuset_t hyperthreading_cpus_mask;
static int hyperthreading_allowed = 1;
-static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;
static void
@@ -748,11 +743,6 @@ init_secondary(void)
if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
CPU_OR(&logical_cpus_mask, &tcpuset);
- /* Determine if we are a hyperthread. */
- if (hyperthreading_cpus > 1 &&
- PCPU_GET(apic_id) % hyperthreading_cpus != 0)
- CPU_OR(&hyperthreading_cpus_mask, &tcpuset);
-
/* Build our map of 'other' CPUs. */
tallcpus = all_cpus;
CPU_NAND(&tallcpus, &tcpuset);
@@ -843,7 +833,7 @@ assign_cpu_ids(void)
if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
cpu_info[i].cpu_hyperthread = 1;
-#if defined(SCHED_ULE)
+
/*
* Don't use HT CPU if it has been disabled by a
* tunable.
@@ -852,7 +842,6 @@ assign_cpu_ids(void)
cpu_info[i].cpu_disabled = 1;
continue;
}
-#endif
}
/* Don't use this CPU if it has been disabled by a tunable. */
@@ -862,6 +851,11 @@ assign_cpu_ids(void)
}
}
+ if (hyperthreading_allowed == 0 && hyperthreading_cpus > 1) {
+ hyperthreading_cpus = 0;
+ cpu_logical = 1;
+ }
+
/*
* Assign CPU IDs to local APIC IDs and disable any CPUs
* beyond MAXCPU. CPU 0 is always assigned to the BSP.
@@ -1487,159 +1481,6 @@ release_aps(void *dummy __unused)
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
-static int
-sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
-{
- cpuset_t mask;
- int error;
-
- mask = hlt_cpus_mask;
- error = sysctl_handle_opaque(oidp, &mask, sizeof(mask), req);
- if (error || !req->newptr)
- return (error);
-
- if (!CPU_EMPTY(&logical_cpus_mask) &&
- CPU_SUBSET(&mask, &logical_cpus_mask))
- hlt_logical_cpus = 1;
- else
- hlt_logical_cpus = 0;
-
- if (! hyperthreading_allowed)
- CPU_OR(&mask, &hyperthreading_cpus_mask);
-
- if (CPU_SUBSET(&mask, &all_cpus))
- CPU_CLR(0, &mask);
- hlt_cpus_mask = mask;
- return (error);
-}
-SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus,
- CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, sysctl_hlt_cpus, "S",
- "Bitmap of CPUs to halt. 101 (binary) will halt CPUs 0 and 2.");
-
-static int
-sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
-{
- int disable, error;
-
- disable = hlt_logical_cpus;
- error = sysctl_handle_int(oidp, &disable, 0, req);
- if (error || !req->newptr)
- return (error);
-
- if (disable)
- CPU_OR(&hlt_cpus_mask, &logical_cpus_mask);
- else
- CPU_NAND(&hlt_cpus_mask, &logical_cpus_mask);
-
- if (! hyperthreading_allowed)
- CPU_OR(&hlt_cpus_mask, &hyperthreading_cpus_mask);
-
- if (CPU_SUBSET(&hlt_cpus_mask, &all_cpus))
- CPU_CLR(0, &hlt_cpus_mask);
-
- hlt_logical_cpus = disable;
- return (error);
-}
-
-static int
-sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
-{
- int allowed, error;
-
- allowed = hyperthreading_allowed;
- error = sysctl_handle_int(oidp, &allowed, 0, req);
- if (error || !req->newptr)
- return (error);
-
-#ifdef SCHED_ULE
- /*
- * SCHED_ULE doesn't allow enabling/disabling HT cores at
- * run-time.
- */
- if (allowed != hyperthreading_allowed)
- return (ENOTSUP);
- return (error);
-#endif
-
- if (allowed)
- CPU_NAND(&hlt_cpus_mask, &hyperthreading_cpus_mask);
- else
- CPU_OR(&hlt_cpus_mask, &hyperthreading_cpus_mask);
-
- if (!CPU_EMPTY(&logical_cpus_mask) &&
- CPU_SUBSET(&hlt_cpus_mask, &logical_cpus_mask))
- hlt_logical_cpus = 1;
- else
- hlt_logical_cpus = 0;
-
- if (CPU_SUBSET(&hlt_cpus_mask, &all_cpus))
- CPU_CLR(0, &hlt_cpus_mask);
-
- hyperthreading_allowed = allowed;
- return (error);
-}
-
-static void
-cpu_hlt_setup(void *dummy __unused)
-{
-
- if (!CPU_EMPTY(&logical_cpus_mask)) {
- TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
- &hlt_logical_cpus);
- sysctl_ctx_init(&logical_cpu_clist);
- SYSCTL_ADD_PROC(&logical_cpu_clist,
- SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
- "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
- sysctl_hlt_logical_cpus, "IU", "");
- SYSCTL_ADD_UINT(&logical_cpu_clist,
- SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
- "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
- &logical_cpus_mask, 0, "");
-
- if (hlt_logical_cpus)
- CPU_OR(&hlt_cpus_mask, &logical_cpus_mask);
-
- /*
- * If necessary for security purposes, force
- * hyperthreading off, regardless of the value
- * of hlt_logical_cpus.
- */
- if (!CPU_EMPTY(&hyperthreading_cpus_mask)) {
- SYSCTL_ADD_PROC(&logical_cpu_clist,
- SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
- "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
- 0, 0, sysctl_hyperthreading_allowed, "IU", "");
- if (! hyperthreading_allowed)
- CPU_OR(&hlt_cpus_mask,
- &hyperthreading_cpus_mask);
- }
- }
-}
-SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
-
-int
-mp_grab_cpu_hlt(void)
-{
- cpuset_t mask;
-#ifdef MP_WATCHDOG
- u_int cpuid;
-#endif
- int retval;
-
- mask = PCPU_GET(cpumask);
-#ifdef MP_WATCHDOG
- cpuid = PCPU_GET(cpuid);
- ap_watchdog(cpuid);
-#endif
-
- retval = 0;
- while (CPU_OVERLAP(&mask, &hlt_cpus_mask)) {
- retval = 1;
- __asm __volatile("sti; hlt" : : : "memory");
- }
- return (retval);
-}
-
#ifdef COUNT_IPIS
/*
* Setup interrupt counters for IPI handlers.