 arch/ia64/kernel/acpi.c    | 56
 arch/ia64/kernel/setup.c   |  4
 arch/ia64/kernel/smpboot.c |  5
 include/asm-ia64/acpi.h    |  2
 4 files changed, 63 insertions(+), 4 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d2702c4..34795ed 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -761,6 +761,62 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
return (0);
}
+int additional_cpus __initdata = -1;
+
+static __init int setup_additional_cpus(char *s)
+{
+ if (s)
+ additional_cpus = simple_strtol(s, NULL, 0);
+
+ return 0;
+}
+
+early_param("additional_cpus", setup_additional_cpus);
+
+/*
+ * cpu_possible_map should be static: it must not change as CPUs are
+ * onlined or offlined. The reason is that per-cpu data structures
+ * are allocated by some modules at init time, and they do not expect
+ * to grow or shrink on CPU arrival/departure.
+ * cpu_present_map, on the other hand, can change dynamically.
+ * If CPU hotplug is not compiled in, we fall back to the current
+ * behaviour, where cpu_possible == cpu_present.
+ * - Ashok Raj
+ *
+ * Three ways to find out the number of additional hotplug CPUs:
+ * - If the BIOS specified disabled CPUs in ACPI/mptables, use that count.
+ * - The user can override it with additional_cpus=NUM.
+ * - Otherwise don't reserve additional CPUs.
+ */
+__init void prefill_possible_map(void)
+{
+ int i;
+ int possible, disabled_cpus;
+
+ disabled_cpus = total_cpus - available_cpus;
+ if (additional_cpus == -1) {
+ if (disabled_cpus > 0) {
+ possible = total_cpus;
+ additional_cpus = disabled_cpus;
+ }
+ else {
+ possible = available_cpus;
+ additional_cpus = 0;
+ }
+ } else {
+ possible = available_cpus + additional_cpus;
+ }
+ if (possible > NR_CPUS)
+ possible = NR_CPUS;
+
+ printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+ possible,
+ max_t(int, additional_cpus, 0));
+
+ for (i = 0; i < possible; i++)
+ cpu_set(i, cpu_possible_map);
+}
+
int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
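
A minimal userspace sketch of the sizing policy prefill_possible_map() implements above; the CPU counts and NR_CPUS_SKETCH value are hypothetical stand-ins, and this is not code from the patch:

#include <stdio.h>

#define NR_CPUS_SKETCH 16	/* stand-in for the kernel's NR_CPUS */

static int possible_cpus(int total, int available, int additional)
{
	int disabled = total - available;
	int possible;

	if (additional == -1) {			/* no additional_cpus= given */
		if (disabled > 0)		/* BIOS reported disabled CPUs: reserve them */
			possible = total;
		else				/* nothing disabled: no hotplug headroom */
			possible = available;
	} else {				/* explicit additional_cpus=NUM override */
		possible = available + additional;
	}
	if (possible > NR_CPUS_SKETCH)		/* never exceed the compile-time limit */
		possible = NR_CPUS_SKETCH;
	return possible;
}

int main(void)
{
	printf("%d\n", possible_cpus(4, 2, -1));	/* 2 CPUs disabled by firmware -> 4 */
	printf("%d\n", possible_cpus(4, 4, -1));	/* nothing disabled, no override -> 4 */
	printf("%d\n", possible_cpus(4, 4, 2));		/* additional_cpus=2 -> 6 */
	return 0;
}
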
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 35f7835..3258e09 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -430,6 +430,7 @@ setup_arch (char **cmdline_p)
if (early_console_setup(*cmdline_p) == 0)
mark_bsp_online();
+ parse_early_param();
#ifdef CONFIG_ACPI
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
@@ -688,6 +689,9 @@ void
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+ prefill_possible_map();
+#endif
}
/*
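
parse_early_param(), added above, is what runs the early_param("additional_cpus", ...) handler from the acpi.c hunk before prefill_possible_map() is reached. A userspace sketch of how that handler interprets its argument, with strtol() standing in for the kernel's simple_strtol():

#include <stdio.h>
#include <stdlib.h>

static int additional_cpus = -1;	/* same default as in the patch */

static void setup_additional_cpus_sketch(const char *s)
{
	if (s)	/* base 0: accepts decimal, 0x-prefixed hex and 0-prefixed octal */
		additional_cpus = strtol(s, NULL, 0);
}

int main(void)
{
	setup_additional_cpus_sketch("2");	/* additional_cpus=2 */
	printf("%d\n", additional_cpus);	/* prints 2 */
	setup_additional_cpus_sketch("0x10");	/* additional_cpus=0x10 */
	printf("%d\n", additional_cpus);	/* prints 16 */
	return 0;
}
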
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8f44e7d..b681ef3 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -129,7 +129,7 @@ DEFINE_PER_CPU(int, cpu_state);
/* Bitmasks of currently online, and possible CPUs */
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
@@ -506,9 +506,6 @@ smp_build_cpu_map (void)
for (cpu = 0; cpu < NR_CPUS; cpu++) {
ia64_cpu_to_sapicid[cpu] = -1;
-#ifdef CONFIG_HOTPLUG_CPU
- cpu_set(cpu, cpu_possible_map);
-#endif
}
ia64_cpu_to_sapicid[0] = boot_cpu_id;
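
With this change cpu_possible_map starts out empty (CPU_MASK_NONE) and smp_build_cpu_map() no longer marks every slot up to NR_CPUS as possible; only prefill_possible_map() populates the map. A userspace sketch of that bitmask behaviour, using a plain unsigned long rather than the kernel's cpumask_t:

#include <stdio.h>

static unsigned long possible_map;	/* 0 == empty, like CPU_MASK_NONE */

static void cpu_set_sketch(int cpu, unsigned long *map)
{
	*map |= 1UL << cpu;		/* mark one CPU id as possible */
}

int main(void)
{
	int possible = 4;		/* as computed by prefill_possible_map() */
	int i;

	for (i = 0; i < possible; i++)
		cpu_set_sketch(i, &possible_map);

	/* prints 0xf: only the reserved CPUs, not a full NR_CPUS-wide mask */
	printf("possible_map = 0x%lx\n", possible_map);
	return 0;
}
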
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 3a544ff..f7a5176 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -106,6 +106,8 @@ extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
+extern void prefill_possible_map(void);
+extern int additional_cpus;
#ifdef CONFIG_ACPI_NUMA
/* Proximity bitmap length; _PXM is at most 255 (8 bit)*/
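
The reason the map must be settled this early, per the comment in acpi.c: per-cpu data is commonly sized once at init from the possible-CPU count, and hot-added CPUs only index into it afterwards. A userspace analogy (not kernel code; the names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct cpu_stats {
	unsigned long irqs;
};

static struct cpu_stats *stats;
static int possible;

/* sized once at init from the possible-CPU count, never reallocated */
static int init_sketch(int possible_cpus)
{
	possible = possible_cpus;
	stats = calloc(possible, sizeof(*stats));
	return stats ? 0 : -1;
}

/* a hot-added CPU only reuses the slot reserved for it at init */
static void cpu_arrived_sketch(int cpu)
{
	if (cpu >= possible) {	/* would be a bug: this CPU was never "possible" */
		fprintf(stderr, "cpu%d was never possible\n", cpu);
		return;
	}
	stats[cpu].irqs++;
}

int main(void)
{
	if (init_sketch(4))		/* e.g. possible = 4 from prefill_possible_map() */
		return 1;
	cpu_arrived_sketch(2);		/* CPU 2 hot-added later */
	printf("cpu2 irqs = %lu\n", stats[2].irqs);
	free(stats);
	return 0;
}
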