-rw-r--r--  arch/ia64/kernel/acpi.c   |  4
-rw-r--r--  arch/ia64/kernel/numa.c   |  2
-rw-r--r--  arch/ia64/kernel/setup.c  |  2
-rw-r--r--  arch/ia64/mm/discontig.c  | 13
-rw-r--r--  arch/ia64/mm/numa.c       |  4
-rw-r--r--  include/asm-ia64/acpi.h   | 33
-rw-r--r--  include/asm-ia64/numa.h   |  2
7 files changed, 49 insertions(+), 11 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 78f28d8..c7467f8 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -423,6 +423,7 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
static struct acpi_table_slit __initdata *slit_table;
+cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
@@ -482,6 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
(pa->apic_id << 8) | (pa->local_sapic_eid);
/* nid should be overridden as logical node id later */
node_cpuid[srat_num_cpus].nid = pxm;
+ cpu_set(srat_num_cpus, early_cpu_possible_map);
srat_num_cpus++;
}
@@ -559,7 +561,7 @@ void __init acpi_numa_arch_fixup(void)
}
/* set logical node id in cpu structure */
- for (i = 0; i < srat_num_cpus; i++)
+ for_each_possible_early_cpu(i)
node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
printk(KERN_INFO "Number of logical nodes in system = %d\n",
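A standalone userspace sketch of the idea behind these acpi.c hunks (not kernel code; mark_possible_cpu() and cpu_is_possible() are invented stand-ins for cpu_set()/cpu_isset() on early_cpu_possible_map): CPUs named by SRAT processor-affinity entries are recorded in a bitmap while the table is parsed, and later fixups walk only the recorded CPUs rather than all NR_CPUS slots.

/*
 * Standalone sketch only -- models the bookkeeping, not the real
 * kernel cpumask API.
 */
#include <stdio.h>

#define NR_CPUS 64

static unsigned long possible_map[(NR_CPUS + 63) / 64];
static int srat_num_cpus;

static void mark_possible_cpu(int cpu)
{
    possible_map[cpu / 64] |= 1UL << (cpu % 64);
}

static int cpu_is_possible(int cpu)
{
    return (int)((possible_map[cpu / 64] >> (cpu % 64)) & 1UL);
}

int main(void)
{
    int cpu;

    /* Pretend the SRAT described three enabled processors. */
    while (srat_num_cpus < 3)
        mark_possible_cpu(srat_num_cpus++);

    /* The later nid fixup now touches only those three entries. */
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (cpu_is_possible(cpu))
            printf("fixing up logical cpu %d\n", cpu);

    return 0;
}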
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index a78b45f..c93420c 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -73,7 +73,7 @@ void __init build_cpu_to_node_map(void)
for(node=0; node < MAX_NUMNODES; node++)
cpus_clear(node_to_cpu_mask[node]);
- for(cpu = 0; cpu < NR_CPUS; ++cpu) {
+ for_each_possible_early_cpu(cpu) {
node = -1;
for (i = 0; i < NR_CPUS; ++i)
if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4aa9eae..6206541 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -493,6 +493,8 @@ setup_arch (char **cmdline_p)
acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
+ per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
+ 32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
# endif
#else
# ifdef CONFIG_SMP
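The call added above sizes the per-CPU scan: a floor of 32 possible CPUs when the SRAT listed none, otherwise the number actually found, plus additional_cpus reserved for hot-plug. A minimal sketch of that bound, assuming plain ints in place of the cpumask (finalize_bound() is a hypothetical helper for illustration only):

#include <stdio.h>

#define NR_CPUS 64

/* Sketch of the sizing arithmetic; not the kernel routine. */
static int finalize_bound(int srat_cpus, int reserve_cpus)
{
    int min_cpus = (srat_cpus == 0) ? 32 : srat_cpus;
    int high = (srat_cpus > min_cpus) ? srat_cpus : min_cpus;

    high += reserve_cpus;
    return (high < NR_CPUS) ? high : NR_CPUS;
}

int main(void)
{
    printf("%d\n", finalize_bound(0, 0));   /* no SRAT cpus -> floor of 32 */
    printf("%d\n", finalize_bound(8, 4));   /* 8 found, 4 reserved -> 12   */
    printf("%d\n", finalize_bound(60, 16)); /* clamped to NR_CPUS -> 64    */
    return 0;
}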
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ee5e68b..6136a4c 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -104,7 +104,7 @@ static int __meminit early_nr_cpus_node(int node)
{
int cpu, n = 0;
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for_each_possible_early_cpu(cpu)
if (node == node_cpuid[cpu].nid)
n++;
@@ -124,6 +124,7 @@ static unsigned long __meminit compute_pernodesize(int node)
pernodesize += node * L1_CACHE_BYTES;
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+ pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
pernodesize = PAGE_ALIGN(pernodesize);
return pernodesize;
}
@@ -142,7 +143,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
#ifdef CONFIG_SMP
int cpu;
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ for_each_possible_early_cpu(cpu) {
if (node == node_cpuid[cpu].nid) {
memcpy(__va(cpu_data), __phys_per_cpu_start,
__per_cpu_end - __per_cpu_start);
@@ -345,7 +346,7 @@ static void __init initialize_pernode_data(void)
#ifdef CONFIG_SMP
/* Set the node_data pointer for each per-cpu struct */
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ for_each_possible_early_cpu(cpu) {
node = node_cpuid[cpu].nid;
per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
}
@@ -493,13 +494,9 @@ void __cpuinit *per_cpu_init(void)
int cpu;
static int first_time = 1;
-
- if (smp_processor_id() != 0)
- return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
if (first_time) {
first_time = 0;
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for_each_possible_early_cpu(cpu)
per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
}
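These discontig.c hunks make the per-node sizing and per-CPU setup loops walk only early-possible CPUs, so a node reserves per-CPU space only for the CPUs the SRAT actually placed on it. A rough standalone sketch of that sizing idea, with invented placeholder constants rather than the real ia64 values:

/*
 * Rough sketch only: the constants and the 1024-byte stand-in for the
 * per-node structs are invented.  A node's reservation grows by one
 * per-CPU area per CPU assigned to it; slots left at -1 add nothing.
 */
#include <stdio.h>

#define NR_CPUS          8
#define PERCPU_PAGE_SIZE (64 * 1024)
#define L1_CACHE_BYTES   128
#define CACHE_ALIGN(x)   (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

/* nid per CPU slot: two CPUs on node 0, two on node 1, rest unassigned */
static int cpu_nid[NR_CPUS] = { 0, 0, 1, 1, -1, -1, -1, -1 };

static unsigned long node_size(int node)
{
    unsigned long size = 0;
    int cpu, cpus = 0;

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (cpu_nid[cpu] == node)
            cpus++;

    size += (unsigned long)PERCPU_PAGE_SIZE * cpus;
    size += CACHE_ALIGN(1024UL);    /* per-node bookkeeping structs */
    return size;
}

int main(void)
{
    printf("node 0 reservation: %lu bytes\n", node_size(0));
    printf("node 1 reservation: %lu bytes\n", node_size(1));
    return 0;
}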
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 7807fc5..b73bf18 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -27,7 +27,9 @@
*/
int num_node_memblks;
struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
-struct node_cpuid_s node_cpuid[NR_CPUS];
+struct node_cpuid_s node_cpuid[NR_CPUS] =
+ { [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };
+
/*
* This is a matrix with "distances" between nodes, they should be
* proportional to the memory access latency ratios.
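The new initializer uses GCC's range-designator extension ([first ... last] = value), the same extension used elsewhere in the kernel, to mark every node_cpuid slot as NUMA_NO_NODE until firmware tables or per_cpu_scan_finalize() assign a node. A minimal standalone example of that syntax with a simplified stand-in struct:

/* Compiles with GCC; the struct is a simplified stand-in. */
#include <stdio.h>

#define NR_CPUS      8
#define NUMA_NO_NODE (-1)

struct node_cpuid_s {
    int phys_id;
    int nid;
};

static struct node_cpuid_s node_cpuid[NR_CPUS] =
    { [0 ... NR_CPUS - 1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };

int main(void)
{
    /* Every slot starts out unassigned until something fills it in. */
    printf("cpu 5 nid = %d\n", node_cpuid[5].nid);
    return 0;
}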
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index cd1cc39..fcfad32 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/numa.h>
#include <asm/system.h>
+#include <asm/numa.h>
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
@@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
#ifdef CONFIG_ACPI_NUMA
#if MAX_NUMNODES > 256
@@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#define acpi_unlazy_tlb(x)
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu) \
+ for_each_cpu_mask((cpu), early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+ int low_cpu, high_cpu;
+ int cpu;
+ int next_nid = 0;
+
+ low_cpu = cpus_weight(early_cpu_possible_map);
+
+ high_cpu = max(low_cpu, min_cpus);
+ high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+ for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+ cpu_set(cpu, early_cpu_possible_map);
+ if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+ node_cpuid[cpu].nid = next_nid;
+ next_nid++;
+ if (next_nid >= num_online_nodes())
+ next_nid = 0;
+ }
+ }
+}
+#endif /* CONFIG_ACPI_NUMA */
+
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/
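per_cpu_scan_finalize() pads early_cpu_possible_map out to at least min_cpus plus the hot-plug reserve and spreads the padded CPUs round-robin across the known nodes. A userspace model of that padding loop, assuming plain arrays in place of the cpumask and node bookkeeping:

/* Model of the padding loop only; not the kernel inline above. */
#include <stdio.h>

#define NR_CPUS      16
#define NUMA_NO_NODE (-1)

int main(void)
{
    int nid[NR_CPUS];
    int online_nodes = 2;  /* pretend two nodes are known            */
    int low_cpu = 3;       /* CPUs actually found by the early scan  */
    int min_cpus = 8;      /* floor requested by the caller          */
    int reserve = 2;       /* hot-plug reserve (additional_cpus)     */
    int high_cpu, cpu, next_nid = 0;

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        nid[cpu] = (cpu < low_cpu) ? 0 : NUMA_NO_NODE;

    high_cpu = (low_cpu > min_cpus) ? low_cpu : min_cpus;
    high_cpu = (high_cpu + reserve < NR_CPUS) ? high_cpu + reserve : NR_CPUS;

    /* Padded CPUs are assigned to the known nodes in turn. */
    for (cpu = low_cpu; cpu < high_cpu; cpu++) {
        if (nid[cpu] == NUMA_NO_NODE) {
            nid[cpu] = next_nid;
            if (++next_nid >= online_nodes)
                next_nid = 0;
        }
    }

    for (cpu = 0; cpu < high_cpu; cpu++)
        printf("cpu %2d -> node %d\n", cpu, nid[cpu]);
    return 0;
}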
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index 6a8a27c..3499ff5 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -22,6 +22,8 @@
#include <asm/mmzone.h>
+#define NUMA_NO_NODE -1
+
extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
extern pg_data_t *pgdat_list[MAX_NUMNODES];