Diffstat (limited to 'arch/x86_64/mm/srat.c')
-rw-r--r--  arch/x86_64/mm/srat.c  50
1 files changed, 38 insertions, 12 deletions
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 8b7f856..482c257 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -26,6 +26,10 @@ static nodemask_t nodes_found __initdata;
static struct node nodes[MAX_NUMNODES] __initdata;
static u8 pxm2node[256] = { [0 ... 255] = 0xff };
+/* Too small nodes confuse the VM badly. Usually they result
+ from BIOS bugs. */
+#define NODE_MIN_SIZE (4*1024*1024)
+
static int node_to_pxm(int n);
int pxm_to_node(int pxm)
@@ -131,7 +135,12 @@ void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
{
int pxm, node;
- if (srat_disabled() || pa->flags.enabled == 0)
+ if (srat_disabled())
+ return;
+ if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
+ return;
+ }
+ if (pa->flags.enabled == 0)
return;
pxm = pa->proximity_domain;
node = setup_node(pxm);
@@ -155,8 +164,16 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
int node, pxm;
int i;
- if (srat_disabled() || ma->flags.enabled == 0)
+ if (srat_disabled())
+ return;
+ if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+ bad_srat();
return;
+ }
+ if (ma->flags.enabled == 0)
+ return;
+ start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
+ end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
pxm = ma->proximity_domain;
node = setup_node(pxm);
if (node < 0) {
@@ -164,8 +181,6 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
bad_srat();
return;
}
- start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
- end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
/* It is fine to add this area to the nodes data it will be used later*/
if (ma->flags.hot_pluggable == 1)
printk(KERN_INFO "SRAT: hot plug zone found %lx - %lx \n",
@@ -213,7 +228,8 @@ static int nodes_cover_memory(void)
}
e820ram = end_pfn - e820_hole_size(0, end_pfn);
- if (pxmram < e820ram) {
+ /* We seem to lose 3 pages somewhere. Allow a bit of slack. */
+ if ((long)(e820ram - pxmram) >= 1*1024*1024) {
printk(KERN_ERR
"SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
(pxmram << PAGE_SHIFT) >> 20,
@@ -223,6 +239,16 @@ static int nodes_cover_memory(void)
return 1;
}
+static void unparse_node(int node)
+{
+ int i;
+ node_clear(node, nodes_parsed);
+ for (i = 0; i < MAX_LOCAL_APIC; i++) {
+ if (apicid_to_node[i] == node)
+ apicid_to_node[i] = NUMA_NO_NODE;
+ }
+}
+
void __init acpi_numa_arch_fixup(void) {}
/* Use the information discovered above to actually set up the nodes. */
@@ -230,22 +256,22 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
int i;
- if (acpi_numa <= 0)
- return -1;
-
/* First clean up the node list */
- for_each_node_mask(i, nodes_parsed) {
+ for (i = 0; i < MAX_NUMNODES; i++) {
cutoff_node(i, start, end);
- if (nodes[i].start == nodes[i].end)
- node_clear(i, nodes_parsed);
+ if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE)
+ unparse_node(i);
}
+ if (acpi_numa <= 0)
+ return -1;
+
if (!nodes_cover_memory()) {
bad_srat();
return -1;
}
- memnode_shift = compute_hash_shift(nodes, nodes_weight(nodes_parsed));
+ memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
if (memnode_shift < 0) {
printk(KERN_ERR
"SRAT: No NUMA node hash function found. Contact maintainer\n");