Diffstat (limited to 'arch')

 arch/i386/kernel/acpi/boot.c       | 18
 arch/i386/kernel/acpi/earlyquirk.c |  2
 arch/i386/pci/mmconfig.c           | 24
 arch/x86_64/kernel/early-quirks.c  |  4
 arch/x86_64/mm/srat.c              | 48
 arch/x86_64/pci/mmconfig.c         | 29

 6 files changed, 62 insertions(+), 63 deletions(-)
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 5fafbac..2147511 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -166,7 +166,7 @@ char *__acpi_map_table(unsigned long phys, unsigned long size)
 
 #ifdef CONFIG_PCI_MMCONFIG
 /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
-struct acpi_table_mcfg_config *pci_mmcfg_config;
+struct acpi_mcfg_allocation *pci_mmcfg_config;
 int pci_mmcfg_config_num;
 
 int __init acpi_parse_mcfg(struct acpi_table_header *header)
@@ -179,17 +179,13 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header)
                return -EINVAL;
 
        mcfg = (struct acpi_table_mcfg *)header;
-       if (!mcfg) {
-               printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
-               return -ENODEV;
-       }
 
        /* how many config structures do we have */
        pci_mmcfg_config_num = 0;
        i = header->length - sizeof(struct acpi_table_mcfg);
-       while (i >= sizeof(struct acpi_table_mcfg_config)) {
+       while (i >= sizeof(struct acpi_mcfg_allocation)) {
                ++pci_mmcfg_config_num;
-               i -= sizeof(struct acpi_table_mcfg_config);
+               i -= sizeof(struct acpi_mcfg_allocation);
        };
        if (pci_mmcfg_config_num == 0) {
                printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
@@ -206,7 +202,7 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header)
 
        memcpy(pci_mmcfg_config, &mcfg[1], config_size);
        for (i = 0; i < pci_mmcfg_config_num; ++i) {
-               if (pci_mmcfg_config[i].base_reserved) {
+               if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
                        printk(KERN_ERR PREFIX
                               "MMCONFIG not in low 4GB of memory\n");
                        kfree(pci_mmcfg_config);
@@ -220,14 +216,14 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header)
 #endif /* CONFIG_PCI_MMCONFIG */
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static int __init acpi_parse_madt(struct acpi_table_header *header)
+static int __init acpi_parse_madt(struct acpi_table_header *table)
 {
        struct acpi_table_madt *madt = NULL;
 
-       if (!header|| !cpu_has_apic)
+       if (!cpu_has_apic)
                return -EINVAL;
 
-       madt = (struct acpi_table_madt *)header;
+       madt = (struct acpi_table_madt *)table;
        if (!madt) {
                printk(KERN_WARNING PREFIX "Unable to map MADT\n");
                return -ENODEV;
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 4261c85..bf86f76 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -30,7 +30,7 @@ static int __init check_bridge(int vendor, int device)
           is enabled. */
        if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
                nvidia_hpet_detected = 0;
-               acpi_table_parse("HPET", nvidia_hpet_check);
+               acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
                if (nvidia_hpet_detected == 0) {
                        acpi_skip_timer_override = 1;
                        printk(KERN_INFO "Nvidia board "
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index 80522e3..5700220 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -36,7 +36,7 @@ static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
 static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
 {
        int cfg_num = -1;
-       struct acpi_table_mcfg_config *cfg;
+       struct acpi_mcfg_allocation *cfg;
 
        if (seg == 0 && bus < MAX_CHECK_BUS &&
            test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
@@ -48,11 +48,11 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
                        break;
                }
                cfg = &pci_mmcfg_config[cfg_num];
-               if (cfg->pci_segment_group_number != seg)
+               if (cfg->pci_segment != seg)
                        continue;
                if ((cfg->start_bus_number <= bus) &&
                    (cfg->end_bus_number >= bus))
-                       return cfg->base_address;
+                       return cfg->address;
        }
 
        /* Handle more broken MCFG tables on Asus etc.
@@ -60,9 +60,9 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
           this applies to all busses. */
        cfg = &pci_mmcfg_config[0];
        if (pci_mmcfg_config_num == 1 &&
-           cfg->pci_segment_group_number == 0 &&
+           cfg->pci_segment == 0 &&
            (cfg->start_bus_number | cfg->end_bus_number) == 0)
-               return cfg->base_address;
+               return cfg->address;
 
        /* Fall back to type 0 */
        return 0;
@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
        unsigned long flags;
        u32 base;
 
-       if ((bus > 255) || (devfn > 255) || (reg > 4095)) 
+       if ((bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;
 
        base = get_base_addr(seg, bus, devfn);
@@ -199,19 +199,19 @@ void __init pci_mmcfg_init(int type)
        if ((pci_probe & PCI_PROBE_MMCONF) == 0)
                return;
 
-       acpi_table_parse("MCFG", acpi_parse_mcfg);
+       acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
        if ((pci_mmcfg_config_num == 0) ||
            (pci_mmcfg_config == NULL) ||
-           (pci_mmcfg_config[0].base_address == 0))
+           (pci_mmcfg_config[0].address == 0))
                return;
 
        /* Only do this check when type 1 works.
           If it doesn't work assume we run on a Mac and always use MCFG */
-       if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
-                       pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
+       if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
+                       pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
                        E820_RESERVED)) {
-               printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
-                      pci_mmcfg_config[0].base_address);
+               printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
+                      (unsigned long)pci_mmcfg_config[0].address);
                printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
                return;
        }
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 49802f1..bd30d13 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -32,7 +32,7 @@ static void via_bugs(void)
 
 static int nvidia_hpet_detected __initdata;
 
-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
        nvidia_hpet_detected = 1;
        return 0;
@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
                return;
 
        nvidia_hpet_detected = 0;
-       acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+       acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
        if (nvidia_hpet_detected == 0) {
                acpi_skip_timer_override = 1;
                printk(KERN_INFO "Nvidia board "
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 1087e15..2efe215 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
 static __init int slit_valid(struct acpi_table_slit *slit)
 {
        int i, j;
-       int d = slit->localities;
+       int d = slit->locality_count;
        for (i = 0; i < d; i++) {
                for (j = 0; j < d; j++) {
                        u8 val = slit->entry[d*i + j];
@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 /* Callback for Proximity Domain -> LAPIC mapping */
 void __init
-acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 {
        int pxm, node;
 
        if (srat_disabled())
                return;
-       if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+       if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
                bad_srat();
                return;
        }
-       if (pa->flags.enabled == 0)
+       if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
-       pxm = pa->proximity_domain;
+       pxm = pa->proximity_domain_lo;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -254,21 +254,21 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
        /* Looks good */
 
        if (nd->start == nd->end) {
-               nd->start = start;
-               nd->end = end;
+               nd->start = start;
+               nd->end = end;
                changed = 1;
-       } else {
-               if (nd->start == end) {
-                       nd->start = start;
+       } else {
+               if (nd->start == end) {
+                       nd->start = start;
                        changed = 1;
                }
-               if (nd->end == start) {
-                       nd->end = end;
+               if (nd->end == start) {
+                       nd->end = end;
                        changed = 1;
                }
                if (!changed)
                        printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
-       }
+       }
 
        ret = update_end_of_memory(nd->end);
@@ -279,7 +279,7 @@
 
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 void __init
-acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
        struct bootnode *nd, oldnode;
        unsigned long start, end;
@@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 
        if (srat_disabled())
                return;
-       if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+       if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
                bad_srat();
                return;
        }
-       if (ma->flags.enabled == 0)
+       if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
                return;
-       if (ma->flags.hot_pluggable && !save_add_info())
+
+       if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
                return;
-       start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
-       end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
+       start = ma->base_address;
+       end = start + ma->length;
        pxm = ma->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
@@ -337,7 +338,8 @@
        push_node_boundaries(node, nd->start >> PAGE_SHIFT,
                             nd->end >> PAGE_SHIFT);
 
-       if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
+       if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
+           (reserve_hotadd(node, start, end) < 0)) {
                /* Ignore hotadd region. Undo damage */
                printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
                *nd = oldnode;
@@ -394,7 +396,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 
        /* First clean up the node list */
        for (i = 0; i < MAX_NUMNODES; i++) {
-               cutoff_node(i, start, end);
+               cutoff_node(i, start, end);
                if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
                        unparse_node(i);
                        node_set_offline(i);
@@ -426,7 +428,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
                if (!node_online(i))
                        setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 
-       for (i = 0; i < NR_CPUS; i++) {
+       for (i = 0; i < NR_CPUS; i++) {
                if (cpu_to_node[i] == NUMA_NO_NODE)
                        continue;
                if (!node_isset(cpu_to_node[i], nodes_parsed))
@@ -461,7 +463,7 @@ int __node_distance(int a, int b)
 
        if (!acpi_slit)
                return a == b ? 10 : 20;
-       index = acpi_slit->localities * node_to_pxm(a);
+       index = acpi_slit->locality_count * node_to_pxm(a);
        return acpi_slit->entry[index + node_to_pxm(b)];
 }
 
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index f8b6b28..faabb6e 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -1,6 +1,6 @@
 /*
  * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
- * 
+ *
  * This is an 64bit optimized version that always keeps the full mmconfig
  * space mapped. This allows lockless config space operation.
  */
@@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
 
 /* Static virtual mapping of the MMCONFIG aperture */
 struct mmcfg_virt {
-       struct acpi_table_mcfg_config *cfg;
+       struct acpi_mcfg_allocation *cfg;
        char __iomem *virt;
 };
 static struct mmcfg_virt *pci_mmcfg_virt;
@@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
 static char __iomem *get_virt(unsigned int seg, unsigned bus)
 {
        int cfg_num = -1;
-       struct acpi_table_mcfg_config *cfg;
+       struct acpi_mcfg_allocation *cfg;
 
        while (1) {
                ++cfg_num;
                if (cfg_num >= pci_mmcfg_config_num)
                        break;
                cfg = pci_mmcfg_virt[cfg_num].cfg;
-               if (cfg->pci_segment_group_number != seg)
+               if (cfg->pci_segment != seg)
                        continue;
                if ((cfg->start_bus_number <= bus) &&
                    (cfg->end_bus_number >= bus))
@@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
           this applies to all busses. */
        cfg = &pci_mmcfg_config[0];
        if (pci_mmcfg_config_num == 1 &&
-           cfg->pci_segment_group_number == 0 &&
+           cfg->pci_segment == 0 &&
            (cfg->start_bus_number | cfg->end_bus_number) == 0)
                return pci_mmcfg_virt[0].virt;
 
@@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
        if ((pci_probe & PCI_PROBE_MMCONF) == 0)
                return;
 
-       acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
+       acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
        if ((pci_mmcfg_config_num == 0) ||
            (pci_mmcfg_config == NULL) ||
-           (pci_mmcfg_config[0].base_address == 0))
+           (pci_mmcfg_config[0].address == 0))
                return;
 
        /* Only do this check when type 1 works.
          If it doesn't work assume we run on a Mac and always use MCFG */
-       if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
-                       pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
+       if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
+                       pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
                        E820_RESERVED)) {
-               printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
-                      pci_mmcfg_config[0].base_address);
+               printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
+                      (unsigned long)pci_mmcfg_config[0].address);
                printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
                return;
        }
@@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
        }
        for (i = 0; i < pci_mmcfg_config_num; ++i) {
                pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
-               pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
+               pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
                                                         MMCONFIG_APER_MAX);
                if (!pci_mmcfg_virt[i].virt) {
                        printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
                               "segment %d\n",
-                              pci_mmcfg_config[i].pci_segment_group_number);
+                              pci_mmcfg_config[i].pci_segment);
                        return;
                }
-               printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
+               printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
+                      (unsigned long)pci_mmcfg_config[i].address);
        }
 
        unreachable_devices();
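Editorial note, not part of the commit: the stand-alone C sketch below illustrates the MCFG lookup pattern that get_base_addr() and get_virt() keep after this conversion, including the single-entry fallback for broken BIOSes. It uses a simplified stand-in struct with the renamed fields (address, pci_segment, start_bus_number, end_bus_number) rather than ACPICA's real struct acpi_mcfg_allocation, and the function and type names here are hypothetical.

/*
 * Illustrative sketch only.  struct mcfg_entry is a simplified stand-in
 * for ACPICA's struct acpi_mcfg_allocation, not its exact layout.
 */
#include <stddef.h>
#include <stdint.h>

struct mcfg_entry {                 /* hypothetical stand-in type */
        uint64_t address;           /* base of the MMCONFIG aperture */
        uint16_t pci_segment;       /* PCI segment group number */
        uint8_t  start_bus_number;  /* first bus this entry covers */
        uint8_t  end_bus_number;    /* last bus this entry covers */
};

/* Return the aperture base for (seg, bus), or 0 if nothing matches. */
uint64_t mcfg_base_for_bus(const struct mcfg_entry *cfg, size_t n,
                           unsigned int seg, unsigned int bus)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (cfg[i].pci_segment != seg)
                        continue;
                if (cfg[i].start_bus_number <= bus &&
                    cfg[i].end_bus_number >= bus)
                        return cfg[i].address;
        }

        /* Broken-BIOS fallback kept by the patch: a single entry for
           segment 0 with bus range 0-0 is taken to cover every bus. */
        if (n == 1 && cfg[0].pci_segment == 0 &&
            (cfg[0].start_bus_number | cfg[0].end_bus_number) == 0)
                return cfg[0].address;

        return 0;
}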