Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig                |  4
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c     |  4
-rw-r--r--  arch/x86_64/ia32/ia32entry.S       |  2
-rw-r--r--  arch/x86_64/ia32/sys_ia32.c        | 66
-rw-r--r--  arch/x86_64/ia32/syscall32.c       | 59
-rw-r--r--  arch/x86_64/kernel/early-quirks.c  |  4
-rw-r--r--  arch/x86_64/kernel/genapic.c       |  4
-rw-r--r--  arch/x86_64/kernel/head64.c        |  4
-rw-r--r--  arch/x86_64/kernel/io_apic.c       | 17
-rw-r--r--  arch/x86_64/kernel/mce.c           |  2
-rw-r--r--  arch/x86_64/kernel/mce_amd.c       |  2
-rw-r--r--  arch/x86_64/kernel/mpparse.c       |  2
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c   |  2
-rw-r--r--  arch/x86_64/kernel/setup.c         |  6
-rw-r--r--  arch/x86_64/kernel/time.c          | 18
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S   |  4
-rw-r--r--  arch/x86_64/mm/fault.c             | 21
-rw-r--r--  arch/x86_64/mm/srat.c              | 48
-rw-r--r--  arch/x86_64/pci/mmconfig.c         | 29
19 files changed, 95 insertions, 203 deletions
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index d427553..02dd394 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -44,6 +44,10 @@ config MMU
bool
default y
+config ZONE_DMA
+ bool
+ default y
+
config ISA
bool
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 5ce0bd4..6efe04f 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -300,12 +300,10 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
bprm->loader += stack_base;
bprm->exec += stack_base;
- mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!mpnt)
return -ENOMEM;
- memset(mpnt, 0, sizeof(*mpnt));
-
down_write(&mm->mmap_sem);
{
mpnt->vm_mm = mm;
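
[Note: the ia32_binfmt.c hunk above is a pure conversion from kmem_cache_alloc() plus memset() to kmem_cache_zalloc(), which hands back already-zeroed memory. A minimal sketch of the equivalent pattern, kernel context assumed; the helper name is made up for illustration.]

#include <linux/slab.h>
#include <linux/mm.h>

/* Illustrative helper (not part of the patch): kmem_cache_zalloc() zeroes
 * the object at allocation time, so the explicit memset() disappears. */
static struct vm_area_struct *alloc_zeroed_vma(void)
{
	return kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	/* equivalent old form:
	 *	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	 *	if (vma)
	 *		memset(vma, 0, sizeof(*vma));
	 */
}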
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index b4aa875..5f32cf4 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -515,7 +515,7 @@ ia32_sys_call_table:
.quad sys32_vm86_warning /* vm86old */
.quad compat_sys_wait4
.quad sys_swapoff /* 115 */
- .quad sys32_sysinfo
+ .quad compat_sys_sysinfo
.quad sys32_ipc
.quad sys_fsync
.quad stub32_sigreturn
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index c9bac3a..200fdde 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -523,72 +523,6 @@ sys32_sysfs(int option, u32 arg1, u32 arg2)
return sys_sysfs(option, arg1, arg2);
}
-struct sysinfo32 {
- s32 uptime;
- u32 loads[3];
- u32 totalram;
- u32 freeram;
- u32 sharedram;
- u32 bufferram;
- u32 totalswap;
- u32 freeswap;
- unsigned short procs;
- unsigned short pad;
- u32 totalhigh;
- u32 freehigh;
- u32 mem_unit;
- char _f[20-2*sizeof(u32)-sizeof(int)];
-};
-
-asmlinkage long
-sys32_sysinfo(struct sysinfo32 __user *info)
-{
- struct sysinfo s;
- int ret;
- mm_segment_t old_fs = get_fs ();
- int bitcount = 0;
-
- set_fs (KERNEL_DS);
- ret = sys_sysinfo((struct sysinfo __user *)&s);
- set_fs (old_fs);
-
- /* Check to see if any memory value is too large for 32-bit and scale
- * down if needed
- */
- if ((s.totalram >> 32) || (s.totalswap >> 32)) {
- while (s.mem_unit < PAGE_SIZE) {
- s.mem_unit <<= 1;
- bitcount++;
- }
- s.totalram >>= bitcount;
- s.freeram >>= bitcount;
- s.sharedram >>= bitcount;
- s.bufferram >>= bitcount;
- s.totalswap >>= bitcount;
- s.freeswap >>= bitcount;
- s.totalhigh >>= bitcount;
- s.freehigh >>= bitcount;
- }
-
- if (!access_ok(VERIFY_WRITE, info, sizeof(struct sysinfo32)) ||
- __put_user (s.uptime, &info->uptime) ||
- __put_user (s.loads[0], &info->loads[0]) ||
- __put_user (s.loads[1], &info->loads[1]) ||
- __put_user (s.loads[2], &info->loads[2]) ||
- __put_user (s.totalram, &info->totalram) ||
- __put_user (s.freeram, &info->freeram) ||
- __put_user (s.sharedram, &info->sharedram) ||
- __put_user (s.bufferram, &info->bufferram) ||
- __put_user (s.totalswap, &info->totalswap) ||
- __put_user (s.freeswap, &info->freeswap) ||
- __put_user (s.procs, &info->procs) ||
- __put_user (s.totalhigh, &info->totalhigh) ||
- __put_user (s.freehigh, &info->freehigh) ||
- __put_user (s.mem_unit, &info->mem_unit))
- return -EFAULT;
- return 0;
-}
-
asmlinkage long
sys32_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
{
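
[Note: the deleted sys32_sysinfo() existed mainly to scale 64-bit memory counters into the 32-bit struct sysinfo32 fields; the syscall table now points at the generic compat_sys_sysinfo (see the ia32entry.S hunk), which performs the same kind of scaling. A sketch of that trick, with a hypothetical helper name.]

#include <linux/kernel.h>
#include <linux/mm.h>

/* Hypothetical helper: if any counter needs more than 32 bits, grow
 * mem_unit and shift every counter down by the same amount so that
 * counter * mem_unit stays constant. */
static void scale_sysinfo_to_32bit(struct sysinfo *s)
{
	int bitcount = 0;

	if ((s->totalram >> 32) || (s->totalswap >> 32)) {
		while (s->mem_unit < PAGE_SIZE) {
			s->mem_unit <<= 1;
			bitcount++;
		}
		s->totalram  >>= bitcount;
		s->freeram   >>= bitcount;
		s->sharedram >>= bitcount;
		s->bufferram >>= bitcount;
		s->totalswap >>= bitcount;
		s->freeswap  >>= bitcount;
		s->totalhigh >>= bitcount;
		s->freehigh  >>= bitcount;
	}
}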
diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index 59f1fa1..568ff0d 100644
--- a/arch/x86_64/ia32/syscall32.c
+++ b/arch/x86_64/ia32/syscall32.c
@@ -18,68 +18,34 @@ extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
extern int sysctl_vsyscall32;
-char *syscall32_page;
+static struct page *syscall32_pages[1];
static int use_sysenter = -1;
-static struct page *
-syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
-{
- struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
- get_page(p);
- return p;
-}
-
-/* Prevent VMA merging */
-static void syscall32_vma_close(struct vm_area_struct *vma)
-{
-}
-
-static struct vm_operations_struct syscall32_vm_ops = {
- .close = syscall32_vma_close,
- .nopage = syscall32_nopage,
-};
-
struct linux_binprm;
/* Setup a VMA at program startup for the vsyscall page */
int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
{
- int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
- struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
int ret;
- vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
- if (!vma)
- return -ENOMEM;
-
- memset(vma, 0, sizeof(struct vm_area_struct));
- /* Could randomize here */
- vma->vm_start = VSYSCALL32_BASE;
- vma->vm_end = VSYSCALL32_END;
- /* MAYWRITE to allow gdb to COW and set breakpoints */
- vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+ down_write(&mm->mmap_sem);
/*
+ * MAYWRITE to allow gdb to COW and set breakpoints
+ *
* Make sure the vDSO gets into every core dump.
* Dumping its contents makes post-mortem fully interpretable later
* without matching up the same kernel and hardware config to see
* what PC values meant.
*/
- vma->vm_flags |= VM_ALWAYSDUMP;
- vma->vm_flags |= mm->def_flags;
- vma->vm_page_prot = protection_map[vma->vm_flags & 7];
- vma->vm_ops = &syscall32_vm_ops;
- vma->vm_mm = mm;
-
- down_write(&mm->mmap_sem);
- if ((ret = insert_vm_struct(mm, vma))) {
- up_write(&mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
- return ret;
- }
- mm->total_vm += npages;
+ /* Could randomize here */
+ ret = install_special_mapping(mm, VSYSCALL32_BASE, PAGE_SIZE,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+ VM_ALWAYSDUMP,
+ syscall32_pages);
up_write(&mm->mmap_sem);
- return 0;
+ return ret;
}
const char *arch_vma_name(struct vm_area_struct *vma)
@@ -92,9 +58,10 @@ const char *arch_vma_name(struct vm_area_struct *vma)
static int __init init_syscall32(void)
{
- syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
+ char *syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
if (!syscall32_page)
panic("Cannot allocate syscall32 page");
+ syscall32_pages[0] = virt_to_page(syscall32_page);
if (use_sysenter > 0) {
memcpy(syscall32_page, syscall32_sysenter,
syscall32_sysenter_end - syscall32_sysenter);
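
[Note: the syscall32.c rewrite replaces the hand-built VMA and its nopage handler with install_special_mapping(), which takes a pre-allocated page array and creates the non-mergeable, always-dumped mapping itself. A condensed sketch of the pattern; the address and page array stand in for VSYSCALL32_BASE and syscall32_pages.]

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative wrapper: map one special page at 'addr' in 'mm'.
 * install_special_mapping() builds and inserts the VMA under mmap_sem. */
static int map_one_special_page(struct mm_struct *mm, unsigned long addr,
				struct page **pages)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
				      VM_ALWAYSDUMP,
				      pages);
	up_write(&mm->mmap_sem);
	return ret;
}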
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 49802f1..bd30d13 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -32,7 +32,7 @@ static void via_bugs(void)
static int nvidia_hpet_detected __initdata;
-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
nvidia_hpet_detected = 1;
return 0;
@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
return;
nvidia_hpet_detected = 0;
- acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+ acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index b007433..0b3603a 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -58,8 +58,8 @@ void __init clustered_apic_check(void)
* Some x86_64 machines use physical APIC mode regardless of how many
* procs/clusters are present (x86_64 ES7000 is an example).
*/
- if (acpi_fadt.revision > FADT2_REVISION_ID)
- if (acpi_fadt.force_apic_physical_destination_mode) {
+ if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
+ if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
genapic = &apic_cluster;
goto print;
}
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index cc230b9..5f197b0 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -34,8 +34,6 @@ static void __init clear_bss(void)
#define OLD_CL_BASE_ADDR 0x90000
#define OLD_CL_OFFSET 0x90022
-extern char saved_command_line[];
-
static void __init copy_bootdata(char *real_mode_data)
{
int new_data;
@@ -50,7 +48,7 @@ static void __init copy_bootdata(char *real_mode_data)
new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
}
command_line = (char *) ((u64)(new_data));
- memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
}
void __init x86_64_start_kernel(char * real_mode_data)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index d7bad90..6be6730 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -1956,24 +1956,31 @@ static struct irq_chip msi_chip = {
.retrigger = ioapic_retrigger_irq,
};
-int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
struct msi_msg msg;
- int ret;
+ int irq, ret;
+ irq = create_irq();
+ if (irq < 0)
+ return irq;
+
+ set_irq_msi(irq, desc);
ret = msi_compose_msg(dev, irq, &msg);
- if (ret < 0)
+ if (ret < 0) {
+ destroy_irq(irq);
return ret;
+ }
write_msi_msg(irq, &msg);
set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
- return 0;
+ return irq;
}
void arch_teardown_msi_irq(unsigned int irq)
{
- return;
+ destroy_irq(irq);
}
#endif /* CONFIG_PCI_MSI */
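
[Note: with the io_apic.c change, arch_setup_msi_irq() allocates its own vector via create_irq(), rolls it back with destroy_irq() if composing the message fails, and returns the irq number rather than 0; arch_teardown_msi_irq() is the matching release. A sketch of how a caller pairs the two hooks; the surrounding function is hypothetical.]

#include <linux/pci.h>
#include <linux/msi.h>

/* Hypothetical caller: a negative return means setup failed and there is
 * nothing to undo; otherwise the returned irq must eventually be handed
 * back to arch_teardown_msi_irq(), which calls destroy_irq(). */
static int msi_setup_example(struct pci_dev *dev, struct msi_desc *desc)
{
	int irq = arch_setup_msi_irq(dev, desc);

	if (irq < 0)
		return irq;
	/* ... request_irq() and use the interrupt here ... */
	arch_teardown_msi_irq(irq);
	return 0;
}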
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index ac08503..bdb54a2 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -516,7 +516,7 @@ static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned
}
}
-static struct file_operations mce_chrdev_ops = {
+static const struct file_operations mce_chrdev_ops = {
.read = mce_read,
.ioctl = mce_ioctl,
};
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index fa09deb..93c7072 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -401,7 +401,6 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
if (!b)
return -ENOMEM;
- memset(b, 0, sizeof(struct threshold_block));
b->block = block;
b->bank = bank;
@@ -490,7 +489,6 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
err = -ENOMEM;
goto out;
}
- memset(b, 0, sizeof(struct threshold_bank));
kobject_set_name(&b->kobj, "threshold_bank%i", bank);
b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 0807256..50dd8be 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -798,7 +798,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
return gsi;
/* Don't set up the ACPI SCI because it's already set up */
- if (acpi_fadt.sci_int == gsi)
+ if (acpi_gbl_FADT.sci_interrupt == gsi)
return gsi;
ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 697f0aa..eb18be5 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
.dma_supported = NULL,
};
-void pci_swiotlb_init(void)
+void __init pci_swiotlb_init(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index af425a8..6047724 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(edid_info);
extern int root_mountflags;
-char command_line[COMMAND_LINE_SIZE];
+char __initdata command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
{ .name = "dma1", .start = 0x00, .end = 0x1f,
@@ -343,7 +343,7 @@ static void discover_ebda(void)
void __init setup_arch(char **cmdline_p)
{
- printk(KERN_INFO "Command line: %s\n", saved_command_line);
+ printk(KERN_INFO "Command line: %s\n", boot_command_line);
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
screen_info = SCREEN_INFO;
@@ -373,7 +373,7 @@ void __init setup_arch(char **cmdline_p)
early_identify_cpu(&boot_cpu_data);
- strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 5cc76d0..335cc91 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -498,7 +498,7 @@ static unsigned long get_cmos_time(void)
{
unsigned int year, mon, day, hour, min, sec;
unsigned long flags;
- unsigned extyear = 0;
+ unsigned century = 0;
spin_lock_irqsave(&rtc_lock, flags);
@@ -510,9 +510,9 @@ static unsigned long get_cmos_time(void)
mon = CMOS_READ(RTC_MONTH);
year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_ACPI
- if (acpi_fadt.revision >= FADT2_REVISION_ID &&
- acpi_fadt.century)
- extyear = CMOS_READ(acpi_fadt.century);
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ century = CMOS_READ(acpi_gbl_FADT.century);
#endif
} while (sec != CMOS_READ(RTC_SECONDS));
@@ -530,10 +530,10 @@ static unsigned long get_cmos_time(void)
BCD_TO_BIN(mon);
BCD_TO_BIN(year);
- if (extyear) {
- BCD_TO_BIN(extyear);
- year += extyear;
- printk(KERN_INFO "Extended CMOS year: %d\n", extyear);
+ if (century) {
+ BCD_TO_BIN(century);
+ year += century * 100;
+ printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
} else {
/*
* x86-64 systems only exists since 2002.
@@ -954,7 +954,7 @@ __cpuinit int unsynchronized_tsc(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
/* But TSC doesn't tick in C3 so don't use it there */
- if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000)
+ if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
return 1;
#endif
return 0;
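
[Note: the time.c fix treats the FADT register as a century, not a full extended year, so after BCD conversion it contributes century * 100 years. A worked example of the corrected arithmetic; the values are illustrative, not read from hardware.]

#include <linux/bcd.h>

/* Illustrative only: 0x20 and 0x07 stand in for the raw BCD values read
 * from the FADT century register and the CMOS year register. */
static unsigned int cmos_year_example(void)
{
	unsigned int year = 0x07;	/* BCD 07 */
	unsigned int century = 0x20;	/* BCD 20 */

	BCD_TO_BIN(year);		/* -> 7  */
	BCD_TO_BIN(century);		/* -> 20 */
	return century * 100 + year;	/* 2007; the old code yielded 27 */
}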
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 1e54ddf..c360c42 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -192,10 +192,14 @@ SECTIONS
from .altinstructions and .eh_frame */
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
+
+#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(4096);
__initramfs_start = .;
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
__initramfs_end = .;
+#endif
+
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
__per_cpu_start = .;
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index a65fc6f..49e8cf2 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -69,27 +69,6 @@ static inline int notify_page_fault(enum die_val val, const char *str,
return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
-void bust_spinlocks(int yes)
-{
- int loglevel_save = console_loglevel;
- if (yes) {
- oops_in_progress = 1;
- } else {
-#ifdef CONFIG_VT
- unblank_screen();
-#endif
- oops_in_progress = 0;
- /*
- * OK, the message is on the console. Now we call printk()
- * without oops_in_progress set so that printk will give klogd
- * a poke. Hold onto your hats...
- */
- console_loglevel = 15; /* NMI oopser may have shut the console up */
- printk(" ");
- console_loglevel = loglevel_save;
- }
-}
-
/* Sometimes the CPU reports invalid exceptions on prefetch.
Check that here and ignore.
Opcode checker based on code by Richard Brunner */
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 1087e15..2efe215 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
static __init int slit_valid(struct acpi_table_slit *slit)
{
int i, j;
- int d = slit->localities;
+ int d = slit->locality_count;
for (i = 0; i < d; i++) {
for (j = 0; j < d; j++) {
u8 val = slit->entry[d*i + j];
@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
/* Callback for Proximity Domain -> LAPIC mapping */
void __init
-acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
int pxm, node;
if (srat_disabled())
return;
- if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+ if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
bad_srat();
return;
}
- if (pa->flags.enabled == 0)
+ if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
- pxm = pa->proximity_domain;
+ pxm = pa->proximity_domain_lo;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -254,21 +254,21 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
/* Looks good */
if (nd->start == nd->end) {
- nd->start = start;
- nd->end = end;
+ nd->start = start;
+ nd->end = end;
changed = 1;
- } else {
- if (nd->start == end) {
- nd->start = start;
+ } else {
+ if (nd->start == end) {
+ nd->start = start;
changed = 1;
}
- if (nd->end == start) {
- nd->end = end;
+ if (nd->end == start) {
+ nd->end = end;
changed = 1;
}
if (!changed)
printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
- }
+ }
ret = update_end_of_memory(nd->end);
@@ -279,7 +279,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
-acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
struct bootnode *nd, oldnode;
unsigned long start, end;
@@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
if (srat_disabled())
return;
- if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+ if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
bad_srat();
return;
}
- if (ma->flags.enabled == 0)
+ if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
return;
- if (ma->flags.hot_pluggable && !save_add_info())
+
+ if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
return;
- start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
- end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
+ start = ma->base_address;
+ end = start + ma->length;
pxm = ma->proximity_domain;
node = setup_node(pxm);
if (node < 0) {
@@ -337,7 +338,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
push_node_boundaries(node, nd->start >> PAGE_SHIFT,
nd->end >> PAGE_SHIFT);
- if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
+ if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
+ (reserve_hotadd(node, start, end) < 0)) {
/* Ignore hotadd region. Undo damage */
printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
*nd = oldnode;
@@ -394,7 +396,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
/* First clean up the node list */
for (i = 0; i < MAX_NUMNODES; i++) {
- cutoff_node(i, start, end);
+ cutoff_node(i, start, end);
if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
unparse_node(i);
node_set_offline(i);
@@ -426,7 +428,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
if (!node_online(i))
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
- for (i = 0; i < NR_CPUS; i++) {
+ for (i = 0; i < NR_CPUS; i++) {
if (cpu_to_node[i] == NUMA_NO_NODE)
continue;
if (!node_isset(cpu_to_node[i], nodes_parsed))
@@ -461,7 +463,7 @@ int __node_distance(int a, int b)
if (!acpi_slit)
return a == b ? 10 : 20;
- index = acpi_slit->localities * node_to_pxm(a);
+ index = acpi_slit->locality_count * node_to_pxm(a);
return acpi_slit->entry[index + node_to_pxm(b)];
}
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index f8b6b28..faabb6e 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -1,6 +1,6 @@
/*
* mmconfig.c - Low-level direct PCI config space access via MMCONFIG
- *
+ *
* This is an 64bit optimized version that always keeps the full mmconfig
* space mapped. This allows lockless config space operation.
*/
@@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
/* Static virtual mapping of the MMCONFIG aperture */
struct mmcfg_virt {
- struct acpi_table_mcfg_config *cfg;
+ struct acpi_mcfg_allocation *cfg;
char __iomem *virt;
};
static struct mmcfg_virt *pci_mmcfg_virt;
@@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
static char __iomem *get_virt(unsigned int seg, unsigned bus)
{
int cfg_num = -1;
- struct acpi_table_mcfg_config *cfg;
+ struct acpi_mcfg_allocation *cfg;
while (1) {
++cfg_num;
if (cfg_num >= pci_mmcfg_config_num)
break;
cfg = pci_mmcfg_virt[cfg_num].cfg;
- if (cfg->pci_segment_group_number != seg)
+ if (cfg->pci_segment != seg)
continue;
if ((cfg->start_bus_number <= bus) &&
(cfg->end_bus_number >= bus))
@@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
this applies to all busses. */
cfg = &pci_mmcfg_config[0];
if (pci_mmcfg_config_num == 1 &&
- cfg->pci_segment_group_number == 0 &&
+ cfg->pci_segment == 0 &&
(cfg->start_bus_number | cfg->end_bus_number) == 0)
return pci_mmcfg_virt[0].virt;
@@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return;
- acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
+ acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
if ((pci_mmcfg_config_num == 0) ||
(pci_mmcfg_config == NULL) ||
- (pci_mmcfg_config[0].base_address == 0))
+ (pci_mmcfg_config[0].address == 0))
return;
/* Only do this check when type 1 works. If it doesn't work
assume we run on a Mac and always use MCFG */
- if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
- pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
+ if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
+ pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
- printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
- pci_mmcfg_config[0].base_address);
+ printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
+ (unsigned long)pci_mmcfg_config[0].address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
@@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
}
for (i = 0; i < pci_mmcfg_config_num; ++i) {
pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
- pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
+ pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
MMCONFIG_APER_MAX);
if (!pci_mmcfg_virt[i].virt) {
printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
"segment %d\n",
- pci_mmcfg_config[i].pci_segment_group_number);
+ pci_mmcfg_config[i].pci_segment);
return;
}
- printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
+ printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
+ (unsigned long)pci_mmcfg_config[i].address);
}
unreachable_devices();