Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--   arch/ia64/kernel/msi_ia64.c    |   2
-rw-r--r--   arch/ia64/kernel/perfmon.c     |  22
-rw-r--r--   arch/ia64/kernel/smpboot.c     |  19
-rw-r--r--   arch/ia64/kernel/time.c        |   7
-rw-r--r--   arch/ia64/kernel/vmlinux.lds.S | 362
5 files changed, 211 insertions, 201 deletions
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 6c89228..4a746ea 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -25,7 +25,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq,
 	if (irq_prepare_move(irq, cpu))
 		return -1;
 
-	read_msi_msg(irq, &msg);
+	get_cached_msi_msg(irq, &msg);
 
 	addr = msg.address_lo;
 	addr &= MSI_ADDR_DEST_ID_MASK;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index ab985f7..7443290 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1696,8 +1696,8 @@ pfm_poll(struct file *filp, poll_table * wait)
 	return mask;
 }
 
-static int
-pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long
+pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	DPRINT(("pfm_ioctl called\n"));
 	return -EINVAL;
@@ -2174,15 +2174,15 @@ pfm_no_open(struct inode *irrelevant, struct file *dontcare)
 
 
 static const struct file_operations pfm_file_ops = {
-	.llseek   = no_llseek,
-	.read     = pfm_read,
-	.write    = pfm_write,
-	.poll     = pfm_poll,
-	.ioctl    = pfm_ioctl,
-	.open     = pfm_no_open,	/* special open code to disallow open via /proc */
-	.fasync   = pfm_fasync,
-	.release  = pfm_close,
-	.flush    = pfm_flush
+	.llseek		= no_llseek,
+	.read		= pfm_read,
+	.write		= pfm_write,
+	.poll		= pfm_poll,
+	.unlocked_ioctl	= pfm_ioctl,
+	.open		= pfm_no_open,	/* special open code to disallow open via /proc */
+	.fasync		= pfm_fasync,
+	.release	= pfm_close,
+	.flush		= pfm_flush
 };
 
 static int
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 6a1380e..d003b50 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -390,13 +390,11 @@ smp_callin (void)
 
 	fix_b0_for_bsp();
 
-#ifdef CONFIG_NUMA
 	/*
 	 * numa_node_id() works after this.
 	 */
 	set_numa_node(cpu_to_node_map[cpuid]);
 	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
-#endif
 
 	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
@@ -510,21 +508,18 @@ do_boot_cpu (int sapicid, int cpu)
 		.done	= COMPLETION_INITIALIZER(c_idle.done),
 	};
 
+	/*
+	 * We can't use kernel_thread since we must avoid to
+	 * reschedule the child.
+	 */
 	c_idle.idle = get_idle_for_cpu(cpu);
 	if (c_idle.idle) {
 		init_idle(c_idle.idle, cpu);
 		goto do_rest;
 	}
 
-	/*
-	 * We can't use kernel_thread since we must avoid to reschedule the child.
-	 */
-	if (!keventd_up() || current_is_keventd())
-		c_idle.work.func(&c_idle.work);
-	else {
-		schedule_work(&c_idle.work);
-		wait_for_completion(&c_idle.done);
-	}
+	schedule_work(&c_idle.work);
+	wait_for_completion(&c_idle.done);
 
 	if (IS_ERR(c_idle.idle))
 		panic("failed fork for CPU %d", cpu);
@@ -640,9 +635,7 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callin_map);
-#ifdef CONFIG_NUMA
 	set_numa_node(cpu_to_node_map[smp_processor_id()]);
-#endif
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	paravirt_post_smp_prepare_boot_cpu();
 }
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 653b3c4..ed6f22e 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -471,7 +471,8 @@ void update_vsyscall_tz(void)
 {
 }
 
-void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
+void update_vsyscall(struct timespec *wall, struct timespec *wtm,
+			struct clocksource *c, u32 mult)
 {
 	unsigned long flags;
 
@@ -487,9 +488,9 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
 	/* copy kernel time structures */
 	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
 	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
-	fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
+	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
							+ wall->tv_sec;
-	fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
+	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
							+ wall->tv_nsec;
 
 	/* normalize */
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index e07218a..5a4d044 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -6,204 +6,209 @@
 
 #include <asm-generic/vmlinux.lds.h>
 
-#define IVT_TEXT	\
-	VMLINUX_SYMBOL(__start_ivt_text) = .;	\
-	*(.text..ivt)				\
-	VMLINUX_SYMBOL(__end_ivt_text) = .;
-
 OUTPUT_FORMAT("elf64-ia64-little")
 OUTPUT_ARCH(ia64)
 ENTRY(phys_start)
 jiffies = jiffies_64;
+
 PHDRS {
-  code   PT_LOAD;
-  percpu PT_LOAD;
-  data   PT_LOAD;
-  note   PT_NOTE;
-  unwind 0x70000001;	/* PT_IA_64_UNWIND, but ld doesn't match the name */
+	code   PT_LOAD;
+	percpu PT_LOAD;
+	data   PT_LOAD;
+	note   PT_NOTE;
+	unwind 0x70000001;	/* PT_IA_64_UNWIND, but ld doesn't match the name */
 }
-SECTIONS
-{
-  /* unwind exit sections must be discarded before the rest of the
-     sections get included. */
-  /DISCARD/ : {
-	*(.IA_64.unwind.exit.text)
-	*(.IA_64.unwind_info.exit.text)
-	*(.comment)
-	*(.note)
-  }
-
-  v = PAGE_OFFSET;	/* this symbol is here to make debugging easier... */
-  phys_start = _start - LOAD_OFFSET;
-
-  code : { } :code
-  . = KERNEL_START;
-
-  _text = .;
-  _stext = .;
-
-  .text : AT(ADDR(.text) - LOAD_OFFSET)
-    {
-	IVT_TEXT
-	TEXT_TEXT
-	SCHED_TEXT
-	LOCK_TEXT
-	KPROBES_TEXT
-	*(.gnu.linkonce.t*)
-    }
-  .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
-	{ *(.text2) }
-#ifdef CONFIG_SMP
-  .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET)
-	{ *(.text..lock) }
-#endif
-  _etext = .;
-  /* Read-only data */
+SECTIONS {
+	/*
+	 * unwind exit sections must be discarded before
+	 * the rest of the sections get included.
+	 */
+	/DISCARD/ : {
+		*(.IA_64.unwind.exit.text)
+		*(.IA_64.unwind_info.exit.text)
+		*(.comment)
+		*(.note)
+	}
 
-  NOTES :code :note	/* put .notes in text and mark in PT_NOTE  */
-  code_continues : {} :code   /* switch back to regular program...  */
+	v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
+	phys_start = _start - LOAD_OFFSET;
+
+	code : {
+	} :code
+	. = KERNEL_START;
+
+	_text = .;
+	_stext = .;
+
+	.text : AT(ADDR(.text) - LOAD_OFFSET) {
+		__start_ivt_text = .;
+		*(.text..ivt)
+		__end_ivt_text = .;
+		TEXT_TEXT
+		SCHED_TEXT
+		LOCK_TEXT
+		KPROBES_TEXT
+		*(.gnu.linkonce.t*)
+	}
 
-  EXCEPTION_TABLE(16)
+	.text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
+		*(.text2)
+	}
 
-  /* MCA table */
-  . = ALIGN(16);
-  __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
-	{
-	  __start___mca_table = .;
-	  *(__mca_table)
-	  __stop___mca_table = .;
+#ifdef CONFIG_SMP
+	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
+		*(.text..lock)
+	}
+#endif
+	_etext = .;
+
+	/*
+	 * Read-only data
+	 */
+	NOTES :code :note	/* put .notes in text and mark in PT_NOTE  */
+	code_continues : {
+	} : code	       /* switch back to regular program...  */
+
+	EXCEPTION_TABLE(16)
+
+	/* MCA table */
+	. = ALIGN(16);
+	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
+		__start___mca_table = .;
+		*(__mca_table)
+		__stop___mca_table = .;
 	}
 
-  .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET)
-	{
-	  __start___phys_stack_reg_patchlist = .;
-	  *(.data..patch.phys_stack_reg)
-	  __end___phys_stack_reg_patchlist = .;
+	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
+		__start___phys_stack_reg_patchlist = .;
+		*(.data..patch.phys_stack_reg)
+		__end___phys_stack_reg_patchlist = .;
 	}
 
-  /* Global data */
-  _data = .;
+	/*
+	 * Global data
+	 */
+	_data = .;
 
-  /* Unwind info & table: */
-  . = ALIGN(8);
-  .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
-	{ *(.IA_64.unwind_info*) }
-  .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
-	{
-	  __start_unwind = .;
-	  *(.IA_64.unwind*)
-	  __end_unwind = .;
+	/* Unwind info & table: */
+	. = ALIGN(8);
+	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
+		*(.IA_64.unwind_info*)
+	}
+	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
+		__start_unwind = .;
+		*(.IA_64.unwind*)
+		__end_unwind = .;
 	} :code :unwind
-  code_continues2 : {} : code
+	code_continues2 : {
+	} : code
 
-  RODATA
+	RODATA
 
-  .opd : AT(ADDR(.opd) - LOAD_OFFSET)
-	{ *(.opd) }
-  /* Initialization code and data: */
+	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
+		*(.opd)
+	}
 
-  . = ALIGN(PAGE_SIZE);
-  __init_begin = .;
+	/*
+	 * Initialization code and data:
+	 */
+	. = ALIGN(PAGE_SIZE);
+	__init_begin = .;
 
-  INIT_TEXT_SECTION(PAGE_SIZE)
-  INIT_DATA_SECTION(16)
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(16)
 
-  .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET)
-	{
-	  __start___vtop_patchlist = .;
-	  *(.data..patch.vtop)
-	  __end___vtop_patchlist = .;
+	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
+		__start___vtop_patchlist = .;
+		*(.data..patch.vtop)
+		__end___vtop_patchlist = .;
 	}
 
-  .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET)
-	{
-	  __start___rse_patchlist = .;
-	  *(.data..patch.rse)
-	  __end___rse_patchlist = .;
+	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
+		__start___rse_patchlist = .;
+		*(.data..patch.rse)
+		__end___rse_patchlist = .;
 	}
 
-  .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET)
-	{
-	  __start___mckinley_e9_bundles = .;
-	  *(.data..patch.mckinley_e9)
-	  __end___mckinley_e9_bundles = .;
+	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
+		__start___mckinley_e9_bundles = .;
+		*(.data..patch.mckinley_e9)
+		__end___mckinley_e9_bundles = .;
 	}
 
 #if defined(CONFIG_PARAVIRT)
-  . = ALIGN(16);
-  .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET)
-	{
-	  __start_paravirt_bundles = .;
-	  *(.paravirt_bundles)
-	  __stop_paravirt_bundles = .;
-	}
-  . = ALIGN(16);
-  .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET)
-	{
-	  __start_paravirt_insts = .;
-	  *(.paravirt_insts)
-	  __stop_paravirt_insts = .;
-	}
-  . = ALIGN(16);
-  .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET)
-	{
-	  __start_paravirt_branches = .;
-	  *(.paravirt_branches)
-	  __stop_paravirt_branches = .;
+	. = ALIGN(16);
+	.paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
+		__start_paravirt_bundles = .;
+		*(.paravirt_bundles)
+		__stop_paravirt_bundles = .;
+	}
+	. = ALIGN(16);
+	.paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
+		__start_paravirt_insts = .;
+		*(.paravirt_insts)
+		__stop_paravirt_insts = .;
+	}
+	. = ALIGN(16);
+	.paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
+		__start_paravirt_branches = .;
+		*(.paravirt_branches)
+		__stop_paravirt_branches = .;
 	}
 #endif
 
 #if defined(CONFIG_IA64_GENERIC)
-  /* Machine Vector */
-  . = ALIGN(16);
-  .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
-	{
-	  machvec_start = .;
-	  *(.machvec)
-	  machvec_end = .;
+	/* Machine Vector */
+	. = ALIGN(16);
+	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
+		machvec_start = .;
+		*(.machvec)
+		machvec_end = .;
 	}
 #endif
 
 #ifdef CONFIG_SMP
-  . = ALIGN(PERCPU_PAGE_SIZE);
-  __cpu0_per_cpu = .;
-  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+	. = ALIGN(PERCPU_PAGE_SIZE);
+	__cpu0_per_cpu = .;
+	. = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
 #endif
 
-  . = ALIGN(PAGE_SIZE);
-  __init_end = .;
+	. = ALIGN(PAGE_SIZE);
+	__init_end = .;
 
-  .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET)
-	{
-	PAGE_ALIGNED_DATA(PAGE_SIZE)
-	. = ALIGN(PAGE_SIZE);
-	__start_gate_section = .;
-	*(.data..gate)
-	__stop_gate_section = .;
+	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
+		PAGE_ALIGNED_DATA(PAGE_SIZE)
+		. = ALIGN(PAGE_SIZE);
+		__start_gate_section = .;
+		*(.data..gate)
+		__stop_gate_section = .;
 #ifdef CONFIG_XEN
-	. = ALIGN(PAGE_SIZE);
-	__xen_start_gate_section = .;
-	*(.data..gate.xen)
-	__xen_stop_gate_section = .;
+		. = ALIGN(PAGE_SIZE);
+		__xen_start_gate_section = .;
+		*(.data..gate.xen)
+		__xen_stop_gate_section = .;
 #endif
 	}
 
-  . = ALIGN(PAGE_SIZE);		/* make sure the gate page doesn't expose
-				 * kernel data
-				 */
-
-  /* Per-cpu data: */
-  . = ALIGN(PERCPU_PAGE_SIZE);
-  PERCPU_VADDR(PERCPU_ADDR, :percpu)
-  __phys_per_cpu_start = __per_cpu_load;
-  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
-						 * into percpu page size
-						 */
-
-  data : { } :data
-  .data : AT(ADDR(.data) - LOAD_OFFSET)
-	{
+	/*
+	 * make sure the gate page doesn't expose
+	 * kernel data
+	 */
+	. = ALIGN(PAGE_SIZE);
+
+	/* Per-cpu data: */
+	. = ALIGN(PERCPU_PAGE_SIZE);
+	PERCPU_VADDR(PERCPU_ADDR, :percpu)
+	__phys_per_cpu_start = __per_cpu_load;
+	/*
+	 * ensure percpu data fits
+	 * into percpu page size
+	 */
+	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
+
+	data : {
+	} :data
+	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 		INIT_TASK_DATA(PAGE_SIZE)
 		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
 		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
@@ -213,26 +218,37 @@ SECTIONS
 		CONSTRUCTORS
 	}
 
-  . = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
-  .got : AT(ADDR(.got) - LOAD_OFFSET)
-	{ *(.got.plt) *(.got) }
-  __gp = ADDR(.got) + 0x200000;
-  /* We want the small data sections together, so single-instruction offsets
-     can access them all, and initialized data all before uninitialized, so
-     we can shorten the on-disk segment size.  */
-  .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
-	{ *(.sdata) *(.sdata1) *(.srdata) }
-  _edata = .;
+	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
+	.got : AT(ADDR(.got) - LOAD_OFFSET) {
+		*(.got.plt)
+		*(.got)
+	}
+	__gp = ADDR(.got) + 0x200000;
+
+	/*
+	 * We want the small data sections together,
+	 * so single-instruction offsets can access
+	 * them all, and initialized data all before
+	 * uninitialized, so we can shorten the
+	 * on-disk segment size.
+	 */
+	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
+		*(.sdata)
+		*(.sdata1)
+		*(.srdata)
+	}
+	_edata = .;
 
-  BSS_SECTION(0, 0, 0)
+	BSS_SECTION(0, 0, 0)
 
-  _end = .;
+	_end = .;
 
-  code : { } :code
+	code : {
+	} :code
 
-  STABS_DEBUG
-  DWARF_DEBUG
+	STABS_DEBUG
+	DWARF_DEBUG
 
-  /* Default discards */
-  DISCARDS
+	/* Default discards */
+	DISCARDS
 }
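For context only — the following is not part of the commit above. The perfmon.c hunk is the standard conversion from the old .ioctl file operation (which took an inode argument, returned int, and ran under the Big Kernel Lock) to .unlocked_ioctl (which drops the inode, returns long, and is called without the BKL, so any locking must be done inside the handler). A minimal sketch of that pattern, using hypothetical example_ioctl/example_fops names rather than anything from this patch:

#include <linux/fs.h>
#include <linux/module.h>

/*
 * New-style handler: no inode parameter, returns long, runs without the
 * Big Kernel Lock. Like pfm_ioctl above it supports no commands and
 * simply rejects everything, so no internal locking is needed here.
 */
static long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return -EINVAL;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= example_ioctl,	/* was: .ioctl = example_ioctl */
};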