Diffstat (limited to 'arch')
935 files changed, 12025 insertions, 13223 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index 892d617..99f0e17 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -316,13 +316,6 @@ config ARCH_WANT_OLD_COMPAT_IPC select ARCH_WANT_COMPAT_IPC_PARSE_VERSION bool -config HAVE_VIRT_TO_BUS - bool - help - An architecture should select this if it implements the - deprecated interface virt_to_bus(). All new architectures - should probably not select this. - config HAVE_ARCH_SECCOMP_FILTER bool help diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 5469f7b..8629127 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -8,7 +8,7 @@ config ALPHA select HAVE_PERF_EVENTS select HAVE_DMA_ATTRS select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_PROBE select AUTO_IRQ_AFFINITY if SMP select GENERIC_IRQ_SHOW diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 4759fe7..2cc3cc5 100644 --- a/arch/alpha/Makefile +++ b/arch/alpha/Makefile @@ -12,7 +12,7 @@ NM := $(NM) -B LDFLAGS_vmlinux := -static -N #-relax CHECKFLAGS += -D__alpha__ -m64 -cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data +cflags-y := -pipe -mno-fp-regs -ffixed-8 cflags-y += $(call cc-option, -fno-jump-tables) cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4 diff --git a/arch/alpha/boot/head.S b/arch/alpha/boot/head.S index b06812b..8efb266 100644 --- a/arch/alpha/boot/head.S +++ b/arch/alpha/boot/head.S @@ -4,6 +4,7 @@ * initial bootloader stuff.. */ +#include <asm/pal.h> .set noreorder .globl __start diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h index 46cefbd..bae97eb 100644 --- a/arch/alpha/include/asm/floppy.h +++ b/arch/alpha/include/asm/floppy.h @@ -26,7 +26,7 @@ #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_cacheflush(addr,size) /* nothing */ #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ - IRQF_DISABLED, "floppy", NULL) + 0, "floppy", NULL) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #ifdef CONFIG_PCI diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 1f8c729..52cd2a4 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -95,8 +95,6 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define TS_POLLING 0x0010 /* idle task polling need_resched, skip sending interrupt */ -#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) - #ifndef __ASSEMBLY__ #define HAVE_SET_RESTORE_SIGMASK 1 static inline void set_restore_sigmask(void) diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2872acc..7b2be25 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -117,13 +117,6 @@ handle_irq(int irq) return; } - /* - * From here we must proceed with IPL_MAX. Note that we do not - * explicitly enable interrupts afterwards - some MILO PALcode - * (namely LX164 one) seems to have severe problems with RTI - * at IPL 0. - */ - local_irq_disable(); irq_enter(); generic_handle_irq_desc(irq, desc); irq_exit(); diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 772ddfdb..f433fc1 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr, struct pt_regs *regs) { struct pt_regs *old_regs; + + /* + * Disable interrupts during IRQ handling. + * Note that there is no matching local_irq_enable() due to + * severe problems with RTI at IPL0 and some MILO PALcode + * (namely LX164). 
+ */ + local_irq_disable(); switch (type) { case 0: #ifdef CONFIG_SMP @@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector, { long cpu; - local_irq_disable(); smp_percpu_timer_interrupt(regs); cpu = smp_processor_id(); if (cpu != boot_cpuid) { @@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED, .name = "timer", }; diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 63d27fb..ab80a80 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -46,25 +46,6 @@ void (*pm_power_off)(void) = machine_power_off; EXPORT_SYMBOL(pm_power_off); -void -cpu_idle(void) -{ - current_thread_info()->status |= TS_POLLING; - - while (1) { - /* FIXME -- EV6 and LCA45 know how to power down - the CPU. */ - - rcu_idle_enter(); - while (!need_resched()) - cpu_relax(); - - rcu_idle_exit(); - schedule_preempt_disabled(); - } -} - - struct halt_info { int mode; char *restart_cmd; @@ -194,6 +175,7 @@ machine_power_off(void) void show_regs(struct pt_regs *regs) { + show_regs_print_info(KERN_DEFAULT); dik_show_regs(regs, NULL); } diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 9603bc2..7b60834 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -167,8 +167,7 @@ smp_callin(void) cpuid, current, current->active_mm)); preempt_disable(); - /* Do nothing. */ - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } /* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */ diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 4d4c046..1d4aabf 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -185,9 +185,12 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) mb(); } -extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); +static struct resource irongate_io = { + .name = "Irongate PCI IO", + .flags = IORESOURCE_IO, +}; static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, @@ -209,6 +212,7 @@ nautilus_init_pci(void) irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; + bus->resource[0] = &irongate_io; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); @@ -234,8 +238,8 @@ nautilus_init_pci(void) if (pci_mem < memtop) memtop = pci_mem; if (memtop > alpha_mv.min_mem_address) { - free_reserved_mem(__va(alpha_mv.min_mem_address), - __va(memtop)); + free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address), + (unsigned long)__va(memtop), 0, NULL); printk("nautilus_init_pci: %ldk freed\n", (memtop - alpha_mv.min_mem_address) >> 10); } diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 5cf4a48..a53cf03 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -280,15 +280,15 @@ titan_late_init(void) * all reported to the kernel as machine checks, so the handler * is a nop so it can be called to count the individual events. 
*/ - titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(63+16, titan_intr_nop, 0, "CChip Error", NULL); - titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(62+16, titan_intr_nop, 0, "PChip 0 H_Error", NULL); - titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(61+16, titan_intr_nop, 0, "PChip 1 H_Error", NULL); - titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(60+16, titan_intr_nop, 0, "PChip 0 C_Error", NULL); - titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(59+16, titan_intr_nop, 0, "PChip 1 C_Error", NULL); /* @@ -348,9 +348,9 @@ privateer_init_pci(void) * Hook a couple of extra err interrupts that the * common titan code won't. */ - titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(53+16, titan_intr_nop, 0, "NMI", NULL); - titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(50+16, titan_intr_nop, 0, "Temperature Warning", NULL); /* diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 4037461..affccb9 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -169,13 +169,6 @@ void show_stack(struct task_struct *task, unsigned long *sp) dik_show_trace(sp); } -void dump_stack(void) -{ - show_stack(NULL, NULL); -} - -EXPORT_SYMBOL(dump_stack); - void die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) { diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 1ad6ca7..0ba85ee 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -31,6 +31,7 @@ #include <asm/console.h> #include <asm/tlb.h> #include <asm/setup.h> +#include <asm/sections.h> extern void die_if_kernel(char *,struct pt_regs *,long); @@ -281,8 +282,6 @@ printk_memory_info(void) { unsigned long codesize, reservedpages, datasize, initsize, tmp; extern int page_is_ram(unsigned long) __init; - extern char _text, _etext, _data, _edata; - extern char __init_begin, __init_end; /* printk all informations */ reservedpages = 0; @@ -318,32 +317,15 @@ mem_init(void) #endif /* CONFIG_DISCONTIGMEM */ void -free_reserved_mem(void *start, void *end) -{ - void *__start = start; - for (; __start < end; __start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(__start)); - init_page_count(virt_to_page(__start)); - free_page((long)__start); - totalram_pages++; - } -} - -void free_initmem(void) { - extern char __init_begin, __init_end; - - free_reserved_mem(&__init_begin, &__init_end); - printk ("Freeing unused kernel memory: %ldk freed\n", - (&__init_end - &__init_begin) >> 10); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_reserved_mem((void *)start, (void *)end); - printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 3973ae3..3388504 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -17,6 +17,7 @@ #include <asm/hwrpb.h> #include <asm/pgalloc.h> +#include <asm/sections.h> pg_data_t node_data[MAX_NUMNODES]; EXPORT_SYMBOL(node_data); @@ -325,8 +326,6 @@ void __init mem_init(void) { unsigned long codesize, reservedpages, datasize, initsize, pfn; extern int page_is_ram(unsigned long) __init; - extern char _text, _etext, _data, _edata; - extern char __init_begin, __init_end; unsigned long nid, i; high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); diff --git 
a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index 31f77ae..45b8e0c 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h @@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int i; for_each_sg(sg, s, nents, i) - sg->dma_address = dma_map_page(dev, sg_page(s), s->offset, + s->dma_address = dma_map_page(dev, sg_page(s), s->offset, s->length, dir); return nents; diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h index f4c8d36..a262828 100644 --- a/arch/arc/include/asm/elf.h +++ b/arch/arc/include/asm/elf.h @@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *); */ #define ELF_PLATFORM (NULL) -#define SET_PERSONALITY(ex) \ - set_personality(PER_LINUX | (current->personality & (~PER_MASK))) - #endif diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h index 23daa32..eb2ae53 100644 --- a/arch/arc/include/asm/entry.h +++ b/arch/arc/include/asm/entry.h @@ -415,7 +415,7 @@ *-------------------------------------------------------------*/ .macro SAVE_ALL_EXCEPTION marker - st \marker, [sp, 8] + st \marker, [sp, 8] /* orig_r8 */ st r0, [sp, 4] /* orig_r0, needed only for sys calls */ /* Restore r9 used to code the early prologue */ diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index ccd8480..eac0716 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h @@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void) " flag.nz %0 \n" : "=r"(temp), "=r"(flags) : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) - : "cc"); + : "memory", "cc"); return flags; } @@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags) __asm__ __volatile__( " flag %0 \n" : - : "r"(flags)); + : "r"(flags) + : "memory"); } /* @@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void) " and %0, %0, %1 \n" " flag %0 \n" : "=&r"(temp) - : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))); + : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)) + : "memory"); } /* @@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void) __asm__ __volatile__( " lr %0, [status32] \n" - : "=&r"(temp)); + : "=&r"(temp) + : + : "memory"); return temp; } diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h index f3c4934..4930957 100644 --- a/arch/arc/include/asm/kgdb.h +++ b/arch/arc/include/asm/kgdb.h @@ -13,7 +13,7 @@ #ifdef CONFIG_KGDB -#include <asm/user.h> +#include <asm/ptrace.h> /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set * register API yet */ @@ -53,9 +53,7 @@ enum arc700_linux_regnums { }; #else -static inline void kgdb_trap(struct pt_regs *regs, int param) -{ -} +#define kgdb_trap(regs, param) #endif #endif /* __ARC_KGDB_H__ */ diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 8ae783d..6179de7 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs) #define orig_r8_IS_SCALL 0x0001 #define orig_r8_IS_SCALL_RESTARTED 0x0002 #define orig_r8_IS_BRKPT 0x0004 -#define orig_r8_IS_EXCPN 0x0004 +#define orig_r8_IS_EXCPN 0x0008 #define orig_r8_IS_IRQ1 0x0010 #define orig_r8_IS_IRQ2 0x0020 diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h index e53a534..dd785be 100644 --- a/arch/arc/include/asm/syscalls.h +++ b/arch/arc/include/asm/syscalls.h @@ -16,8 +16,6 @@ #include <linux/types.h> int sys_clone_wrapper(int, int, int, int, int); -int 
sys_fork_wrapper(void); -int sys_vfork_wrapper(void); int sys_cacheflush(uint32_t, uint32_t uint32_t); int sys_arc_settls(void *); int sys_arc_gettls(void); diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 6afa4f7..30333ce 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -28,14 +28,14 @@ */ struct user_regs_struct { - struct scratch { + struct { long pad; long bta, lp_start, lp_end, lp_count; long status32, ret, blink, fp, gp; long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; long sp; } scratch; - struct callee { + struct { long pad; long r25, r24, r23, r22, r21, r20; long r19, r18, r17, r16, r15, r14, r13; diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c index 2f39028..d14764a 100644 --- a/arch/arc/kernel/disasm.c +++ b/arch/arc/kernel/disasm.c @@ -535,4 +535,4 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs, return instr.is_branch; } -#endif /* CONFIG_KGDB || CONFIG_MISALIGN_ACCESS || CONFIG_KPROBES */ +#endif /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */ diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index ef6800b..91eeab8 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -452,7 +452,7 @@ tracesys: ; using ERET won't work since next-PC has already committed lr r12, [efa] GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 - st r12, [r11, THREAD_FAULT_ADDR] + st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address ; PRE Sys Call Ptrace hook mov r0, sp ; pt_regs needed @@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork ;################### Special Sys Call Wrappers ########################## -; TBD: call do_fork directly from here -ARC_ENTRY sys_fork_wrapper - SAVE_CALLEE_SAVED_USER - bl @sys_fork - DISCARD_CALLEE_SAVED_USER - - GET_CURR_THR_INFO_FLAGS r10 - btst r10, TIF_SYSCALL_TRACE - bnz tracesys_exit - - b ret_from_system_call -ARC_EXIT sys_fork_wrapper - -ARC_ENTRY sys_vfork_wrapper - SAVE_CALLEE_SAVED_USER - bl @sys_vfork - DISCARD_CALLEE_SAVED_USER - - GET_CURR_THR_INFO_FLAGS r10 - btst r10, TIF_SYSCALL_TRACE - bnz tracesys_exit - - b ret_from_system_call -ARC_EXIT sys_vfork_wrapper - ARC_ENTRY sys_clone_wrapper SAVE_CALLEE_SAVED_USER bl @sys_clone diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c index 2888ba5..52bdc83 100644 --- a/arch/arc/kernel/kgdb.c +++ b/arch/arc/kernel/kgdb.c @@ -9,6 +9,7 @@ */ #include <linux/kgdb.h> +#include <linux/sched.h> #include <asm/disasm.h> #include <asm/cacheflush.h> diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 0a7531d..cad6685 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -41,37 +41,12 @@ SYSCALL_DEFINE0(arc_gettls) return task_thread_info(current)->thr_ptr; } -static inline void arch_idle(void) +void arch_cpu_idle(void) { /* sleep, but enable all interrupts before committing */ __asm__("sleep 0x3"); } -void cpu_idle(void) -{ - /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */ - - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - -doze: - local_irq_disable(); - if (!need_resched()) { - arch_idle(); - goto doze; - } else { - local_irq_enable(); - } - - rcu_idle_exit(); - tick_nohz_idle_exit(); - - schedule_preempt_disabled(); - } -} - asmlinkage void ret_from_fork(void); /* Layout of Child kernel mode stack as setup at the end of this function is diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index dc0f968..2d95ac0 
100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) n += scnprintf(buf + n, len - n, "\n"); -#ifdef _ASM_GENERIC_UNISTD_H n += scnprintf(buf + n, len - n, - "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n"); -#endif + "OS ABI [v3]\t: no-legacy-syscalls\n"); return buf; } diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 3af3e06..5c7fd60 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -141,7 +141,7 @@ void __cpuinit start_kernel_secondary(void) local_irq_enable(); preempt_disable(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } /* diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index a63ff84..ca0207b 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -220,13 +220,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) show_stacktrace(tsk, NULL); } -/* Expected by Rest of kernel code */ -void dump_stack(void) -{ - show_stacktrace(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); - /* Another API expected by schedular, shows up in "ps" as Wait Channel * Ofcourse just returning schedule( ) would be pointless so unwind until * the function is not in schedular code diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c index f6bdd07..9d6c1ca 100644 --- a/arch/arc/kernel/sys.c +++ b/arch/arc/kernel/sys.c @@ -6,8 +6,6 @@ #include <asm/syscalls.h> #define sys_clone sys_clone_wrapper -#define sys_fork sys_fork_wrapper -#define sys_vfork sys_vfork_wrapper #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 7c10873..0aec0198 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -71,7 +71,7 @@ void print_task_path_n_nm(struct task_struct *tsk, char *buf) } done: - pr_info("%s, TGID %u\n", path_nm, tsk->tgid); + pr_info("Path: %s\n", path_nm); } EXPORT_SYMBOL(print_task_path_n_nm); @@ -163,6 +163,7 @@ void show_regs(struct pt_regs *regs) return; print_task_path_n_nm(tsk, buf); + show_regs_print_info(KERN_INFO); if (current->thread.cause_code) show_ecr_verbose(regs); diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index caf797d..727d479 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -144,37 +144,18 @@ void __init mem_init(void) PAGES_TO_KB(reserved_pages)); } -static void __init free_init_pages(const char *what, unsigned long begin, - unsigned long end) -{ - unsigned long addr; - - pr_info("Freeing %s: %ldk [%lx] to [%lx]\n", - what, TO_KB(end - begin), begin, end); - - /* need to check that the page we free is not a partial page */ - for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } -} - /* * free_initmem: Free all the __init memory. 
*/ void __init_refok free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long)__init_begin, - (unsigned long)__init_end); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig index b41e786..295cefe 100644 --- a/arch/arc/plat-arcfpga/Kconfig +++ b/arch/arc/plat-arcfpga/Kconfig @@ -53,7 +53,7 @@ menuconfig ARC_HAS_BVCI_LAT_UNIT bool "BVCI Bus Latency Unit" depends on ARC_BOARD_ML509 || ARC_BOARD_ANGEL4 help - IP to add artifical latency to BVCI Bus Based FPGA builds. + IP to add artificial latency to BVCI Bus Based FPGA builds. The default latency (even worst case) for FPGA is non-realistic (~10 SDRAM, ~5 SSRAM). diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5b71469..006f983 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -15,6 +15,7 @@ config ARM select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_SMP_IDLE_THREAD + select GENERIC_IDLE_POLL_SETUP select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select HARDIRQS_SW_RESEND @@ -49,7 +50,6 @@ config ARM select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 - select HAVE_VIRT_TO_BUS select KTIME_SCALAR select PERF_USE_VMALLOC select RTC_LIB @@ -550,13 +550,14 @@ config ARCH_IXP4XX select GENERIC_CLOCKEVENTS select MIGHT_HAVE_PCI select NEED_MACH_IO_H + select USB_EHCI_BIG_ENDIAN_MMIO + select USB_EHCI_BIG_ENDIAN_DESC help Support for Intel's IXP4XX (XScale) family of processors. config ARCH_DOVE bool "Marvell Dove" select ARCH_REQUIRE_GPIOLIB - select COMMON_CLK_DOVE select CPU_V7 select GENERIC_CLOCKEVENTS select MIGHT_HAVE_PCI @@ -744,6 +745,7 @@ config ARCH_RPC select NEED_MACH_IO_H select NEED_MACH_MEMORY_H select NO_IOPORT + select VIRT_TO_BUS help On the Acorn Risc-PC, Linux can support the internal IDE disk and CD-ROM interface, serial and parallel port, and the floppy drive. @@ -879,6 +881,7 @@ config ARCH_SHARK select ISA_DMA select NEED_MACH_MEMORY_H select PCI + select VIRT_TO_BUS select ZONE_DMA help Support for the StrongARM based Digital DNARD machine, also known @@ -1006,12 +1009,12 @@ config ARCH_MULTI_V4_V5 bool config ARCH_MULTI_V6 - bool "ARMv6 based platforms (ARM11, Scorpion, ...)" + bool "ARMv6 based platforms (ARM11)" select ARCH_MULTI_V6_V7 select CPU_V6 config ARCH_MULTI_V7 - bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)" + bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)" default y select ARCH_MULTI_V6_V7 select ARCH_VEXPRESS @@ -1183,9 +1186,9 @@ config ARM_NR_BANKS default 8 config IWMMXT - bool "Enable iWMMXt support" + bool "Enable iWMMXt support" if !CPU_PJ4 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 - default y if PXA27x || PXA3xx || ARCH_MMP + default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 help Enable support for iWMMXt context switching at run time if running on a CPU that supports it. @@ -1439,6 +1442,16 @@ config ARM_ERRATA_775420 to deadlock. This workaround puts DSB before executing ISB if an abort may occur on cache maintenance. +config ARM_ERRATA_798181 + bool "ARM errata: TLBI/DSB failure on Cortex-A15" + depends on CPU_V7 && SMP + help + On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not + adequately shooting down all use of the old entries. 
This + option enables the Linux kernel workaround for this erratum + which sends an IPI to the CPUs that are running the same ASID + as the one being invalidated. + endmenu source "arch/arm/common/Kconfig" @@ -1462,10 +1475,6 @@ config ISA_DMA bool select ISA_DMA_API -config ARCH_NO_VIRT_TO_BUS - def_bool y - depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK - # Select ISA DMA interface config ISA_DMA_API bool @@ -1657,13 +1666,16 @@ config LOCAL_TIMERS accounting to be spread across the timer interval, preventing a "thundering herd" at every timer tick. +# The GPIO number here must be sorted by descending number. In case of +# a multiplatform kernel, we just want the highest value required by the +# selected platforms. config ARCH_NR_GPIO int default 1024 if ARCH_SHMOBILE || ARCH_TEGRA - default 355 if ARCH_U8500 - default 264 if MACH_H4700 default 512 if SOC_OMAP5 + default 355 if ARCH_U8500 default 288 if ARCH_VT8500 || ARCH_SUNXI + default 264 if MACH_H4700 default 0 help Maximum number of GPIOs in the system. @@ -1887,8 +1899,9 @@ config XEN_DOM0 config XEN bool "Xen guest support on ARM (EXPERIMENTAL)" - depends on ARM && OF + depends on ARM && AEABI && OF depends on CPU_V7 && !CPU_V6 + depends on !GENERIC_ATOMIC64 help Say Y if you want to run Linux in a Virtual Machine on Xen on ARM. @@ -2150,7 +2163,6 @@ endmenu menu "CPU Power Management" if ARCH_HAS_CPUFREQ - source "drivers/cpufreq/Kconfig" config CPU_FREQ_IMX @@ -2160,30 +2172,6 @@ config CPU_FREQ_IMX help This enables the CPUfreq driver for i.MX CPUs. -config CPU_FREQ_SA1100 - bool - -config CPU_FREQ_SA1110 - bool - -config CPU_FREQ_INTEGRATOR - tristate "CPUfreq driver for ARM Integrator CPUs" - depends on ARCH_INTEGRATOR && CPU_FREQ - default y - help - This enables the CPUfreq driver for ARM Integrator CPUs. - - For details, take a look at <file:Documentation/cpu-freq>. - - If in doubt, say Y. - -config CPU_FREQ_PXA - bool - depends on CPU_FREQ && ARCH_PXA && PXA25x - default y - select CPU_FREQ_DEFAULT_GOV_USERSPACE - select CPU_FREQ_TABLE - config CPU_FREQ_S3C bool help diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index acdddda..9b31f43 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -492,9 +492,10 @@ config DEBUG_IMX_UART_PORT DEBUG_IMX31_UART || \ DEBUG_IMX35_UART || \ DEBUG_IMX51_UART || \ - DEBUG_IMX50_IMX53_UART || \ + DEBUG_IMX53_UART || \ DEBUG_IMX6Q_UART default 1 + depends on ARCH_MXC help Choose UART port on which kernel low-level debug messages should be output. diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index 71768b8..84aa2ca 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -115,4 +115,4 @@ i: $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ $(obj)/Image System.map "$(INSTALL_PATH)" -subdir- := bootp compressed +subdir- := bootp compressed dts diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 5cad8a6..afed28e 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) endif -ccflags-y := -fpic -fno-builtin -I$(obj) +ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj) asflags-y := -Wa,-march=all -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. 
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts index dd0c57d..3234875 100644 --- a/arch/arm/boot/dts/armada-370-mirabox.dts +++ b/arch/arm/boot/dts/armada-370-mirabox.dts @@ -54,7 +54,7 @@ }; mvsdio@d00d4000 { - pinctrl-0 = <&sdio_pins2>; + pinctrl-0 = <&sdio_pins3>; pinctrl-names = "default"; status = "okay"; /* diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts index f8e4855..070bba4 100644 --- a/arch/arm/boot/dts/armada-370-rd.dts +++ b/arch/arm/boot/dts/armada-370-rd.dts @@ -64,5 +64,13 @@ status = "okay"; /* No CD or WP GPIOs */ }; + + usb@d0050000 { + status = "okay"; + }; + + usb@d0051000 { + status = "okay"; + }; }; }; diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi index 6f1acc7..5b70820 100644 --- a/arch/arm/boot/dts/armada-370-xp.dtsi +++ b/arch/arm/boot/dts/armada-370-xp.dtsi @@ -31,7 +31,6 @@ mpic: interrupt-controller@d0020000 { compatible = "marvell,mpic"; #interrupt-cells = <1>; - #address-cells = <1>; #size-cells = <1>; interrupt-controller; }; @@ -54,7 +53,7 @@ reg = <0xd0012000 0x100>; reg-shift = <2>; interrupts = <41>; - reg-io-width = <4>; + reg-io-width = <1>; status = "disabled"; }; serial@d0012100 { @@ -62,7 +61,7 @@ reg = <0xd0012100 0x100>; reg-shift = <2>; interrupts = <42>; - reg-io-width = <4>; + reg-io-width = <1>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 8188d13..a195deb 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -59,6 +59,12 @@ "mpp50", "mpp51", "mpp52"; marvell,function = "sd0"; }; + + sdio_pins3: sdio-pins3 { + marvell,pins = "mpp48", "mpp49", "mpp50", + "mpp51", "mpp52", "mpp53"; + marvell,function = "sd0"; + }; }; gpio0: gpio@d0018100 { diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi index 1443949..ca00d83 100644 --- a/arch/arm/boot/dts/armada-xp.dtsi +++ b/arch/arm/boot/dts/armada-xp.dtsi @@ -46,7 +46,7 @@ reg = <0xd0012200 0x100>; reg-shift = <2>; interrupts = <43>; - reg-io-width = <4>; + reg-io-width = <1>; status = "disabled"; }; serial@d0012300 { @@ -54,7 +54,7 @@ reg = <0xd0012300 0x100>; reg-shift = <2>; interrupts = <44>; - reg-io-width = <4>; + reg-io-width = <1>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index cb7bcc5..39253b9 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi @@ -322,6 +322,24 @@ }; }; + spi0 { + pinctrl_spi0: spi0-0 { + atmel,pins = + <0 0 0x1 0x0 /* PA0 periph A SPI0_MISO pin */ + 0 1 0x1 0x0 /* PA1 periph A SPI0_MOSI pin */ + 0 2 0x1 0x0>; /* PA2 periph A SPI0_SPCK pin */ + }; + }; + + spi1 { + pinctrl_spi1: spi1-0 { + atmel,pins = + <1 0 0x1 0x0 /* PB0 periph A SPI1_MISO pin */ + 1 1 0x1 0x0 /* PB1 periph A SPI1_MOSI pin */ + 1 2 0x1 0x0>; /* PB2 periph A SPI1_SPCK pin */ + }; + }; + pioA: gpio@fffff400 { compatible = "atmel,at91rm9200-gpio"; reg = <0xfffff400 0x200>; @@ -471,6 +489,28 @@ status = "disabled"; }; + spi0: spi@fffc8000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xfffc8000 0x200>; + interrupts = <12 4 3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi0>; + status = "disabled"; + }; + + spi1: spi@fffcc000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xfffcc000 0x200>; + interrupts = <13 4 3>; + pinctrl-names = "default"; + 
pinctrl-0 = <&pinctrl_spi1>; + status = "disabled"; + }; + adc0: adc@fffe0000 { compatible = "atmel,at91sam9260-adc"; reg = <0xfffe0000 0x100>; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 271d4de..94b58ab 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -303,6 +303,24 @@ }; }; + spi0 { + pinctrl_spi0: spi0-0 { + atmel,pins = + <0 0 0x2 0x0 /* PA0 periph B SPI0_MISO pin */ + 0 1 0x2 0x0 /* PA1 periph B SPI0_MOSI pin */ + 0 2 0x2 0x0>; /* PA2 periph B SPI0_SPCK pin */ + }; + }; + + spi1 { + pinctrl_spi1: spi1-0 { + atmel,pins = + <1 12 0x1 0x0 /* PB12 periph A SPI1_MISO pin */ + 1 13 0x1 0x0 /* PB13 periph A SPI1_MOSI pin */ + 1 14 0x1 0x0>; /* PB14 periph A SPI1_SPCK pin */ + }; + }; + pioA: gpio@fffff200 { compatible = "atmel,at91rm9200-gpio"; reg = <0xfffff200 0x200>; @@ -462,6 +480,28 @@ reg = <0xfffffd40 0x10>; status = "disabled"; }; + + spi0: spi@fffa4000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xfffa4000 0x200>; + interrupts = <14 4 3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi0>; + status = "disabled"; + }; + + spi1: spi@fffa8000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xfffa8000 0x200>; + interrupts = <15 4 3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi1>; + status = "disabled"; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts index 1eb0872..a14e424 100644 --- a/arch/arm/boot/dts/at91sam9263ek.dts +++ b/arch/arm/boot/dts/at91sam9263ek.dts @@ -79,6 +79,16 @@ }; }; }; + + spi0: spi@fffa4000 { + status = "okay"; + cs-gpios = <&pioA 5 0>, <0>, <0>, <0>; + mtd_dataflash@0 { + compatible = "atmel,at45", "atmel,dataflash"; + spi-max-frequency = <50000000>; + reg = <0>; + }; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi index da15e83..23d1f46 100644 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi @@ -96,6 +96,16 @@ status = "okay"; pinctrl-0 = <&pinctrl_ssc0_tx>; }; + + spi0: spi@fffc8000 { + status = "okay"; + cs-gpios = <0>, <&pioC 11 0>, <0>, <0>; + mtd_dataflash@0 { + compatible = "atmel,at45", "atmel,dataflash"; + spi-max-frequency = <50000000>; + reg = <1>; + }; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index 6b1d4ca..cfdf429 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -322,6 +322,24 @@ }; }; + spi0 { + pinctrl_spi0: spi0-0 { + atmel,pins = + <1 0 0x1 0x0 /* PB0 periph A SPI0_MISO pin */ + 1 1 0x1 0x0 /* PB1 periph A SPI0_MOSI pin */ + 1 2 0x1 0x0>; /* PB2 periph A SPI0_SPCK pin */ + }; + }; + + spi1 { + pinctrl_spi1: spi1-0 { + atmel,pins = + <1 14 0x1 0x0 /* PB14 periph A SPI1_MISO pin */ + 1 15 0x1 0x0 /* PB15 periph A SPI1_MOSI pin */ + 1 16 0x1 0x0>; /* PB16 periph A SPI1_SPCK pin */ + }; + }; + pioA: gpio@fffff200 { compatible = "atmel,at91rm9200-gpio"; reg = <0xfffff200 0x200>; @@ -531,6 +549,28 @@ reg = <0xfffffd40 0x10>; status = "disabled"; }; + + spi0: spi@fffa4000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xfffa4000 0x200>; + interrupts = <14 4 3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi0>; + status = "disabled"; + }; + + spi1: spi@fffa8000 { + 
#address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xfffa8000 0x200>; + interrupts = <15 4 3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi1>; + status = "disabled"; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts index 20c3191..92c52a7 100644 --- a/arch/arm/boot/dts/at91sam9m10g45ek.dts +++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts @@ -102,6 +102,16 @@ }; }; }; + + spi0: spi@fffa4000{ + status = "okay"; + cs-gpios = <&pioB 3 0>, <0>, <0>, <0>; + mtd_dataflash@0 { + compatible = "atmel,at45", "atmel,dataflash"; + spi-max-frequency = <13000000>; + reg = <0>; + }; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi index 7750f98..b2961f1 100644 --- a/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/arch/arm/boot/dts/at91sam9n12.dtsi @@ -261,6 +261,24 @@ }; }; + spi0 { + pinctrl_spi0: spi0-0 { + atmel,pins = + <0 11 0x1 0x0 /* PA11 periph A SPI0_MISO pin */ + 0 12 0x1 0x0 /* PA12 periph A SPI0_MOSI pin */ + 0 13 0x1 0x0>; /* PA13 periph A SPI0_SPCK pin */ + }; + }; + + spi1 { + pinctrl_spi1: spi1-0 { + atmel,pins = + <0 21 0x2 0x0 /* PA21 periph B SPI1_MISO pin */ + 0 22 0x2 0x0 /* PA22 periph B SPI1_MOSI pin */ + 0 23 0x2 0x0>; /* PA23 periph B SPI1_SPCK pin */ + }; + }; + pioA: gpio@fffff400 { compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; reg = <0xfffff400 0x200>; @@ -373,6 +391,28 @@ #size-cells = <0>; status = "disabled"; }; + + spi0: spi@f0000000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xf0000000 0x100>; + interrupts = <13 4 3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi0>; + status = "disabled"; + }; + + spi1: spi@f0004000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xf0004000 0x100>; + interrupts = <14 4 3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi1>; + status = "disabled"; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index d400f8d..34c842b 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts @@ -67,6 +67,16 @@ }; }; }; + + spi0: spi@f0000000 { + status = "okay"; + cs-gpios = <&pioA 14 0>, <0>, <0>, <0>; + m25p80@0 { + compatible = "atmel,at25df321a"; + spi-max-frequency = <50000000>; + reg = <0>; + }; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index aa98e64..347b438 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -238,8 +238,32 @@ nand { pinctrl_nand: nand-0 { atmel,pins = - <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */ - 3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */ + <3 0 0x1 0x0 /* PD0 periph A Read Enable */ + 3 1 0x1 0x0 /* PD1 periph A Write Enable */ + 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */ + 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */ + 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */ + 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */ + 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */ + 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */ + 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */ + 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */ + 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */ + 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */ + 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */ + 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */ + }; + + 
pinctrl_nand_16bits: nand_16bits-0 { + atmel,pins = + <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */ + 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */ + 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */ + 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */ + 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */ + 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */ + 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */ + 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */ }; }; @@ -319,6 +343,24 @@ }; }; + spi0 { + pinctrl_spi0: spi0-0 { + atmel,pins = + <0 11 0x1 0x0 /* PA11 periph A SPI0_MISO pin */ + 0 12 0x1 0x0 /* PA12 periph A SPI0_MOSI pin */ + 0 13 0x1 0x0>; /* PA13 periph A SPI0_SPCK pin */ + }; + }; + + spi1 { + pinctrl_spi1: spi1-0 { + atmel,pins = + <0 21 0x2 0x0 /* PA21 periph B SPI1_MISO pin */ + 0 22 0x2 0x0 /* PA22 periph B SPI1_MOSI pin */ + 0 23 0x2 0x0>; /* PA23 periph B SPI1_SPCK pin */ + }; + }; + pioA: gpio@fffff400 { compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; reg = <0xfffff400 0x200>; @@ -505,6 +547,28 @@ trigger-value = <0x6>; }; }; + + spi0: spi@f0000000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xf0000000 0x100>; + interrupts = <13 4 3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi0>; + status = "disabled"; + }; + + spi1: spi@f0004000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "atmel,at91rm9200-spi"; + reg = <0xf0004000 0x100>; + interrupts = <14 4 3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi1>; + status = "disabled"; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi index 8a7cf1d..09f5e66 100644 --- a/arch/arm/boot/dts/at91sam9x5ek.dtsi +++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi @@ -84,6 +84,16 @@ }; }; }; + + spi0: spi@f0000000 { + status = "okay"; + cs-gpios = <&pioA 14 0>, <0>, <0>, <0>; + m25p80@0 { + compatible = "atmel,at25df321a"; + spi-max-frequency = <50000000>; + reg = <0>; + }; + }; }; usb0: ohci@00600000 { diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi index 4bf2a87..7e0481e 100644 --- a/arch/arm/boot/dts/bcm2835.dtsi +++ b/arch/arm/boot/dts/bcm2835.dtsi @@ -105,7 +105,7 @@ compatible = "fixed-clock"; reg = <1>; #clock-cells = <0>; - clock-frequency = <150000000>; + clock-frequency = <250000000>; }; }; }; diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi index 69140ba..aaa63d0 100644 --- a/arch/arm/boot/dts/dbx5x0.dtsi +++ b/arch/arm/boot/dts/dbx5x0.dtsi @@ -191,8 +191,8 @@ prcmu: prcmu@80157000 { compatible = "stericsson,db8500-prcmu"; - reg = <0x80157000 0x1000>; - reg-names = "prcmu"; + reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>; + reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm"; interrupts = <0 47 0x4>; #address-cells = <1>; #size-cells = <1>; @@ -319,9 +319,8 @@ }; }; - ab8500@5 { + ab8500 { compatible = "stericsson,ab8500"; - reg = <5>; /* mailbox 5 is i2c */ interrupt-parent = <&intc>; interrupts = <0 40 0x4>; interrupt-controller; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 67dbe20..f7509ca 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -197,6 +197,11 @@ status = "disabled"; }; + rtc@d8500 { + compatible = "marvell,orion-rtc"; + reg = <0xd8500 0x20>; + }; + crypto: crypto@30000 { compatible = "marvell,orion-crypto"; reg = <0x30000 0x10000>, diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index e1347fc..1a62bcf 100644 --- 
a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi @@ -275,18 +275,27 @@ compatible = "arm,pl330", "arm,primecell"; reg = <0x12680000 0x1000>; interrupts = <0 35 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; pdma1: pdma@12690000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x12690000 0x1000>; interrupts = <0 36 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; mdma1: mdma@12850000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x12850000 0x1000>; interrupts = <0 34 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <1>; }; }; }; diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi index 5f3562a..9a99755 100644 --- a/arch/arm/boot/dts/exynos5440.dtsi +++ b/arch/arm/boot/dts/exynos5440.dtsi @@ -142,12 +142,18 @@ compatible = "arm,pl330", "arm,primecell"; reg = <0x120000 0x1000>; interrupts = <0 34 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; pdma1: pdma@121B0000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x121000 0x1000>; interrupts = <0 35 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; }; diff --git a/arch/arm/boot/dts/href.dtsi b/arch/arm/boot/dts/href.dtsi index 592fb9d..379128e 100644 --- a/arch/arm/boot/dts/href.dtsi +++ b/arch/arm/boot/dts/href.dtsi @@ -221,7 +221,7 @@ }; }; - ab8500@5 { + ab8500 { ab8500-regulators { ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { regulator-name = "V-DISPLAY"; diff --git a/arch/arm/boot/dts/hrefv60plus.dts b/arch/arm/boot/dts/hrefv60plus.dts index 55f4191..2b587a7 100644 --- a/arch/arm/boot/dts/hrefv60plus.dts +++ b/arch/arm/boot/dts/hrefv60plus.dts @@ -158,7 +158,7 @@ }; }; - ab8500@5 { + ab8500 { ab8500-regulators { ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { regulator-name = "V-DISPLAY"; diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts index 6ce3d17..fd36e1c 100644 --- a/arch/arm/boot/dts/imx28-m28evk.dts +++ b/arch/arm/boot/dts/imx28-m28evk.dts @@ -152,7 +152,6 @@ i2c0: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; - clock-frequency = <400000>; status = "okay"; sgtl5000: codec@0a { diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts index e6cde8a..6c6a544 100644 --- a/arch/arm/boot/dts/imx28-sps1.dts +++ b/arch/arm/boot/dts/imx28-sps1.dts @@ -70,7 +70,6 @@ i2c0: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; - clock-frequency = <400000>; status = "okay"; rtc: rtc@51 { diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts index e54fffd..468c0a1 100644 --- a/arch/arm/boot/dts/imx53-mba53.dts +++ b/arch/arm/boot/dts/imx53-mba53.dts @@ -42,10 +42,9 @@ fsl,pins = <689 0x10000 /* DISP1_DRDY */ 482 0x10000 /* DISP1_HSYNC */ 489 0x10000 /* DISP1_VSYNC */ - 684 0x10000 /* DISP1_DAT_0 */ 515 0x10000 /* DISP1_DAT_22 */ 523 0x10000 /* DISP1_DAT_23 */ - 543 0x10000 /* DISP1_DAT_21 */ + 545 0x10000 /* DISP1_DAT_21 */ 553 0x10000 /* DISP1_DAT_20 */ 558 0x10000 /* DISP1_DAT_19 */ 564 0x10000 /* DISP1_DAT_18 */ diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 06ec460..281a223 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -91,6 +91,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x00a00600 0x20>; interrupts = <1 13 0xf01>; + clocks = <&clks 15>; }; L2: l2-cache@00a02000 { diff --git a/arch/arm/boot/dts/kirkwood-dns320.dts b/arch/arm/boot/dts/kirkwood-dns320.dts index 
5bb0bf3..c9c44b2 100644 --- a/arch/arm/boot/dts/kirkwood-dns320.dts +++ b/arch/arm/boot/dts/kirkwood-dns320.dts @@ -42,12 +42,10 @@ ocp@f1000000 { serial@12000 { - clock-frequency = <166666667>; status = "okay"; }; serial@12100 { - clock-frequency = <166666667>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dns325.dts b/arch/arm/boot/dts/kirkwood-dns325.dts index d430713..e4e4930 100644 --- a/arch/arm/boot/dts/kirkwood-dns325.dts +++ b/arch/arm/boot/dts/kirkwood-dns325.dts @@ -50,7 +50,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dockstar.dts b/arch/arm/boot/dts/kirkwood-dockstar.dts index 2e3dd34..0196cf6 100644 --- a/arch/arm/boot/dts/kirkwood-dockstar.dts +++ b/arch/arm/boot/dts/kirkwood-dockstar.dts @@ -37,7 +37,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts index ef2d8c7..289e51d8 100644 --- a/arch/arm/boot/dts/kirkwood-dreamplug.dts +++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts @@ -38,7 +38,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts index 1b133e0..c3573be 100644 --- a/arch/arm/boot/dts/kirkwood-goflexnet.dts +++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts @@ -73,11 +73,11 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; nand@3000000 { + chip-delay = <40>; status = "okay"; partition@0 { diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts index 71902da..5335b1a 100644 --- a/arch/arm/boot/dts/kirkwood-ib62x0.dts +++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts @@ -51,7 +51,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "okay"; }; diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts index 504f16b..12ccf74 100644 --- a/arch/arm/boot/dts/kirkwood-iconnect.dts +++ b/arch/arm/boot/dts/kirkwood-iconnect.dts @@ -78,7 +78,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts index 6cae459..3694e94 100644 --- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts +++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts @@ -96,11 +96,11 @@ marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { - marvell,pins = "mpp44"; + marvell,pins = "mpp46"; marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { - marvell,pins = "mpp45"; + marvell,pins = "mpp47"; marvell,function = "gpio"; }; @@ -115,7 +115,6 @@ }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; @@ -158,14 +157,14 @@ gpios = <&gpio0 16 0>; linux,default-trigger = "default-on"; }; - health_led1 { + rebuild_led { + label = "status:white:rebuild_led"; + gpios = <&gpio1 4 0>; + }; + health_led { label = "status:red:health_led"; gpios = <&gpio1 5 0>; }; - health_led2 { - label = "status:white:health_led"; - gpios = <&gpio1 4 0>; - }; backup_led { label = "status:blue:backup_led"; gpios = <&gpio0 15 0>; diff --git a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts index 8db3123..5bbd054 100644 --- a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts +++ b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts @@ -34,7 +34,6 @@ }; serial@12000 { - clock-frequency = 
<200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-lschlv2.dts b/arch/arm/boot/dts/kirkwood-lschlv2.dts index 9510c9e..9f55d95 100644 --- a/arch/arm/boot/dts/kirkwood-lschlv2.dts +++ b/arch/arm/boot/dts/kirkwood-lschlv2.dts @@ -13,7 +13,6 @@ ocp@f1000000 { serial@12000 { - clock-frequency = <166666667>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-lsxhl.dts b/arch/arm/boot/dts/kirkwood-lsxhl.dts index 739019c..5c84c11 100644 --- a/arch/arm/boot/dts/kirkwood-lsxhl.dts +++ b/arch/arm/boot/dts/kirkwood-lsxhl.dts @@ -13,7 +13,6 @@ ocp@f1000000 { serial@12000 { - clock-frequency = <200000000>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts index 662dfd8..7588241 100644 --- a/arch/arm/boot/dts/kirkwood-mplcec4.dts +++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts @@ -90,7 +90,6 @@ }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi index e8e7ece..6affd92 100644 --- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi +++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi @@ -23,7 +23,6 @@ }; serial@12000 { - clock-frequency = <166666667>; status = "okay"; }; diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts b/arch/arm/boot/dts/kirkwood-nsa310.dts index 3a178cf..a7412b9 100644 --- a/arch/arm/boot/dts/kirkwood-nsa310.dts +++ b/arch/arm/boot/dts/kirkwood-nsa310.dts @@ -117,7 +117,6 @@ }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts index ede7fe0d..d27f724 100644 --- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts +++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts @@ -18,12 +18,10 @@ ocp@f1000000 { serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; serial@12100 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts index 842ff95..66eb45b 100644 --- a/arch/arm/boot/dts/kirkwood-topkick.dts +++ b/arch/arm/boot/dts/kirkwood-topkick.dts @@ -108,7 +108,6 @@ }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi index 2c738d9..fada7e6 100644 --- a/arch/arm/boot/dts/kirkwood.dtsi +++ b/arch/arm/boot/dts/kirkwood.dtsi @@ -38,6 +38,7 @@ interrupt-controller; #interrupt-cells = <2>; interrupts = <35>, <36>, <37>, <38>; + clocks = <&gate_clk 7>; }; gpio1: gpio@10140 { @@ -49,6 +50,7 @@ interrupt-controller; #interrupt-cells = <2>; interrupts = <39>, <40>, <41>; + clocks = <&gate_clk 7>; }; serial@12000 { @@ -57,7 +59,6 @@ reg-shift = <2>; interrupts = <33>; clocks = <&gate_clk 7>; - /* set clock-frequency in board dts */ status = "disabled"; }; @@ -67,7 +68,6 @@ reg-shift = <2>; interrupts = <34>; clocks = <&gate_clk 7>; - /* set clock-frequency in board dts */ status = "disabled"; }; @@ -75,6 +75,7 @@ compatible = "marvell,kirkwood-rtc", "marvell,orion-rtc"; reg = <0x10300 0x20>; interrupts = <53>; + clocks = <&gate_clk 7>; }; spi@10600 { diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts index 31f2157..67f8670 100644 --- a/arch/arm/boot/dts/msm8660-surf.dts +++ b/arch/arm/boot/dts/msm8660-surf.dts @@ -38,4 +38,10 @@ <0x19c00000 0x1000>; interrupts = <0 195 0x0>; }; + + qcom,ssbi@500000 { + compatible = "qcom,ssbi"; + reg = <0x500000 0x1000>; + 
qcom,controller-type = "pmic-arbiter"; + }; }; diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts index 9e621b5..c9b09a8 100644 --- a/arch/arm/boot/dts/msm8960-cdp.dts +++ b/arch/arm/boot/dts/msm8960-cdp.dts @@ -38,4 +38,10 @@ <0x16400000 0x1000>; interrupts = <0 154 0x0>; }; + + qcom,ssbi@500000 { + compatible = "qcom,ssbi"; + reg = <0x500000 0x1000>; + qcom,controller-type = "pmic-arbiter"; + }; }; diff --git a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts index 5a3a58b..0077fc8 100644 --- a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts +++ b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts @@ -11,7 +11,7 @@ / { model = "LaCie Ethernet Disk mini V2"; - compatible = "lacie,ethernet-disk-mini-v2", "marvell-orion5x-88f5182", "marvell,orion5x"; + compatible = "lacie,ethernet-disk-mini-v2", "marvell,orion5x-88f5182", "marvell,orion5x"; memory { reg = <0x00000000 0x4000000>; /* 64 MB */ diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi index 8aad00f..f7bec3b 100644 --- a/arch/arm/boot/dts/orion5x.dtsi +++ b/arch/arm/boot/dts/orion5x.dtsi @@ -13,6 +13,9 @@ compatible = "marvell,orion5x"; interrupt-parent = <&intc>; + aliases { + gpio0 = &gpio0; + }; intc: interrupt-controller { compatible = "marvell,orion-intc", "marvell,intc"; interrupt-controller; @@ -32,7 +35,9 @@ #gpio-cells = <2>; gpio-controller; reg = <0x10100 0x40>; - ngpio = <32>; + ngpios = <32>; + interrupt-controller; + #interrupt-cells = <2>; interrupts = <6>, <7>, <8>, <9>; }; @@ -91,7 +96,7 @@ reg = <0x90000 0x10000>, <0xf2200000 0x800>; reg-names = "regs", "sram"; - interrupts = <22>; + interrupts = <28>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts index 27f31a5..d3ec32f 100644 --- a/arch/arm/boot/dts/snowball.dts +++ b/arch/arm/boot/dts/snowball.dts @@ -298,7 +298,7 @@ }; }; - ab8500@5 { + ab8500 { ab8500-regulators { ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { regulator-name = "V-DISPLAY"; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 936d230..7e8769b 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -75,6 +75,9 @@ compatible = "arm,pl330", "arm,primecell"; reg = <0xffe01000 0x1000>; interrupts = <0 180 4>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; }; diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi index 1513c19..122ae94 100644 --- a/arch/arm/boot/dts/spear1310.dtsi +++ b/arch/arm/boot/dts/spear1310.dtsi @@ -89,7 +89,7 @@ pinmux: pinmux@e0700000 { compatible = "st,spear1310-pinmux"; reg = <0xe0700000 0x1000>; - #gpio-range-cells = <2>; + #gpio-range-cells = <3>; }; apb { @@ -212,7 +212,7 @@ interrupt-controller; gpio-controller; #gpio-cells = <2>; - gpio-ranges = <&pinmux 0 246>; + gpio-ranges = <&pinmux 0 0 246>; status = "disabled"; st-plgpio,ngpio = <246>; diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index 34da11a..c511c47 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi @@ -63,7 +63,7 @@ pinmux: pinmux@e0700000 { compatible = "st,spear1340-pinmux"; reg = <0xe0700000 0x1000>; - #gpio-range-cells = <2>; + #gpio-range-cells = <3>; }; pwm: pwm@e0180000 { @@ -127,7 +127,7 @@ interrupt-controller; gpio-controller; #gpio-cells = <2>; - gpio-ranges = <&pinmux 0 252>; + gpio-ranges = <&pinmux 0 0 252>; status = "disabled"; 
st-plgpio,ngpio = <250>; diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi index ab45b8c..9537208 100644 --- a/arch/arm/boot/dts/spear310.dtsi +++ b/arch/arm/boot/dts/spear310.dtsi @@ -25,7 +25,7 @@ pinmux: pinmux@b4000000 { compatible = "st,spear310-pinmux"; reg = <0xb4000000 0x1000>; - #gpio-range-cells = <2>; + #gpio-range-cells = <3>; }; fsmc: flash@44000000 { @@ -102,7 +102,7 @@ interrupt-controller; gpio-controller; #gpio-cells = <2>; - gpio-ranges = <&pinmux 0 102>; + gpio-ranges = <&pinmux 0 0 102>; status = "disabled"; st-plgpio,ngpio = <102>; diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi index caa5520..ffea342 100644 --- a/arch/arm/boot/dts/spear320.dtsi +++ b/arch/arm/boot/dts/spear320.dtsi @@ -24,7 +24,7 @@ pinmux: pinmux@b3000000 { compatible = "st,spear320-pinmux"; reg = <0xb3000000 0x1000>; - #gpio-range-cells = <2>; + #gpio-range-cells = <3>; }; clcd@90000000 { @@ -130,7 +130,7 @@ interrupt-controller; gpio-controller; #gpio-cells = <2>; - gpio-ranges = <&pinmux 0 102>; + gpio-ranges = <&pinmux 0 0 102>; status = "disabled"; st-plgpio,ngpio = <102>; diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 9a42893..3d3f64d 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -118,6 +118,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x50040600 0x20>; interrupts = <1 13 0x304>; + clocks = <&tegra_car 132>; }; intc: interrupt-controller { @@ -384,7 +385,7 @@ spi@7000d800 { compatible = "nvidia,tegra20-slink"; - reg = <0x7000d480 0x200>; + reg = <0x7000d800 0x200>; interrupts = <0 83 0x04>; nvidia,dma-request-selector = <&apbdma 17>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index 767803e..dbf46c2 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -119,6 +119,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x50040600 0x20>; interrupts = <1 13 0xf04>; + clocks = <&tegra_car 214>; }; intc: interrupt-controller { @@ -371,7 +372,7 @@ spi@7000d800 { compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink"; - reg = <0x7000d480 0x200>; + reg = <0x7000d800 0x200>; interrupts = <0 83 0x04>; nvidia,dma-request-selector = <&apbdma 17>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/vt8500-bv07.dts b/arch/arm/boot/dts/vt8500-bv07.dts index 567cf4e..877b33a 100644 --- a/arch/arm/boot/dts/vt8500-bv07.dts +++ b/arch/arm/boot/dts/vt8500-bv07.dts @@ -11,26 +11,22 @@ / { model = "Benign BV07 Netbook"; +}; - /* - * Display node is based on Sascha Hauer's patch on dri-devel. - * Added a bpp property to calculate the size of the framebuffer - * until the binding is formalized. 
- */ - display: display@0 { - modes { - mode0: mode@0 { - hactive = <800>; - vactive = <480>; - hback-porch = <88>; - hfront-porch = <40>; - hsync-len = <0>; - vback-porch = <32>; - vfront-porch = <11>; - vsync-len = <1>; - clock = <0>; /* unused but required */ - bpp = <16>; /* non-standard but required */ - }; +&fb { + bits-per-pixel = <16>; + display-timings { + native-mode = <&timing0>; + timing0: 800x480 { + clock-frequency = <0>; /* unused but required */ + hactive = <800>; + vactive = <480>; + hfront-porch = <40>; + hback-porch = <88>; + hsync-len = <0>; + vback-porch = <32>; + vfront-porch = <11>; + vsync-len = <1>; }; }; }; diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi index cf31ced..68c8dc6 100644 --- a/arch/arm/boot/dts/vt8500.dtsi +++ b/arch/arm/boot/dts/vt8500.dtsi @@ -98,12 +98,10 @@ interrupts = <43>; }; - fb@d800e400 { + fb: fb@d8050800 { compatible = "via,vt8500-fb"; reg = <0xd800e400 0x400>; interrupts = <12>; - display = <&display>; - default-mode = <&mode0>; }; ge_rops@d8050400 { diff --git a/arch/arm/boot/dts/wm8505-ref.dts b/arch/arm/boot/dts/wm8505-ref.dts index fd4e248..edd2cec 100644 --- a/arch/arm/boot/dts/wm8505-ref.dts +++ b/arch/arm/boot/dts/wm8505-ref.dts @@ -11,26 +11,22 @@ / { model = "Wondermedia WM8505 Netbook"; +}; - /* - * Display node is based on Sascha Hauer's patch on dri-devel. - * Added a bpp property to calculate the size of the framebuffer - * until the binding is formalized. - */ - display: display@0 { - modes { - mode0: mode@0 { - hactive = <800>; - vactive = <480>; - hback-porch = <88>; - hfront-porch = <40>; - hsync-len = <0>; - vback-porch = <32>; - vfront-porch = <11>; - vsync-len = <1>; - clock = <0>; /* unused but required */ - bpp = <32>; /* non-standard but required */ - }; +&fb { + bits-per-pixel = <32>; + display-timings { + native-mode = <&timing0>; + timing0: 800x480 { + clock-frequency = <0>; /* unused but required */ + hactive = <800>; + vactive = <480>; + hfront-porch = <40>; + hback-porch = <88>; + hsync-len = <0>; + vback-porch = <32>; + vfront-porch = <11>; + vsync-len = <1>; }; }; }; diff --git a/arch/arm/boot/dts/wm8505.dtsi b/arch/arm/boot/dts/wm8505.dtsi index e74a1c0..bcf668d 100644 --- a/arch/arm/boot/dts/wm8505.dtsi +++ b/arch/arm/boot/dts/wm8505.dtsi @@ -128,11 +128,9 @@ interrupts = <0>; }; - fb@d8050800 { + fb: fb@d8050800 { compatible = "wm,wm8505-fb"; reg = <0xd8050800 0x200>; - display = <&display>; - default-mode = <&mode0>; }; ge_rops@d8050400 { diff --git a/arch/arm/boot/dts/wm8650-mid.dts b/arch/arm/boot/dts/wm8650-mid.dts index cefd938..61671a0 100644 --- a/arch/arm/boot/dts/wm8650-mid.dts +++ b/arch/arm/boot/dts/wm8650-mid.dts @@ -11,26 +11,24 @@ / { model = "Wondermedia WM8650-MID Tablet"; +}; + +&fb { + bits-per-pixel = <16>; - /* - * Display node is based on Sascha Hauer's patch on dri-devel. - * Added a bpp property to calculate the size of the framebuffer - * until the binding is formalized. 
- */ - display: display@0 { - modes { - mode0: mode@0 { - hactive = <800>; - vactive = <480>; - hback-porch = <88>; - hfront-porch = <40>; - hsync-len = <0>; - vback-porch = <32>; - vfront-porch = <11>; - vsync-len = <1>; - clock = <0>; /* unused but required */ - bpp = <16>; /* non-standard but required */ - }; + display-timings { + native-mode = <&timing0>; + timing0: 800x480 { + clock-frequency = <0>; /* unused but required */ + hactive = <800>; + vactive = <480>; + hfront-porch = <40>; + hback-porch = <88>; + hsync-len = <0>; + vback-porch = <32>; + vfront-porch = <11>; + vsync-len = <1>; }; }; }; + diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi index db3c0a1..9313407 100644 --- a/arch/arm/boot/dts/wm8650.dtsi +++ b/arch/arm/boot/dts/wm8650.dtsi @@ -128,11 +128,9 @@ interrupts = <43>; }; - fb@d8050800 { + fb: fb@d8050800 { compatible = "wm,wm8505-fb"; reg = <0xd8050800 0x200>; - display = <&display>; - default-mode = <&mode0>; }; ge_rops@d8050400 { diff --git a/arch/arm/boot/dts/wm8850-w70v2.dts b/arch/arm/boot/dts/wm8850-w70v2.dts index fcc660c..32d2253 100644 --- a/arch/arm/boot/dts/wm8850-w70v2.dts +++ b/arch/arm/boot/dts/wm8850-w70v2.dts @@ -15,28 +15,6 @@ / { model = "Wondermedia WM8850-W70v2 Tablet"; - /* - * Display node is based on Sascha Hauer's patch on dri-devel. - * Added a bpp property to calculate the size of the framebuffer - * until the binding is formalized. - */ - display: display@0 { - modes { - mode0: mode@0 { - hactive = <800>; - vactive = <480>; - hback-porch = <88>; - hfront-porch = <40>; - hsync-len = <0>; - vback-porch = <32>; - vfront-porch = <11>; - vsync-len = <1>; - clock = <0>; /* unused but required */ - bpp = <16>; /* non-standard but required */ - }; - }; - }; - backlight { compatible = "pwm-backlight"; pwms = <&pwm 0 50000 1>; /* duty inverted */ @@ -45,3 +23,21 @@ default-brightness-level = <5>; }; }; + +&fb { + bits-per-pixel = <16>; + display-timings { + native-mode = <&timing0>; + timing0: 800x480 { + clock-frequency = <0>; /* unused but required */ + hactive = <800>; + vactive = <480>; + hfront-porch = <40>; + hback-porch = <88>; + hsync-len = <0>; + vback-porch = <32>; + vfront-porch = <11>; + vsync-len = <1>; + }; + }; +}; diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi index e8cbfdc..7149cd1 100644 --- a/arch/arm/boot/dts/wm8850.dtsi +++ b/arch/arm/boot/dts/wm8850.dtsi @@ -135,11 +135,9 @@ }; }; - fb@d8051700 { + fb: fb@d8051700 { compatible = "wm,wm8505-fb"; reg = <0xd8051700 0x200>; - display = <&display>; - default-mode = <&mode0>; }; ge_rops@d8050400 { diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index e36b010..088d6c1 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -188,6 +188,7 @@ CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_MXC=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_PHY=y CONFIG_USB_MXS_PHY=y CONFIG_USB_STORAGE=y CONFIG_MMC=y diff --git a/arch/arm/configs/kirkwood_defconfig b/arch/arm/configs/kirkwood_defconfig index 13482ea..93f3794 100644 --- a/arch/arm/configs/kirkwood_defconfig +++ b/arch/arm/configs/kirkwood_defconfig @@ -56,7 +56,6 @@ CONFIG_AEABI=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_KIRKWOOD=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig index 92386b2..afa7249f 100644 --- a/arch/arm/configs/lpc32xx_defconfig +++ 
b/arch/arm/configs/lpc32xx_defconfig @@ -134,6 +134,7 @@ CONFIG_SND_DEBUG_VERBOSE=y # CONFIG_SND_SPI is not set CONFIG_SND_SOC=y CONFIG_USB=y +CONFIG_USB_PHY=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=y CONFIG_USB_GADGET=y diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index fbbc5bb..87924d6 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig @@ -116,9 +116,11 @@ CONFIG_SND_SOC=y CONFIG_SND_MXS_SOC=y CONFIG_SND_SOC_MXS_SGTL5000=y CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_HOST=y CONFIG_USB_STORAGE=y +CONFIG_USB_PHY=y CONFIG_USB_MXS_PHY=y CONFIG_MMC=y CONFIG_MMC_MXS=y diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig index 42eab9a..7e0ebb6 100644 --- a/arch/arm/configs/omap1_defconfig +++ b/arch/arm/configs/omap1_defconfig @@ -195,6 +195,7 @@ CONFIG_SND_SOC=y CONFIG_SND_OMAP_SOC=y # CONFIG_USB_HID is not set CONFIG_USB=y +CONFIG_USB_PHY=y CONFIG_USB_DEBUG=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b16bae2..bd07864 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -126,6 +126,8 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_TWL4030_PWRBUTTON=y CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=32 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index 720799f..dff714d 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,7 +24,7 @@ extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); void (*udelay)(unsigned long); - bool const_clock; + unsigned long ticks_per_jiffy; } arm_delay_ops; #define __delay(n) arm_delay_ops.delay(n) diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index cca9f15..ea289e1 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -19,14 +19,6 @@ #undef _CACHE #undef MULTI_CACHE -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v3 -# endif -#endif - #if defined(CONFIG_CPU_CACHE_V4) # ifdef _CACHE # define MULTI_CACHE 1 diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 02fe2fb..ed94b1a 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX processor registers */ #define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000 -#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 +#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000 #define IOP3XX_PERIPHERAL_SIZE 0x00002000 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\ IOP3XX_PERIPHERAL_SIZE - 1) diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 8c5e828..91b99ab 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page); #endif #endif +/* + * Needed to be able to broadcast the TLB invalidation for kmap. 
+ */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif + #ifdef ARCH_NEEDS_KMAP_HIGH_GET extern void *kmap_high_get(struct page *page); #else diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 9f77e78..e3d5554 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -5,15 +5,15 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID - u64 id; + atomic64_t id; #endif - unsigned int vmalloc_seq; + unsigned int vmalloc_seq; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID #define ASID_BITS 8 #define ASID_MASK ((~0ULL) << ASID_BITS) -#define ASID(mm) ((mm)->context.id & ~ASID_MASK) +#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK) #else #define ASID(mm) (0) #endif @@ -26,7 +26,7 @@ typedef struct { * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com> */ typedef struct { - unsigned long end_brk; + unsigned long end_brk; } mm_context_t; #endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index e1f644b..a7b85e0 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -25,7 +25,9 @@ void __check_vmalloc_seq(struct mm_struct *mm); #ifdef CONFIG_CPU_HAS_ASID void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); -#define init_new_context(tsk,mm) ({ mm->context.id = 0; }) +#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) + +DECLARE_PER_CPU(atomic64_t, active_asids); #else /* !CONFIG_CPU_HAS_ASID */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 6ef8afd..86b8fe3 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -111,7 +111,7 @@ #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ /* * Hyp-mode PL2 PTE definitions for LPAE. diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 80d6fc4..9bcd262 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define FIRST_USER_ADDRESS PAGE_SIZE /* + * Use TASK_SIZE as the ceiling argument for free_pgtables() and + * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd + * page shared between user and kernel). 
+ */ +#ifdef CONFIG_ARM_LPAE +#define USER_PGTABLES_CEILING TASK_SIZE +#endif + +/* * The pgprot_* and protection_map entries will be fixed up in runtime * to include the cachable and bufferable bits based on memory policy, * as well as any architecture dependent bits like global/ASID and SMP diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 5a85f14..21a23e3 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -21,9 +21,6 @@ extern void (*arm_pm_idle)(void); extern unsigned int user_debug; -extern void disable_hlt(void); -extern void enable_hlt(void); - #endif /* !__ASSEMBLY__ */ #endif /* __ASM_ARM_SYSTEM_MISC_H */ diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 6e924d3..ab865e6 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -14,7 +14,6 @@ #include <asm/glue.h> -#define TLB_V3_PAGE (1 << 0) #define TLB_V4_U_PAGE (1 << 1) #define TLB_V4_D_PAGE (1 << 2) #define TLB_V4_I_PAGE (1 << 3) @@ -22,7 +21,6 @@ #define TLB_V6_D_PAGE (1 << 5) #define TLB_V6_I_PAGE (1 << 6) -#define TLB_V3_FULL (1 << 8) #define TLB_V4_U_FULL (1 << 9) #define TLB_V4_D_FULL (1 << 10) #define TLB_V4_I_FULL (1 << 11) @@ -34,10 +32,13 @@ #define TLB_V6_D_ASID (1 << 17) #define TLB_V6_I_ASID (1 << 18) +#define TLB_V6_BP (1 << 19) + /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ -#define TLB_V7_UIS_PAGE (1 << 19) -#define TLB_V7_UIS_FULL (1 << 20) -#define TLB_V7_UIS_ASID (1 << 21) +#define TLB_V7_UIS_PAGE (1 << 20) +#define TLB_V7_UIS_FULL (1 << 21) +#define TLB_V7_UIS_ASID (1 << 22) +#define TLB_V7_UIS_BP (1 << 23) #define TLB_BARRIER (1 << 28) #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ @@ -49,7 +50,6 @@ * ============= * * We have the following to choose from: - * v3 - ARMv3 * v4 - ARMv4 without write buffer * v4wb - ARMv4 with write buffer without I TLB flush entry instruction * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction @@ -150,7 +150,8 @@ #define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ TLB_V6_I_FULL | TLB_V6_D_FULL | \ TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ - TLB_V6_I_ASID | TLB_V6_D_ASID) + TLB_V6_I_ASID | TLB_V6_D_ASID | \ + TLB_V6_BP) #ifdef CONFIG_CPU_TLB_V6 # define v6wbi_possible_flags v6wbi_tlb_flags @@ -166,9 +167,11 @@ #endif #define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ - TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) + TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ + TLB_V7_UIS_ASID | TLB_V7_UIS_BP) #define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ - TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) + TLB_V6_U_FULL | TLB_V6_U_PAGE | \ + TLB_V6_U_ASID | TLB_V6_BP) #ifdef CONFIG_CPU_TLB_V7 @@ -324,7 +327,6 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); @@ -345,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { + if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL, "c8, c5, 0", 
zero); @@ -379,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && + if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); @@ -412,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); @@ -430,6 +429,35 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) } } +static inline void local_flush_bp_all(void) +{ + const int zero = 0; + const unsigned int __tlb_flag = __cpu_tlb_flags; + + if (tlb_flag(TLB_V7_UIS_BP)) + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); + else if (tlb_flag(TLB_V6_BP)) + asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); + + if (tlb_flag(TLB_BARRIER)) + isb(); +} + +#ifdef CONFIG_ARM_ERRATA_798181 +static inline void dummy_flush_tlb_a15_erratum(void) +{ + /* + * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. + */ + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); + dsb(); +} +#else +static inline void dummy_flush_tlb_a15_erratum(void) +{ +} +#endif + /* * flush_pmd_entry * @@ -480,6 +508,7 @@ static inline void clean_pmd_entry(void *pmd) #define flush_tlb_kernel_page local_flush_tlb_kernel_page #define flush_tlb_range local_flush_tlb_range #define flush_tlb_kernel_range local_flush_tlb_kernel_range +#define flush_bp_all local_flush_bp_all #else extern void flush_tlb_all(void); extern void flush_tlb_mm(struct mm_struct *mm); @@ -487,6 +516,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); extern void flush_tlb_kernel_page(unsigned long kaddr); extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void flush_bp_all(void); #endif /* diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h index 5c27696..8b1f37b 100644 --- a/arch/arm/include/asm/xen/events.h +++ b/arch/arm/include/asm/xen/events.h @@ -2,6 +2,7 @@ #define _ASM_ARM_XEN_EVENTS_H #include <asm/ptrace.h> +#include <asm/atomic.h> enum ipi_vector { XEN_PLACEHOLDER_VECTOR, @@ -15,26 +16,8 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) return raw_irqs_disabled_flags(regs->ARM_cpsr); } -/* - * We cannot use xchg because it does not support 8-byte - * values. However it is safe to use {ldr,dtd}exd directly because all - * platforms which Xen can run on support those instructions. 
- */ -static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val) -{ - xen_ulong_t oldval; - unsigned int tmp; - - wmb(); - asm volatile("@ xchg_xen_ulong\n" - "1: ldrexd %0, %H0, [%3]\n" - " strexd %1, %2, %H2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (oldval), "=&r" (tmp) - : "r" (val), "r" (ptr) - : "memory", "cc"); - return oldval; -} +#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \ + atomic64_t, \ + counter), (val)) #endif /* _ASM_ARM_XEN_EVENTS_H */ diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 4da7cde..af33b44 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -404,7 +404,7 @@ #define __NR_setns (__NR_SYSCALL_BASE+375) #define __NR_process_vm_readv (__NR_SYSCALL_BASE+376) #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) - /* 378 for kcmp */ +#define __NR_kcmp (__NR_SYSCALL_BASE+378) #define __NR_finit_module (__NR_SYSCALL_BASE+379) /* diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 5ce738b..923eec7 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -110,7 +110,7 @@ int main(void) BLANK(); #endif #ifdef CONFIG_CPU_HAS_ASID - DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); + DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter)); BLANK(); #endif DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 0cc5761..c6ca7e3 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -387,7 +387,7 @@ /* 375 */ CALL(sys_setns) CALL(sys_process_vm_readv) CALL(sys_process_vm_writev) - CALL(sys_ni_syscall) /* reserved for sys_kcmp */ + CALL(sys_kcmp) CALL(sys_finit_module) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c index 85aa2b2..4307653 100644 --- a/arch/arm/kernel/early_printk.c +++ b/arch/arm/kernel/early_printk.c @@ -29,28 +29,17 @@ static void early_console_write(struct console *con, const char *s, unsigned n) early_write(s, n); } -static struct console early_console = { +static struct console early_console_dev = { .name = "earlycon", .write = early_console_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1, }; -asmlinkage void early_printk(const char *fmt, ...) -{ - char buf[512]; - int n; - va_list ap; - - va_start(ap, fmt); - n = vscnprintf(buf, sizeof(buf), fmt, ap); - early_write(buf, n); - va_end(ap); -} - static int __init setup_early_printk(char *buf) { - register_console(&early_console); + early_console = &early_console_dev; + register_console(&early_console_dev); return 0; } diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 3248cde..fefd7f9 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old) */ .macro mcount_enter +/* + * This pad compensates for the push {lr} at the call site. Note that we are + * unable to unwind through a function which does not otherwise save its lr. 
+ */ + UNWIND(.pad #4) stmdb sp!, {r0-r3, lr} + UNWIND(.save {r0-r3, lr}) .endm .macro mcount_get_lr reg @@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old) .endm ENTRY(__gnu_mcount_nc) +UNWIND(.fnstart) #ifdef CONFIG_DYNAMIC_FTRACE mov ip, lr ldmia sp!, {lr} @@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc) #else __mcount #endif +UNWIND(.fnend) ENDPROC(__gnu_mcount_nc) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(ftrace_caller) +UNWIND(.fnstart) __ftrace_caller +UNWIND(.fnend) ENDPROC(ftrace_caller) #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) +UNWIND(.fnstart) __ftrace_graph_caller +UNWIND(.fnend) ENDPROC(ftrace_graph_caller) #endif diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c index 9b6de8c..8ff0ecd 100644 --- a/arch/arm/kernel/etm.c +++ b/arch/arm/kernel/etm.c @@ -254,7 +254,7 @@ static void sysrq_etm_dump(int key) static struct sysrq_key_op sysrq_etm_op = { .handler = sysrq_etm_dump, - .help_msg = "ETM buffer dump", + .help_msg = "etm-buffer-dump(v)", .action_msg = "etm", }; diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 486a15a..8bac553 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -184,13 +184,22 @@ __create_page_tables: orr r3, r3, #3 @ PGD block type mov r6, #4 @ PTRS_PER_PGD mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER -1: str r3, [r0], #4 @ set bottom PGD entry bits +1: +#ifdef CONFIG_CPU_ENDIAN_BE8 str r7, [r0], #4 @ set top PGD entry bits + str r3, [r0], #4 @ set bottom PGD entry bits +#else + str r3, [r0], #4 @ set bottom PGD entry bits + str r7, [r0], #4 @ set top PGD entry bits +#endif add r3, r3, #0x1000 @ next PMD table subs r6, r6, #1 bne 1b add r4, r4, #0x1000 @ point to the PMD tables +#ifdef CONFIG_CPU_ENDIAN_BE8 + add r4, r4, #4 @ we only write the bottom word +#endif #endif ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags @@ -258,6 +267,11 @@ __create_page_tables: addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] +#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) + sub r4, r4, #4 @ Fixup page table pointer + @ for 64-bit descriptors +#endif + #ifdef CONFIG_DEBUG_LL #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING) /* @@ -276,13 +290,17 @@ __create_page_tables: orr r3, r7, r3, lsl #SECTION_SHIFT #ifdef CONFIG_ARM_LPAE mov r7, #1 << (54 - 32) @ XN +#ifdef CONFIG_CPU_ENDIAN_BE8 + str r7, [r0], #4 + str r3, [r0], #4 #else - orr r3, r3, #PMD_SECT_XN -#endif str r3, [r0], #4 -#ifdef CONFIG_ARM_LPAE str r7, [r0], #4 #endif +#else + orr r3, r3, #PMD_SECT_XN + str r3, [r0], #4 +#endif #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */ /* we don't need any serial debugging mappings */ diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 5eae53e..1fd749e 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused) } if (err) { - pr_warning("CPU %d debug is powered down!\n", cpu); + pr_warn_once("CPU %d debug is powered down!\n", cpu); cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); return; } @@ -987,7 +987,7 @@ clear_vcr: isb(); if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to disable vector catch\n", cpu); + pr_warn_once("CPU %d failed to disable vector catch\n", cpu); return; } @@ -1007,7 +1007,7 @@ clear_vcr: } if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to clear debug register pairs\n", cpu); + pr_warn_once("CPU %d failed to clear debug register pairs\n", 
cpu); return; } @@ -1023,7 +1023,7 @@ out_mdbgen: static int __cpuinit dbg_reset_notify(struct notifier_block *self, unsigned long action, void *cpu) { - if (action == CPU_ONLINE) + if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1); return NOTIFY_OK; @@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { +static struct notifier_block dbg_cpu_pm_nb = { .notifier_call = dbg_cpu_pm_notify, }; diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 31e0eb3..8c3094d 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events, struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu *leader_pmu = event->group_leader->pmu; - if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; return armpmu->get_event_idx(hw_events, event) >= 0; @@ -400,7 +403,7 @@ __hw_perf_event_init(struct perf_event *event) } if (event->group_leader != event) { - if (validate_group(event) != 0); + if (validate_group(event) != 0) return -EINVAL; } @@ -484,7 +487,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = { SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL) }; -static void __init armpmu_init(struct arm_pmu *armpmu) +static void armpmu_init(struct arm_pmu *armpmu) { atomic_set(&armpmu->active_events, 0); mutex_init(&armpmu->reserve_mutex); diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 8c79a9e..039cffb 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] /* * PMXEVTYPER: Event selection reg */ -#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ +#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */ #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ /* diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 047d3e4..ae58d3b 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -57,38 +57,6 @@ static const char *isa_modes[] = { "ARM" , "Thumb" , "Jazelle", "ThumbEE" }; -static volatile int hlt_counter; - -void disable_hlt(void) -{ - hlt_counter++; -} - -EXPORT_SYMBOL(disable_hlt); - -void enable_hlt(void) -{ - hlt_counter--; - BUG_ON(hlt_counter < 0); -} - -EXPORT_SYMBOL(enable_hlt); - -static int __init nohlt_setup(char *__unused) -{ - hlt_counter = 1; - return 1; -} - -static int __init hlt_setup(char *__unused) -{ - hlt_counter = 0; - return 1; -} - -__setup("nohlt", nohlt_setup); -__setup("hlt", hlt_setup); - extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); typedef void (*phys_reset_t)(unsigned long); @@ -172,54 +140,38 @@ static void default_idle(void) local_irq_enable(); } -/* - * The idle thread. - * We always respect 'hlt_counter' to prevent low power idle. 
- */ -void cpu_idle(void) +void arch_cpu_idle_prepare(void) { local_fiq_enable(); +} - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - ledtrig_cpu(CPU_LED_IDLE_START); - while (!need_resched()) { -#ifdef CONFIG_HOTPLUG_CPU - if (cpu_is_offline(smp_processor_id())) - cpu_die(); +void arch_cpu_idle_enter(void) +{ + ledtrig_cpu(CPU_LED_IDLE_START); +#ifdef CONFIG_PL310_ERRATA_769419 + wmb(); #endif +} - /* - * We need to disable interrupts here - * to ensure we don't miss a wakeup call. - */ - local_irq_disable(); -#ifdef CONFIG_PL310_ERRATA_769419 - wmb(); +void arch_cpu_idle_exit(void) +{ + ledtrig_cpu(CPU_LED_IDLE_END); +} + +#ifdef CONFIG_HOTPLUG_CPU +void arch_cpu_idle_dead(void) +{ + cpu_die(); +} #endif - if (hlt_counter) { - local_irq_enable(); - cpu_relax(); - } else if (!need_resched()) { - stop_critical_timings(); - if (cpuidle_idle_call()) - default_idle(); - start_critical_timings(); - /* - * default_idle functions must always - * return with IRQs enabled. - */ - WARN_ON(irqs_disabled()); - } else - local_irq_enable(); - } - ledtrig_cpu(CPU_LED_IDLE_END); - rcu_idle_exit(); - tick_nohz_idle_exit(); - schedule_preempt_disabled(); - } + +/* + * Called from the core idle loop. + */ +void arch_cpu_idle(void) +{ + if (cpuidle_idle_call()) + default_idle(); } static char reboot_mode = 'h'; @@ -273,11 +225,8 @@ void __show_regs(struct pt_regs *regs) unsigned long flags; char buf[64]; - printk("CPU: %d %s (%s %.*s)\n", - raw_smp_processor_id(), print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); + show_regs_print_info(KERN_DEFAULT); + print_symbol("PC is at %s\n", instruction_pointer(regs)); print_symbol("LR is at %s\n", regs->ARM_lr); printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" @@ -332,7 +281,6 @@ void __show_regs(struct pt_regs *regs) void show_regs(struct pt_regs * regs) { printk("\n"); - printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm); __show_regs(regs); dump_stack(); } diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index bd6f56b..59d2adb 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void) static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; -static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) +static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) { return (cyc * mult) >> shift; } -static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) +static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) { u64 epoch_ns; u32 epoch_cyc; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 3f6cbb2..234e339 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -56,7 +56,6 @@ #include <asm/virt.h> #include "atags.h" -#include "tcm.h" #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE) @@ -353,6 +352,23 @@ void __init early_print(const char *str, ...) 
printk("%s", buf); } +static void __init cpuid_init_hwcaps(void) +{ + unsigned int divide_instrs; + + if (cpu_architecture() < CPU_ARCH_ARMv7) + return; + + divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; + + switch (divide_instrs) { + case 2: + elf_hwcap |= HWCAP_IDIVA; + case 1: + elf_hwcap |= HWCAP_IDIVT; + } +} + static void __init feat_v6_fixup(void) { int id = read_cpuid_id(); @@ -483,8 +499,11 @@ static void __init setup_processor(void) snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", list->elf_name, ENDIANNESS); elf_hwcap = list->elf_hwcap; + + cpuid_init_hwcaps(); + #ifndef CONFIG_ARM_THUMB - elf_hwcap &= ~HWCAP_THUMB; + elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); #endif feat_v6_fixup(); @@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size) size -= start & ~PAGE_MASK; bank->start = PAGE_ALIGN(start); -#ifndef CONFIG_LPAE +#ifndef CONFIG_ARM_LPAE if (bank->start + size < bank->start) { printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " "32-bit physical address space\n", (long long)start); @@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p) reserve_crashkernel(); - tcm_init(); - #ifdef CONFIG_MULTI_IRQ_HANDLER handle_arch_irq = mdesc->handle_irq; #endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 1bdfd87..4619177 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void) * switch away from it before attempting any exclusive accesses. */ cpu_switch_mm(mm->pgd, mm); + local_flush_bp_all(); enter_lazy_tlb(mm, current); local_flush_tlb_all(); @@ -335,7 +336,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void) /* * OK, it's off to the idle thread for us */ - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } void __init smp_cpus_done(unsigned int max_cpus) @@ -479,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_DUMMY; - evt->rating = 400; + evt->rating = 100; evt->mult = 1; evt->set_mode = broadcast_timer_set_mode; @@ -672,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb, if (freq->flags & CPUFREQ_CONST_LOOPS) return NOTIFY_OK; - if (arm_delay_ops.const_clock) - return NOTIFY_OK; - if (!per_cpu(l_p_j_ref, cpu)) { per_cpu(l_p_j_ref, cpu) = per_cpu(cpu_data, cpu).loops_per_jiffy; diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index 02c5d2c..e82e1d2 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -12,6 +12,7 @@ #include <asm/smp_plat.h> #include <asm/tlbflush.h> +#include <asm/mmu_context.h> /**********************************************************************/ @@ -64,12 +65,77 @@ static inline void ipi_flush_tlb_kernel_range(void *arg) local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); } +static inline void ipi_flush_bp_all(void *ignored) +{ + local_flush_bp_all(); +} + +#ifdef CONFIG_ARM_ERRATA_798181 +static int erratum_a15_798181(void) +{ + unsigned int midr = read_cpuid_id(); + + /* Cortex-A15 r0p0..r3p2 affected */ + if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) + return 0; + return 1; +} +#else +static int erratum_a15_798181(void) +{ + return 0; +} +#endif + +static void ipi_flush_tlb_a15_erratum(void *arg) +{ + dmb(); +} + +static void broadcast_tlb_a15_erratum(void) +{ + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum, 
+ NULL, 1); +} + +static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) +{ + int cpu; + cpumask_t mask = { CPU_BITS_NONE }; + + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + for_each_online_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; + /* + * We only need to send an IPI if the other CPUs are running + * the same ASID as the one being invalidated. There is no + * need for locking around the active_asids check since the + * switch_mm() function has at least one dmb() (as required by + * this workaround) in case a context switch happens on + * another CPU after the condition below. + */ + if (atomic64_read(&mm->context.id) == + atomic64_read(&per_cpu(active_asids, cpu))) + cpumask_set_cpu(cpu, &mask); + } + smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); +} + void flush_tlb_all(void) { if (tlb_ops_need_broadcast()) on_each_cpu(ipi_flush_tlb_all, NULL, 1); else local_flush_tlb_all(); + broadcast_tlb_a15_erratum(); } void flush_tlb_mm(struct mm_struct *mm) @@ -78,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm) on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); else local_flush_tlb_mm(mm); + broadcast_tlb_mm_a15_erratum(mm); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) @@ -90,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) &ta, 1); } else local_flush_tlb_page(vma, uaddr); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_page(unsigned long kaddr) @@ -100,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr) on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); } else local_flush_tlb_kernel_page(kaddr); + broadcast_tlb_a15_erratum(); } void flush_tlb_range(struct vm_area_struct *vma, @@ -114,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma, &ta, 1); } else local_flush_tlb_range(vma, start, end); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) @@ -125,5 +195,13 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); } else local_flush_tlb_kernel_range(start, end); + broadcast_tlb_a15_erratum(); } +void flush_bp_all(void) +{ + if (tlb_ops_need_broadcast()) + on_each_cpu(ipi_flush_bp_all, NULL, 1); + else + local_flush_bp_all(); +} diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index c092115..3f25650 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -22,6 +22,7 @@ #include <linux/of_irq.h> #include <linux/of_address.h> +#include <asm/smp_plat.h> #include <asm/smp_twd.h> #include <asm/localtimer.h> @@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void) struct device_node *np; int err; + if (!is_smp() || !setup_max_cpus) + return; + np = of_find_matching_node(NULL, twd_of_match); if (!np) return; diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index 358bca3..c59c97e 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ret = __cpu_suspend(arg, fn); if (ret == 0) { cpu_switch_mm(mm->pgd, mm); + local_flush_bp_all(); local_flush_tlb_all(); } diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 30ae6bb..f50f19e 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -17,7 +17,6 @@ #include <asm/mach/map.h> #include <asm/memory.h> #include <asm/system_info.h> -#include "tcm.h" static struct gen_pool *tcm_pool; static bool 
dtcm_present; diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 79282eb..f10316b 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -100,7 +100,7 @@ static void __init parse_dt_topology(void) int alloc_size, cpu = 0; alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity); - cpu_capacity = (struct cpu_capacity *)kzalloc(alloc_size, GFP_NOWAIT); + cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT); while ((cn = of_find_node_by_type(cn, "cpu"))) { const u32 *rate, *reg; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 1c08911..18b32e8 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -204,13 +204,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) } #endif -void dump_stack(void) -{ - dump_backtrace(NULL, NULL); -} - -EXPORT_SYMBOL(dump_stack); - void show_stack(struct task_struct *tsk, unsigned long *sp) { dump_backtrace(NULL, tsk); diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 5a93698..842098d 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext) break; case KVM_CAP_ARM_SET_DEVICE_ADDR: r = 1; + break; case KVM_CAP_NR_VCPUS: r = num_online_cpus(); break; @@ -613,7 +614,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || !arm_exit_handlers[hsr_ec]) { - kvm_err("Unkown exception class: %#08lx, " + kvm_err("Unknown exception class: %#08lx, " "hsr: %#08x\n", hsr_ec, (unsigned int)vcpu->arch.hsr); BUG(); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4ea9a98..7bed755 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, u32 val; int cpu; - cpu = get_cpu(); - if (!p->is_write) return read_from_write_only(vcpu, p); + cpu = get_cpu(); + cpumask_setall(&vcpu->arch.require_dcache_flush); cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index c9a1731..0e4cfe1 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c @@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) lr, irq, vgic_cpu->vgic_lr[lr]); BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; - - goto out; + return true; } /* Try to use another LR for this interrupt */ @@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) vgic_cpu->vgic_irq_lr_map[irq] = lr; set_bit(lr, vgic_cpu->lr_used); -out: if (!vgic_irq_is_edge(vcpu, irq)) vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; @@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); - /* - * We do not need to take the distributor lock here, since the only - * action we perform is clearing the irq_active_bit for an EOIed - * level interrupt. There is a potential race with - * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we - * check if the interrupt is already active. Two possibilities: - * - * - The queuing is occurring on the same vcpu: cannot happen, - * as we're already in the context of this vcpu, and - * executing the handler - * - The interrupt has been migrated to another vcpu, and we - * ignore this interrupt for this run. Big deal. It is still - * pending though, and will get considered when this vcpu - * exits. 
- */ if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { /* * Some level interrupts have been EOIed. Clear their @@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } else { vgic_cpu_irq_clear(vcpu, irq); } + + /* + * Despite being EOIed, the LR may not have + * been marked as empty. + */ + set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); + vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; } } @@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } /* - * Sync back the VGIC state after a guest run. We do not really touch - * the distributor here (the irq_pending_on_cpu bit is safe to set), - * so there is no need for taking its lock. + * Sync back the VGIC state after a guest run. The distributor lock is + * needed so we don't get preempted in the middle of the state processing. */ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { @@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + if (!irqchip_in_kernel(vcpu->kvm)) return; + spin_lock(&dist->lock); __kvm_vgic_sync_hwstate(vcpu); + spin_unlock(&dist->lock); } int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 6b93f6a..64dbfa5 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles) static void __timer_const_udelay(unsigned long xloops) { unsigned long long loops = xloops; - loops *= loops_per_jiffy; + loops *= arm_delay_ops.ticks_per_jiffy; __timer_delay(loops >> UDELAY_SHIFT); } @@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer) pr_info("Switching to timer-based delay loop\n"); delay_timer = timer; lpj_fine = timer->freq / HZ; - loops_per_jiffy = lpj_fine; + + /* cpufreq may scale loops_per_jiffy, so keep a private copy */ + arm_delay_ops.ticks_per_jiffy = lpj_fine; arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; - arm_delay_ops.const_clock = true; + delay_calibrated = true; } else { pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index 650d592..94b0650 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -14,27 +14,15 @@ .text .align 5 - .word 0 - -1: subs r2, r2, #4 @ 1 do we have enough - blt 5f @ 1 bytes to align with? - cmp r3, #2 @ 1 - strltb r1, [r0], #1 @ 1 - strleb r1, [r0], #1 @ 1 - strb r1, [r0], #1 @ 1 - add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) -/* - * The pointer is now aligned and the length is adjusted. Try doing the - * memset again. - */ ENTRY(memset) ands r3, r0, #3 @ 1 unaligned? - bne 1b @ 1 + mov ip, r0 @ preserve r0 as return value + bne 6f @ 1 /* - * we know that the pointer in r0 is aligned to a word boundary. + * we know that the pointer in ip is aligned to a word boundary. */ - orr r1, r1, r1, lsl #8 +1: orr r1, r1, r1, lsl #8 orr r1, r1, r1, lsl #16 mov r3, r1 cmp r2, #16 @@ -43,29 +31,28 @@ ENTRY(memset) #if ! CALGN(1)+0 /* - * We need an extra register for this loop - save the return address and - * use the LR + * We need 2 extra registers for this loop - use r8 and the LR */ - str lr, [sp, #-4]! - mov ip, r1 + stmfd sp!, {r8, lr} + mov r8, r1 mov lr, r1 2: subs r2, r2, #64 - stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time. 
- stmgeia r0!, {r1, r3, ip, lr} - stmgeia r0!, {r1, r3, ip, lr} - stmgeia r0!, {r1, r3, ip, lr} + stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time. + stmgeia ip!, {r1, r3, r8, lr} + stmgeia ip!, {r1, r3, r8, lr} + stmgeia ip!, {r1, r3, r8, lr} bgt 2b - ldmeqfd sp!, {pc} @ Now <64 bytes to go. + ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go. /* * No need to correct the count; we're only testing bits from now on */ tst r2, #32 - stmneia r0!, {r1, r3, ip, lr} - stmneia r0!, {r1, r3, ip, lr} + stmneia ip!, {r1, r3, r8, lr} + stmneia ip!, {r1, r3, r8, lr} tst r2, #16 - stmneia r0!, {r1, r3, ip, lr} - ldr lr, [sp], #4 + stmneia ip!, {r1, r3, r8, lr} + ldmfd sp!, {r8, lr} #else @@ -74,54 +61,63 @@ ENTRY(memset) * whole cache lines at once. */ - stmfd sp!, {r4-r7, lr} + stmfd sp!, {r4-r8, lr} mov r4, r1 mov r5, r1 mov r6, r1 mov r7, r1 - mov ip, r1 + mov r8, r1 mov lr, r1 cmp r2, #96 - tstgt r0, #31 + tstgt ip, #31 ble 3f - and ip, r0, #31 - rsb ip, ip, #32 - sub r2, r2, ip - movs ip, ip, lsl #(32 - 4) - stmcsia r0!, {r4, r5, r6, r7} - stmmiia r0!, {r4, r5} - tst ip, #(1 << 30) - mov ip, r1 - strne r1, [r0], #4 + and r8, ip, #31 + rsb r8, r8, #32 + sub r2, r2, r8 + movs r8, r8, lsl #(32 - 4) + stmcsia ip!, {r4, r5, r6, r7} + stmmiia ip!, {r4, r5} + tst r8, #(1 << 30) + mov r8, r1 + strne r1, [ip], #4 3: subs r2, r2, #64 - stmgeia r0!, {r1, r3-r7, ip, lr} - stmgeia r0!, {r1, r3-r7, ip, lr} + stmgeia ip!, {r1, r3-r8, lr} + stmgeia ip!, {r1, r3-r8, lr} bgt 3b - ldmeqfd sp!, {r4-r7, pc} + ldmeqfd sp!, {r4-r8, pc} tst r2, #32 - stmneia r0!, {r1, r3-r7, ip, lr} + stmneia ip!, {r1, r3-r8, lr} tst r2, #16 - stmneia r0!, {r4-r7} - ldmfd sp!, {r4-r7, lr} + stmneia ip!, {r4-r7} + ldmfd sp!, {r4-r8, lr} #endif 4: tst r2, #8 - stmneia r0!, {r1, r3} + stmneia ip!, {r1, r3} tst r2, #4 - strne r1, [r0], #4 + strne r1, [ip], #4 /* * When we get here, we've got less than 4 bytes to zero. We * may have an unaligned pointer as well. */ 5: tst r2, #2 - strneb r1, [r0], #1 - strneb r1, [r0], #1 + strneb r1, [ip], #1 + strneb r1, [ip], #1 tst r2, #1 - strneb r1, [r0], #1 + strneb r1, [ip], #1 mov pc, lr + +6: subs r2, r2, #4 @ 1 do we have enough + blt 5b @ 1 bytes to align with? 
+ cmp r3, #2 @ 1 + strltb r1, [ip], #1 @ 1 + strleb r1, [ip], #1 @ 1 + strb r1, [ip], #1 @ 1 + add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) + b 1b ENDPROC(memset) diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c index b67cd53..44199bc 100644 --- a/arch/arm/mach-at91/at91sam9260.c +++ b/arch/arm/mach-at91/at91sam9260.c @@ -232,6 +232,8 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk), CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk), CLKDEV_CON_DEV_ID("mci_clk", "fffa8000.mmc", &mmc_clk), + CLKDEV_CON_DEV_ID("spi_clk", "fffc8000.spi", &spi0_clk), + CLKDEV_CON_DEV_ID("spi_clk", "fffcc000.spi", &spi1_clk), /* fake hclk clock */ CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk), CLKDEV_CON_ID("pioA", &pioA_clk), diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index d3addee..2ec5efe 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c @@ -262,6 +262,8 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("mci_clk", "fffd0000.mmc", &mmc1_clk), CLKDEV_CON_DEV_ID(NULL, "fff84000.i2c", &twi0_clk), CLKDEV_CON_DEV_ID(NULL, "fff88000.i2c", &twi1_clk), + CLKDEV_CON_DEV_ID("spi_clk", "fffa4000.spi", &spi0_clk), + CLKDEV_CON_DEV_ID("spi_clk", "fffa8000.spi", &spi1_clk), /* fake hclk clock */ CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &uhphs_clk), CLKDEV_CON_DEV_ID(NULL, "fffff200.gpio", &pioA_clk), diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c index 5dfc8fd..ccd0783 100644 --- a/arch/arm/mach-at91/at91sam9n12.c +++ b/arch/arm/mach-at91/at91sam9n12.c @@ -172,6 +172,8 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma_clk), CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk), CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk), + CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk), + CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk), CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk), CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk), CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk), diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c index 44a9a62..a200d8a 100644 --- a/arch/arm/mach-at91/at91sam9x5.c +++ b/arch/arm/mach-at91/at91sam9x5.c @@ -237,6 +237,8 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk), CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk), CLKDEV_CON_DEV_ID(NULL, "f8018000.i2c", &twi2_clk), + CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk), + CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk), CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk), CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk), CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk), diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c index 2ea7059..c20a870 100644 --- a/arch/arm/mach-at91/board-foxg20.c +++ b/arch/arm/mach-at91/board-foxg20.c @@ -176,6 +176,7 @@ static struct w1_gpio_platform_data w1_gpio_pdata = { /* If you choose to use a pin other than PB16 it needs to be 3.3V */ .pin = AT91_PIN_PB16, .is_open_drain = 1, + .ext_pullup_enable_pin = -EINVAL, }; static struct platform_device w1_device = { diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c index a033b8d..869cbec 100644 --- a/arch/arm/mach-at91/board-stamp9g20.c +++ b/arch/arm/mach-at91/board-stamp9g20.c @@ 
-188,6 +188,7 @@ static struct spi_board_info portuxg20_spi_devices[] = { static struct w1_gpio_platform_data w1_gpio_pdata = { .pin = AT91_PIN_PA29, .is_open_drain = 1, + .ext_pullup_enable_pin = -EINVAL, }; static struct platform_device w1_device = { diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c index 0c63815..48f1228 100644 --- a/arch/arm/mach-at91/cpuidle.c +++ b/arch/arm/mach-at91/cpuidle.c @@ -27,8 +27,6 @@ #define AT91_MAX_STATES 2 -static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device); - /* Actual code that puts the SoC in different idle states */ static int at91_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, @@ -47,7 +45,6 @@ static int at91_enter_idle(struct cpuidle_device *dev, static struct cpuidle_driver at91_idle_driver = { .name = "at91_idle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = at91_enter_idle, @@ -61,20 +58,9 @@ static struct cpuidle_driver at91_idle_driver = { }; /* Initialize CPU idle by registering the idle states */ -static int at91_init_cpuidle(void) +static int __init at91_init_cpuidle(void) { - struct cpuidle_device *device; - - device = &per_cpu(at91_cpuidle_device, smp_processor_id()); - device->state_count = AT91_MAX_STATES; - - cpuidle_register_driver(&at91_idle_driver); - - if (cpuidle_register_device(device)) { - printk(KERN_ERR "at91_init_cpuidle: Failed registering\n"); - return -EIO; - } - return 0; + return cpuidle_register(&at91_idle_driver, NULL); } device_initcall(at91_init_cpuidle); diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h index eed465a..5fc2377 100644 --- a/arch/arm/mach-at91/include/mach/gpio.h +++ b/arch/arm/mach-at91/include/mach/gpio.h @@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin); extern void at91_gpio_suspend(void); extern void at91_gpio_resume(void); +#ifdef CONFIG_PINCTRL_AT91 +extern void at91_pinctrl_gpio_suspend(void); +extern void at91_pinctrl_gpio_resume(void); +#else +static inline void at91_pinctrl_gpio_suspend(void) {} +static inline void at91_pinctrl_gpio_resume(void) {} +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c index 8e21026..e0ca591 100644 --- a/arch/arm/mach-at91/irq.c +++ b/arch/arm/mach-at91/irq.c @@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value) void at91_irq_suspend(void) { - int i = 0, bit; + int bit = -1; if (has_aic5()) { /* disable enabled irqs */ - while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { + while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IDCR, 1); - i = bit; } /* enable wakeup irqs */ - i = 0; - while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { + bit = -1; + while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IECR, 1); - i = bit; } } else { at91_aic_write(AT91_AIC_IDCR, *backups); @@ -118,23 +116,21 @@ void at91_irq_suspend(void) void at91_irq_resume(void) { - int i = 0, bit; + int bit = -1; if (has_aic5()) { /* disable wakeup irqs */ - while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { + while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IDCR, 1); - i = bit; } /* enable irqs disabled for 
suspend */ - i = 0; - while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { + bit = -1; + while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IECR, 1); - i = bit; } } else { at91_aic_write(AT91_AIC_IDCR, *wakeups); diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index adb6db8..73f1f25 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz; static int at91_pm_enter(suspend_state_t state) { - at91_gpio_suspend(); + if (of_have_populated_dt()) + at91_pinctrl_gpio_suspend(); + else + at91_gpio_suspend(); at91_irq_suspend(); pr_debug("AT91: PM - wake mask %08x, pm state %d\n", @@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state) error: target_state = PM_SUSPEND_ON; at91_irq_resume(); - at91_gpio_resume(); + if (of_have_populated_dt()) + at91_pinctrl_gpio_resume(); + else + at91_gpio_resume(); return 0; } diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index bf02471..f112895 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -6,6 +6,7 @@ config ARCH_BCM select ARM_ERRATA_764369 if SMP select ARM_GIC select CPU_V7 + select CLKSRC_OF select GENERIC_CLOCKEVENTS select GENERIC_TIME select GPIO_BCM diff --git a/arch/arm/mach-bcm/board_bcm.c b/arch/arm/mach-bcm/board_bcm.c index f0f9aba..2595935 100644 --- a/arch/arm/mach-bcm/board_bcm.c +++ b/arch/arm/mach-bcm/board_bcm.c @@ -16,14 +16,11 @@ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/irqchip.h> +#include <linux/clocksource.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> -static void timer_init(void) -{ -} - static void __init board_init(void) { @@ -35,7 +32,7 @@ static const char * const bcm11351_dt_compat[] = { "bcm,bcm11351", NULL, }; DT_MACHINE_START(BCM11351_DT, "Broadcom Application Processor") .init_irq = irqchip_init, - .init_time = timer_init, + .init_time = clocksource_of_init, .init_machine = board_init, .dt_compat = bcm11351_dt_compat, MACHINE_END diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index e698f26..52e4bb5 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -22,19 +22,9 @@ static struct map_desc cns3xxx_io_desc[] __initdata = { { - .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), - .length = SZ_4K, + .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT, + .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE), + .length = SZ_8K, .type = MT_DEVICE, }, { .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h index 191c8e5..b1021aa 100644 --- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h +++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h @@ -94,10 +94,10 @@ #define RTC_INTR_STS_OFFSET 0x34 #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ -#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ +#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */ #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ -#define CNS3XXX_PM_BASE_VIRT 0xFFF08000 +#define CNS3XXX_PM_BASE_VIRT 
0xFB001000 #define PM_CLK_GATE_OFFSET 0x00 #define PM_SOFT_RST_OFFSET 0x04 @@ -109,7 +109,7 @@ #define PM_PLL_HM_PD_OFFSET 0x1C #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ -#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 +#define CNS3XXX_UART0_BASE_VIRT 0xFB002000 #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 @@ -130,7 +130,7 @@ #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ -#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 +#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000 #define TIMER1_COUNTER_OFFSET 0x00 #define TIMER1_AUTO_RELOAD_OFFSET 0x04 @@ -227,16 +227,16 @@ * Testchip peripheral and fpga gic regions */ #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ -#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 +#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000 #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ -#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 +#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100) #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 -#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 +#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600) #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ -#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 +#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000) #define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile index fb5c1aa..dd1ffcc 100644 --- a/arch/arm/mach-davinci/Makefile +++ b/arch/arm/mach-davinci/Makefile @@ -37,7 +37,6 @@ obj-$(CONFIG_MACH_MITYOMAPL138) += board-mityomapl138.o obj-$(CONFIG_MACH_OMAPL138_HAWKBOARD) += board-omapl138-hawk.o # Power Management -obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_SUSPEND) += pm.o sleep.o obj-$(CONFIG_HAVE_CLK) += pm_domain.o diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index 147b8e1..886481c 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -242,6 +242,73 @@ static struct vpfe_config vpfe_cfg = { .ccdc = "DM355 CCDC", }; +/* venc standards timings */ +static struct vpbe_enc_mode_info dm355evm_enc_preset_timing[] = { + { + .name = "ntsc", + .timings_type = VPBE_ENC_STD, + .std_id = V4L2_STD_NTSC, + .interlaced = 1, + .xres = 720, + .yres = 480, + .aspect = {11, 10}, + .fps = {30000, 1001}, + .left_margin = 0x79, + .upper_margin = 0x10, + }, + { + .name = "pal", + .timings_type = VPBE_ENC_STD, + .std_id = V4L2_STD_PAL, + .interlaced = 1, + .xres = 720, + .yres = 576, + .aspect = {54, 59}, + .fps = {25, 1}, + .left_margin = 0x7E, + .upper_margin = 0x16 + }, +}; + +#define VENC_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL) + +/* + * The outputs available from VPBE + ecnoders. Keep the + * the order same as that of encoders. First those from venc followed by that + * from encoders. Index in the output refers to index on a particular encoder. + * Driver uses this index to pass it to encoder when it supports more than + * one output. Application uses index of the array to set an output. 
+ */ +static struct vpbe_output dm355evm_vpbe_outputs[] = { + { + .output = { + .index = 0, + .name = "Composite", + .type = V4L2_OUTPUT_TYPE_ANALOG, + .std = VENC_STD_ALL, + .capabilities = V4L2_OUT_CAP_STD, + }, + .subdev_name = DM355_VPBE_VENC_SUBDEV_NAME, + .default_mode = "ntsc", + .num_modes = ARRAY_SIZE(dm355evm_enc_preset_timing), + .modes = dm355evm_enc_preset_timing, + .if_params = V4L2_MBUS_FMT_FIXED, + }, +}; + +static struct vpbe_config dm355evm_display_cfg = { + .module_name = "dm355-vpbe-display", + .i2c_adapter_id = 1, + .osd = { + .module_name = DM355_VPBE_OSD_SUBDEV_NAME, + }, + .venc = { + .module_name = DM355_VPBE_VENC_SUBDEV_NAME, + }, + .num_outputs = ARRAY_SIZE(dm355evm_vpbe_outputs), + .outputs = dm355evm_vpbe_outputs, +}; + static struct platform_device *davinci_evm_devices[] __initdata = { &dm355evm_dm9000, &davinci_nand_device, @@ -253,8 +320,6 @@ static struct davinci_uart_config uart_config __initdata = { static void __init dm355_evm_map_io(void) { - /* setup input configuration for VPFE input devices */ - dm355_set_vpfe_config(&vpfe_cfg); dm355_init(); } @@ -344,6 +409,8 @@ static __init void dm355_evm_init(void) davinci_setup_mmc(0, &dm355evm_mmc_config); davinci_setup_mmc(1, &dm355evm_mmc_config); + dm355_init_video(&vpfe_cfg, &dm355evm_display_cfg); + dm355_init_spi0(BIT(0), dm355_evm_spi_info, ARRAY_SIZE(dm355_evm_spi_info)); diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index c2d4958..2a66743 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -27,6 +27,7 @@ #include <linux/input.h> #include <linux/spi/spi.h> #include <linux/spi/eeprom.h> +#include <linux/v4l2-dv-timings.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -39,6 +40,7 @@ #include <linux/platform_data/mtd-davinci.h> #include <linux/platform_data/keyscan-davinci.h> +#include <media/ths7303.h> #include <media/tvp514x.h> #include "davinci.h" @@ -374,6 +376,166 @@ static struct vpfe_config vpfe_cfg = { .ccdc = "ISIF", }; +/* venc standards timings */ +static struct vpbe_enc_mode_info dm365evm_enc_std_timing[] = { + { + .name = "ntsc", + .timings_type = VPBE_ENC_STD, + .std_id = V4L2_STD_NTSC, + .interlaced = 1, + .xres = 720, + .yres = 480, + .aspect = {11, 10}, + .fps = {30000, 1001}, + .left_margin = 0x79, + .upper_margin = 0x10, + }, + { + .name = "pal", + .timings_type = VPBE_ENC_STD, + .std_id = V4L2_STD_PAL, + .interlaced = 1, + .xres = 720, + .yres = 576, + .aspect = {54, 59}, + .fps = {25, 1}, + .left_margin = 0x7E, + .upper_margin = 0x16, + }, +}; + +/* venc dv timings */ +static struct vpbe_enc_mode_info dm365evm_enc_preset_timing[] = { + { + .name = "480p59_94", + .timings_type = VPBE_ENC_DV_TIMINGS, + .dv_timings = V4L2_DV_BT_CEA_720X480P59_94, + .interlaced = 0, + .xres = 720, + .yres = 480, + .aspect = {1, 1}, + .fps = {5994, 100}, + .left_margin = 0x8F, + .upper_margin = 0x2D, + }, + { + .name = "576p50", + .timings_type = VPBE_ENC_DV_TIMINGS, + .dv_timings = V4L2_DV_BT_CEA_720X576P50, + .interlaced = 0, + .xres = 720, + .yres = 576, + .aspect = {1, 1}, + .fps = {50, 1}, + .left_margin = 0x8C, + .upper_margin = 0x36, + }, + { + .name = "720p60", + .timings_type = VPBE_ENC_DV_TIMINGS, + .dv_timings = V4L2_DV_BT_CEA_1280X720P60, + .interlaced = 0, + .xres = 1280, + .yres = 720, + .aspect = {1, 1}, + .fps = {60, 1}, + .left_margin = 0x117, + .right_margin = 70, + .upper_margin = 38, + .lower_margin = 3, + .hsync_len = 80, + .vsync_len = 5, + }, + { + .name = "1080i60", + 
.timings_type = VPBE_ENC_DV_TIMINGS, + .dv_timings = V4L2_DV_BT_CEA_1920X1080I60, + .interlaced = 1, + .xres = 1920, + .yres = 1080, + .aspect = {1, 1}, + .fps = {30, 1}, + .left_margin = 0xc9, + .right_margin = 80, + .upper_margin = 30, + .lower_margin = 3, + .hsync_len = 88, + .vsync_len = 5, + }, +}; + +#define VENC_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL) + +/* + * The outputs available from VPBE + ecnoders. Keep the + * the order same as that of encoders. First those from venc followed by that + * from encoders. Index in the output refers to index on a particular + * encoder.Driver uses this index to pass it to encoder when it supports more + * than one output. Application uses index of the array to set an output. + */ +static struct vpbe_output dm365evm_vpbe_outputs[] = { + { + .output = { + .index = 0, + .name = "Composite", + .type = V4L2_OUTPUT_TYPE_ANALOG, + .std = VENC_STD_ALL, + .capabilities = V4L2_OUT_CAP_STD, + }, + .subdev_name = DM365_VPBE_VENC_SUBDEV_NAME, + .default_mode = "ntsc", + .num_modes = ARRAY_SIZE(dm365evm_enc_std_timing), + .modes = dm365evm_enc_std_timing, + .if_params = V4L2_MBUS_FMT_FIXED, + }, + { + .output = { + .index = 1, + .name = "Component", + .type = V4L2_OUTPUT_TYPE_ANALOG, + .capabilities = V4L2_OUT_CAP_DV_TIMINGS, + }, + .subdev_name = DM365_VPBE_VENC_SUBDEV_NAME, + .default_mode = "480p59_94", + .num_modes = ARRAY_SIZE(dm365evm_enc_preset_timing), + .modes = dm365evm_enc_preset_timing, + .if_params = V4L2_MBUS_FMT_FIXED, + }, +}; + +/* + * Amplifiers on the board + */ +struct ths7303_platform_data ths7303_pdata = { + .ch_1 = 3, + .ch_2 = 3, + .ch_3 = 3, + .init_enable = 1, +}; + +static struct amp_config_info vpbe_amp = { + .module_name = "ths7303", + .is_i2c = 1, + .board_info = { + I2C_BOARD_INFO("ths7303", 0x2c), + .platform_data = &ths7303_pdata, + } +}; + +static struct vpbe_config dm365evm_display_cfg = { + .module_name = "dm365-vpbe-display", + .i2c_adapter_id = 1, + .amp = &vpbe_amp, + .osd = { + .module_name = DM365_VPBE_OSD_SUBDEV_NAME, + }, + .venc = { + .module_name = DM365_VPBE_VENC_SUBDEV_NAME, + }, + .num_outputs = ARRAY_SIZE(dm365evm_vpbe_outputs), + .outputs = dm365evm_vpbe_outputs, +}; + static void __init evm_init_i2c(void) { davinci_init_i2c(&i2c_pdata); @@ -564,8 +726,6 @@ static struct davinci_uart_config uart_config __initdata = { static void __init dm365_evm_map_io(void) { - /* setup input configuration for VPFE input devices */ - dm365_set_vpfe_config(&vpfe_cfg); dm365_init(); } @@ -597,6 +757,8 @@ static __init void dm365_evm_init(void) davinci_setup_mmc(0, &dm365evm_mmc_config); + dm365_init_video(&vpfe_cfg, &dm365evm_display_cfg); + /* maybe setup mmc1/etc ... 
_after_ mmc0 */ evm_init_cpld(); diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 71735e7..745280d 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -622,7 +622,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = { { .name = "ntsc", .timings_type = VPBE_ENC_STD, - .std_id = V4L2_STD_525_60, + .std_id = V4L2_STD_NTSC, .interlaced = 1, .xres = 720, .yres = 480, @@ -634,7 +634,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = { { .name = "pal", .timings_type = VPBE_ENC_STD, - .std_id = V4L2_STD_625_50, + .std_id = V4L2_STD_PAL, .interlaced = 1, .xres = 720, .yres = 576, @@ -649,7 +649,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = { static struct vpbe_enc_mode_info dm644xevm_enc_preset_timing[] = { { .name = "480p59_94", - .timings_type = VPBE_ENC_CUSTOM_TIMINGS, + .timings_type = VPBE_ENC_DV_TIMINGS, .dv_timings = V4L2_DV_BT_CEA_720X480P59_94, .interlaced = 0, .xres = 720, @@ -661,7 +661,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_preset_timing[] = { }, { .name = "576p50", - .timings_type = VPBE_ENC_CUSTOM_TIMINGS, + .timings_type = VPBE_ENC_DV_TIMINGS, .dv_timings = V4L2_DV_BT_CEA_720X576P50, .interlaced = 0, .xres = 720, diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index de7adff..fc4871a 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -514,7 +514,7 @@ static const struct vpif_output dm6467_ch0_outputs[] = { .index = 1, .name = "Component", .type = V4L2_OUTPUT_TYPE_ANALOG, - .capabilities = V4L2_OUT_CAP_CUSTOM_TIMINGS, + .capabilities = V4L2_OUT_CAP_DV_TIMINGS, }, .subdev_name = "adv7343", .output_route = ADV7343_COMPONENT_ID, diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c deleted file mode 100644 index 4729eaa..0000000 --- a/arch/arm/mach-davinci/cpufreq.c +++ /dev/null @@ -1,248 +0,0 @@ -/* - * CPU frequency scaling for DaVinci - * - * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ - * - * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows: - * - * Copyright (C) 2005 Nokia Corporation - * Written by Tony Lindgren <tony@atomide.com> - * - * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King - * - * Copyright (C) 2007-2008 Texas Instruments, Inc. - * Updated to support OMAP3 - * Rajendra Nayak <rnayak@ti.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include <linux/types.h> -#include <linux/cpufreq.h> -#include <linux/init.h> -#include <linux/err.h> -#include <linux/clk.h> -#include <linux/platform_device.h> -#include <linux/export.h> - -#include <mach/hardware.h> -#include <mach/cpufreq.h> -#include <mach/common.h> - -#include "clock.h" - -struct davinci_cpufreq { - struct device *dev; - struct clk *armclk; - struct clk *asyncclk; - unsigned long asyncrate; -}; -static struct davinci_cpufreq cpufreq; - -static int davinci_verify_speed(struct cpufreq_policy *policy) -{ - struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; - struct cpufreq_frequency_table *freq_table = pdata->freq_table; - struct clk *armclk = cpufreq.armclk; - - if (freq_table) - return cpufreq_frequency_table_verify(policy, freq_table); - - if (policy->cpu) - return -EINVAL; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; - policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - return 0; -} - -static unsigned int davinci_getspeed(unsigned int cpu) -{ - if (cpu) - return 0; - - return clk_get_rate(cpufreq.armclk) / 1000; -} - -static int davinci_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - int ret = 0; - unsigned int idx; - struct cpufreq_freqs freqs; - struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; - struct clk *armclk = cpufreq.armclk; - - /* - * Ensure desired rate is within allowed range. Some govenors - * (ondemand) will just pass target_freq=0 to get the minimum. - */ - if (target_freq < policy->cpuinfo.min_freq) - target_freq = policy->cpuinfo.min_freq; - if (target_freq > policy->cpuinfo.max_freq) - target_freq = policy->cpuinfo.max_freq; - - freqs.old = davinci_getspeed(0); - freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; - freqs.cpu = 0; - - if (freqs.old == freqs.new) - return ret; - - dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); - - ret = cpufreq_frequency_table_target(policy, pdata->freq_table, - freqs.new, relation, &idx); - if (ret) - return -EINVAL; - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - /* if moving to higher frequency, up the voltage beforehand */ - if (pdata->set_voltage && freqs.new > freqs.old) { - ret = pdata->set_voltage(idx); - if (ret) - goto out; - } - - ret = clk_set_rate(armclk, idx); - if (ret) - goto out; - - if (cpufreq.asyncclk) { - ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate); - if (ret) - goto out; - } - - /* if moving to lower freq, lower the voltage after lowering freq */ - if (pdata->set_voltage && freqs.new < freqs.old) - pdata->set_voltage(idx); - -out: - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - return ret; -} - -static int davinci_cpu_init(struct cpufreq_policy *policy) -{ - int result = 0; - struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; - struct cpufreq_frequency_table *freq_table = pdata->freq_table; - - if (policy->cpu != 0) - return -EINVAL; - - /* Finish platform specific initialization */ - if (pdata->init) { - result = pdata->init(); - if (result) - return result; - } - - policy->cur = policy->min = policy->max = davinci_getspeed(0); - - if (freq_table) { - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!result) - cpufreq_frequency_table_get_attr(freq_table, - policy->cpu); - } 
else { - policy->cpuinfo.min_freq = policy->min; - policy->cpuinfo.max_freq = policy->max; - } - - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - policy->cur = davinci_getspeed(0); - - /* - * Time measurement across the target() function yields ~1500-1800us - * time taken with no drivers on notification list. - * Setting the latency to 2000 us to accommodate addition of drivers - * to pre/post change notification list. - */ - policy->cpuinfo.transition_latency = 2000 * 1000; - return 0; -} - -static int davinci_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *davinci_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver davinci_driver = { - .flags = CPUFREQ_STICKY, - .verify = davinci_verify_speed, - .target = davinci_target, - .get = davinci_getspeed, - .init = davinci_cpu_init, - .exit = davinci_cpu_exit, - .name = "davinci", - .attr = davinci_cpufreq_attr, -}; - -static int __init davinci_cpufreq_probe(struct platform_device *pdev) -{ - struct davinci_cpufreq_config *pdata = pdev->dev.platform_data; - struct clk *asyncclk; - - if (!pdata) - return -EINVAL; - if (!pdata->freq_table) - return -EINVAL; - - cpufreq.dev = &pdev->dev; - - cpufreq.armclk = clk_get(NULL, "arm"); - if (IS_ERR(cpufreq.armclk)) { - dev_err(cpufreq.dev, "Unable to get ARM clock\n"); - return PTR_ERR(cpufreq.armclk); - } - - asyncclk = clk_get(cpufreq.dev, "async"); - if (!IS_ERR(asyncclk)) { - cpufreq.asyncclk = asyncclk; - cpufreq.asyncrate = clk_get_rate(asyncclk); - } - - return cpufreq_register_driver(&davinci_driver); -} - -static int __exit davinci_cpufreq_remove(struct platform_device *pdev) -{ - clk_put(cpufreq.armclk); - - if (cpufreq.asyncclk) - clk_put(cpufreq.asyncclk); - - return cpufreq_unregister_driver(&davinci_driver); -} - -static struct platform_driver davinci_cpufreq_driver = { - .driver = { - .name = "cpufreq-davinci", - .owner = THIS_MODULE, - }, - .remove = __exit_p(davinci_cpufreq_remove), -}; - -int __init davinci_cpufreq_init(void) -{ - return platform_driver_probe(&davinci_cpufreq_driver, - davinci_cpufreq_probe); -} - diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c index 5ac9e93..36aef3a 100644 --- a/arch/arm/mach-davinci/cpuidle.c +++ b/arch/arm/mach-davinci/cpuidle.c @@ -25,7 +25,6 @@ #define DAVINCI_CPUIDLE_MAX_STATES 2 -static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device); static void __iomem *ddr2_reg_base; static bool ddr2_pdown; @@ -50,14 +49,10 @@ static void davinci_save_ddr_power(int enter, bool pdown) /* Actual code that puts the SoC in different idle states */ static int davinci_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) + struct cpuidle_driver *drv, int index) { davinci_save_ddr_power(1, ddr2_pdown); - - index = cpuidle_wrap_enter(dev, drv, index, - arm_cpuidle_simple_enter); - + cpu_do_idle(); davinci_save_ddr_power(0, ddr2_pdown); return index; @@ -66,7 +61,6 @@ static int davinci_enter_idle(struct cpuidle_device *dev, static struct cpuidle_driver davinci_idle_driver = { .name = "cpuidle-davinci", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = davinci_enter_idle, @@ -81,12 +75,8 @@ static struct cpuidle_driver davinci_idle_driver = { static int __init davinci_cpuidle_probe(struct platform_device *pdev) { - int ret; - struct cpuidle_device *device; struct 
davinci_cpuidle_config *pdata = pdev->dev.platform_data; - device = &per_cpu(davinci_cpuidle_device, smp_processor_id()); - if (!pdata) { dev_err(&pdev->dev, "cannot get platform data\n"); return -ENOENT; @@ -96,20 +86,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev) ddr2_pdown = pdata->ddr2_pdown; - ret = cpuidle_register_driver(&davinci_idle_driver); - if (ret) { - dev_err(&pdev->dev, "failed to register driver\n"); - return ret; - } - - ret = cpuidle_register_device(device); - if (ret) { - dev_err(&pdev->dev, "failed to register device\n"); - cpuidle_unregister_driver(&davinci_idle_driver); - return ret; - } - - return 0; + return cpuidle_register(&davinci_idle_driver, NULL); } static struct platform_driver davinci_cpuidle_driver = { diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h index 12d544b..1ab3df4 100644 --- a/arch/arm/mach-davinci/davinci.h +++ b/arch/arm/mach-davinci/davinci.h @@ -36,12 +36,19 @@ #include <media/davinci/vpbe_osd.h> #define DAVINCI_SYSTEM_MODULE_BASE 0x01c40000 +#define SYSMOD_VDAC_CONFIG 0x2c #define SYSMOD_VIDCLKCTL 0x38 #define SYSMOD_VPSS_CLKCTL 0x44 #define SYSMOD_VDD3P3VPWDN 0x48 #define SYSMOD_VSCLKDIS 0x6c #define SYSMOD_PUPDCTL1 0x7c +/* VPSS CLKCTL bit definitions */ +#define VPSS_MUXSEL_EXTCLK_ENABLE BIT(1) +#define VPSS_VENCCLKEN_ENABLE BIT(3) +#define VPSS_DACCLKEN_ENABLE BIT(4) +#define VPSS_PLLC2SYSCLK5_ENABLE BIT(5) + extern void __iomem *davinci_sysmod_base; #define DAVINCI_SYSMOD_VIRT(x) (davinci_sysmod_base + (x)) void davinci_map_sysmod(void); @@ -74,7 +81,7 @@ void __init dm355_init(void); void dm355_init_spi0(unsigned chipselect_mask, const struct spi_board_info *info, unsigned len); void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata); -void dm355_set_vpfe_config(struct vpfe_config *cfg); +int dm355_init_video(struct vpfe_config *, struct vpbe_config *); /* DM365 function declarations */ void __init dm365_init(void); @@ -84,7 +91,7 @@ void __init dm365_init_ks(struct davinci_ks_platform_data *pdata); void __init dm365_init_rtc(void); void dm365_init_spi0(unsigned chipselect_mask, const struct spi_board_info *info, unsigned len); -void dm365_set_vpfe_config(struct vpfe_config *cfg); +int dm365_init_video(struct vpfe_config *, struct vpbe_config *); /* DM644x function declarations */ void __init dm644x_init(void); diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index b49c3b7..bf9a9d4 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c @@ -35,6 +35,8 @@ #include "asp.h" #define DM355_UART2_BASE (IO_PHYS + 0x206000) +#define DM355_OSD_BASE (IO_PHYS + 0x70200) +#define DM355_VENC_BASE (IO_PHYS + 0x70400) /* * Device specific clocks @@ -345,8 +347,8 @@ static struct clk_lookup dm355_clks[] = { CLK(NULL, "pll1_aux", &pll1_aux_clk), CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp), CLK(NULL, "vpss_dac", &vpss_dac_clk), - CLK(NULL, "vpss_master", &vpss_master_clk), - CLK(NULL, "vpss_slave", &vpss_slave_clk), + CLK("vpss", "master", &vpss_master_clk), + CLK("vpss", "slave", &vpss_slave_clk), CLK(NULL, "clkout1", &clkout1_clk), CLK(NULL, "clkout2", &clkout2_clk), CLK(NULL, "pll2", &pll2_clk), @@ -744,11 +746,146 @@ static struct platform_device vpfe_capture_dev = { }, }; -void dm355_set_vpfe_config(struct vpfe_config *cfg) +static struct resource dm355_osd_resources[] = { + { + .start = DM355_OSD_BASE, + .end = DM355_OSD_BASE + 0x17f, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device dm355_osd_dev = { + 
.name = DM355_VPBE_OSD_SUBDEV_NAME, + .id = -1, + .num_resources = ARRAY_SIZE(dm355_osd_resources), + .resource = dm355_osd_resources, + .dev = { + .dma_mask = &vpfe_capture_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +static struct resource dm355_venc_resources[] = { + { + .start = IRQ_VENCINT, + .end = IRQ_VENCINT, + .flags = IORESOURCE_IRQ, + }, + /* venc registers io space */ + { + .start = DM355_VENC_BASE, + .end = DM355_VENC_BASE + 0x17f, + .flags = IORESOURCE_MEM, + }, + /* VDAC config register io space */ + { + .start = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG, + .end = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG + 3, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource dm355_v4l2_disp_resources[] = { + { + .start = IRQ_VENCINT, + .end = IRQ_VENCINT, + .flags = IORESOURCE_IRQ, + }, + /* venc registers io space */ + { + .start = DM355_VENC_BASE, + .end = DM355_VENC_BASE + 0x17f, + .flags = IORESOURCE_MEM, + }, +}; + +static int dm355_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type, + int field) +{ + switch (if_type) { + case V4L2_MBUS_FMT_SGRBG8_1X8: + davinci_cfg_reg(DM355_VOUT_FIELD_G70); + break; + case V4L2_MBUS_FMT_YUYV10_1X20: + if (field) + davinci_cfg_reg(DM355_VOUT_FIELD); + else + davinci_cfg_reg(DM355_VOUT_FIELD_G70); + break; + default: + return -EINVAL; + } + + davinci_cfg_reg(DM355_VOUT_COUTL_EN); + davinci_cfg_reg(DM355_VOUT_COUTH_EN); + + return 0; +} + +static int dm355_venc_setup_clock(enum vpbe_enc_timings_type type, + unsigned int pclock) { - vpfe_capture_dev.dev.platform_data = cfg; + void __iomem *vpss_clk_ctrl_reg; + + vpss_clk_ctrl_reg = DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL); + + switch (type) { + case VPBE_ENC_STD: + writel(VPSS_DACCLKEN_ENABLE | VPSS_VENCCLKEN_ENABLE, + vpss_clk_ctrl_reg); + break; + case VPBE_ENC_DV_TIMINGS: + if (pclock > 27000000) + /* + * For HD, use external clock source since we cannot + * support HD mode with internal clocks. 
+ */ + writel(VPSS_MUXSEL_EXTCLK_ENABLE, vpss_clk_ctrl_reg); + break; + default: + return -EINVAL; + } + + return 0; } +static struct platform_device dm355_vpbe_display = { + .name = "vpbe-v4l2", + .id = -1, + .num_resources = ARRAY_SIZE(dm355_v4l2_disp_resources), + .resource = dm355_v4l2_disp_resources, + .dev = { + .dma_mask = &vpfe_capture_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +struct venc_platform_data dm355_venc_pdata = { + .setup_pinmux = dm355_vpbe_setup_pinmux, + .setup_clock = dm355_venc_setup_clock, +}; + +static struct platform_device dm355_venc_dev = { + .name = DM355_VPBE_VENC_SUBDEV_NAME, + .id = -1, + .num_resources = ARRAY_SIZE(dm355_venc_resources), + .resource = dm355_venc_resources, + .dev = { + .dma_mask = &vpfe_capture_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = (void *)&dm355_venc_pdata, + }, +}; + +static struct platform_device dm355_vpbe_dev = { + .name = "vpbe_controller", + .id = -1, + .dev = { + .dma_mask = &vpfe_capture_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + /*----------------------------------------------------------------------*/ static struct map_desc dm355_io_desc[] = { @@ -868,19 +1005,36 @@ void __init dm355_init(void) davinci_map_sysmod(); } +int __init dm355_init_video(struct vpfe_config *vpfe_cfg, + struct vpbe_config *vpbe_cfg) +{ + if (vpfe_cfg || vpbe_cfg) + platform_device_register(&dm355_vpss_device); + + if (vpfe_cfg) { + vpfe_capture_dev.dev.platform_data = vpfe_cfg; + platform_device_register(&dm355_ccdc_dev); + platform_device_register(&vpfe_capture_dev); + } + + if (vpbe_cfg) { + dm355_vpbe_dev.dev.platform_data = vpbe_cfg; + platform_device_register(&dm355_osd_dev); + platform_device_register(&dm355_venc_dev); + platform_device_register(&dm355_vpbe_dev); + platform_device_register(&dm355_vpbe_display); + } + + return 0; +} + static int __init dm355_init_devices(void) { if (!cpu_is_davinci_dm355()) return 0; - /* Add ccdc clock aliases */ - clk_add_alias("master", dm355_ccdc_dev.name, "vpss_master", NULL); - clk_add_alias("slave", dm355_ccdc_dev.name, "vpss_master", NULL); davinci_cfg_reg(DM355_INT_EDMA_CC); platform_device_register(&dm355_edma_device); - platform_device_register(&dm355_vpss_device); - platform_device_register(&dm355_ccdc_dev); - platform_device_register(&vpfe_capture_dev); return 0; } diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 6c39805..ff771ce 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -39,16 +39,13 @@ #include "asp.h" #define DM365_REF_FREQ 24000000 /* 24 MHz on the DM365 EVM */ - -/* Base of key scan register bank */ -#define DM365_KEYSCAN_BASE 0x01c69400 - #define DM365_RTC_BASE 0x01c69000 - +#define DM365_KEYSCAN_BASE 0x01c69400 +#define DM365_OSD_BASE 0x01c71c00 +#define DM365_VENC_BASE 0x01c71e00 #define DAVINCI_DM365_VC_BASE 0x01d0c000 #define DAVINCI_DMA_VC_TX 2 #define DAVINCI_DMA_VC_RX 3 - #define DM365_EMAC_BASE 0x01d07000 #define DM365_EMAC_MDIO_BASE (DM365_EMAC_BASE + 0x4000) #define DM365_EMAC_CNTRL_OFFSET 0x0000 @@ -257,6 +254,12 @@ static struct clk vpss_master_clk = { .flags = CLK_PSC, }; +static struct clk vpss_slave_clk = { + .name = "vpss_slave", + .parent = &pll1_sysclk5, + .lpsc = DAVINCI_LPSC_VPSSSLV, +}; + static struct clk arm_clk = { .name = "arm_clk", .parent = &pll2_sysclk2, @@ -449,7 +452,8 @@ static struct clk_lookup dm365_clks[] = { CLK(NULL, "pll2_sysclk8", &pll2_sysclk8), CLK(NULL, "pll2_sysclk9", &pll2_sysclk9), CLK(NULL, "vpss_dac", 
&vpss_dac_clk), - CLK(NULL, "vpss_master", &vpss_master_clk), + CLK("vpss", "master", &vpss_master_clk), + CLK("vpss", "slave", &vpss_slave_clk), CLK(NULL, "arm", &arm_clk), CLK(NULL, "uart0", &uart0_clk), CLK(NULL, "uart1", &uart1_clk), @@ -1226,6 +1230,173 @@ static struct platform_device dm365_isif_dev = { }, }; +static struct resource dm365_osd_resources[] = { + { + .start = DM365_OSD_BASE, + .end = DM365_OSD_BASE + 0xff, + .flags = IORESOURCE_MEM, + }, +}; + +static u64 dm365_video_dma_mask = DMA_BIT_MASK(32); + +static struct platform_device dm365_osd_dev = { + .name = DM365_VPBE_OSD_SUBDEV_NAME, + .id = -1, + .num_resources = ARRAY_SIZE(dm365_osd_resources), + .resource = dm365_osd_resources, + .dev = { + .dma_mask = &dm365_video_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +static struct resource dm365_venc_resources[] = { + { + .start = IRQ_VENCINT, + .end = IRQ_VENCINT, + .flags = IORESOURCE_IRQ, + }, + /* venc registers io space */ + { + .start = DM365_VENC_BASE, + .end = DM365_VENC_BASE + 0x177, + .flags = IORESOURCE_MEM, + }, + /* vdaccfg registers io space */ + { + .start = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG, + .end = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG + 3, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource dm365_v4l2_disp_resources[] = { + { + .start = IRQ_VENCINT, + .end = IRQ_VENCINT, + .flags = IORESOURCE_IRQ, + }, + /* venc registers io space */ + { + .start = DM365_VENC_BASE, + .end = DM365_VENC_BASE + 0x177, + .flags = IORESOURCE_MEM, + }, +}; + +static int dm365_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type, + int field) +{ + switch (if_type) { + case V4L2_MBUS_FMT_SGRBG8_1X8: + davinci_cfg_reg(DM365_VOUT_FIELD_G81); + davinci_cfg_reg(DM365_VOUT_COUTL_EN); + davinci_cfg_reg(DM365_VOUT_COUTH_EN); + break; + case V4L2_MBUS_FMT_YUYV10_1X20: + if (field) + davinci_cfg_reg(DM365_VOUT_FIELD); + else + davinci_cfg_reg(DM365_VOUT_FIELD_G81); + davinci_cfg_reg(DM365_VOUT_COUTL_EN); + davinci_cfg_reg(DM365_VOUT_COUTH_EN); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int dm365_venc_setup_clock(enum vpbe_enc_timings_type type, + unsigned int pclock) +{ + void __iomem *vpss_clkctl_reg; + u32 val; + + vpss_clkctl_reg = DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL); + + switch (type) { + case VPBE_ENC_STD: + val = VPSS_VENCCLKEN_ENABLE | VPSS_DACCLKEN_ENABLE; + break; + case VPBE_ENC_DV_TIMINGS: + if (pclock <= 27000000) { + val = VPSS_VENCCLKEN_ENABLE | VPSS_DACCLKEN_ENABLE; + } else { + /* set sysclk4 to output 74.25 MHz from pll1 */ + val = VPSS_PLLC2SYSCLK5_ENABLE | VPSS_DACCLKEN_ENABLE | + VPSS_VENCCLKEN_ENABLE; + } + break; + default: + return -EINVAL; + } + writel(val, vpss_clkctl_reg); + + return 0; +} + +static struct platform_device dm365_vpbe_display = { + .name = "vpbe-v4l2", + .id = -1, + .num_resources = ARRAY_SIZE(dm365_v4l2_disp_resources), + .resource = dm365_v4l2_disp_resources, + .dev = { + .dma_mask = &dm365_video_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +struct venc_platform_data dm365_venc_pdata = { + .setup_pinmux = dm365_vpbe_setup_pinmux, + .setup_clock = dm365_venc_setup_clock, +}; + +static struct platform_device dm365_venc_dev = { + .name = DM365_VPBE_VENC_SUBDEV_NAME, + .id = -1, + .num_resources = ARRAY_SIZE(dm365_venc_resources), + .resource = dm365_venc_resources, + .dev = { + .dma_mask = &dm365_video_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = (void *)&dm365_venc_pdata, + }, +}; + +static struct platform_device dm365_vpbe_dev = 
{ + .name = "vpbe_controller", + .id = -1, + .dev = { + .dma_mask = &dm365_video_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +int __init dm365_init_video(struct vpfe_config *vpfe_cfg, + struct vpbe_config *vpbe_cfg) +{ + if (vpfe_cfg || vpbe_cfg) + platform_device_register(&dm365_vpss_device); + + if (vpfe_cfg) { + vpfe_capture_dev.dev.platform_data = vpfe_cfg; + platform_device_register(&dm365_isif_dev); + platform_device_register(&vpfe_capture_dev); + } + if (vpbe_cfg) { + dm365_vpbe_dev.dev.platform_data = vpbe_cfg; + platform_device_register(&dm365_osd_dev); + platform_device_register(&dm365_venc_dev); + platform_device_register(&dm365_vpbe_dev); + platform_device_register(&dm365_vpbe_display); + } + + return 0; +} + static int __init dm365_init_devices(void) { if (!cpu_is_davinci_dm365()) @@ -1239,16 +1410,6 @@ static int __init dm365_init_devices(void) clk_add_alias(NULL, dev_name(&dm365_mdio_device.dev), NULL, &dm365_emac_device.dev); - /* Add isif clock alias */ - clk_add_alias("master", dm365_isif_dev.name, "vpss_master", NULL); - platform_device_register(&dm365_vpss_device); - platform_device_register(&dm365_isif_dev); - platform_device_register(&vpfe_capture_dev); return 0; } postcore_initcall(dm365_init_devices); - -void dm365_set_vpfe_config(struct vpfe_config *cfg) -{ - vpfe_capture_dev.dev.platform_data = cfg; -} diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index db1dd92..c2a9273 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c @@ -300,8 +300,8 @@ static struct clk_lookup dm644x_clks[] = { CLK(NULL, "dsp", &dsp_clk), CLK(NULL, "arm", &arm_clk), CLK(NULL, "vicp", &vicp_clk), - CLK(NULL, "vpss_master", &vpss_master_clk), - CLK(NULL, "vpss_slave", &vpss_slave_clk), + CLK("vpss", "master", &vpss_master_clk), + CLK("vpss", "slave", &vpss_slave_clk), CLK(NULL, "arm", &arm_clk), CLK(NULL, "uart0", &uart0_clk), CLK(NULL, "uart1", &uart1_clk), @@ -706,7 +706,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type, v |= DM644X_VPSS_DACCLKEN; writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); break; - case VPBE_ENC_CUSTOM_TIMINGS: + case VPBE_ENC_DV_TIMINGS: if (pclock <= 27000000) { v |= DM644X_VPSS_DACCLKEN; writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); @@ -901,11 +901,6 @@ int __init dm644x_init_video(struct vpfe_config *vpfe_cfg, dm644x_vpfe_dev.dev.platform_data = vpfe_cfg; platform_device_register(&dm644x_ccdc_dev); platform_device_register(&dm644x_vpfe_dev); - /* Add ccdc clock aliases */ - clk_add_alias("master", dm644x_ccdc_dev.name, - "vpss_master", NULL); - clk_add_alias("slave", dm644x_ccdc_dev.name, - "vpss_slave", NULL); } if (vpbe_cfg) { diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c index a685e97..45b7c71 100644 --- a/arch/arm/mach-davinci/dma.c +++ b/arch/arm/mach-davinci/dma.c @@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel); */ int edma_alloc_slot(unsigned ctlr, int slot) { + if (!edma_cc[ctlr]) + return -EINVAL; + if (slot >= 0) slot = EDMA_CHAN_SLOT(slot); diff --git a/arch/arm/mach-davinci/pm_domain.c b/arch/arm/mach-davinci/pm_domain.c index c90250e..6b98413 100644 --- a/arch/arm/mach-davinci/pm_domain.c +++ b/arch/arm/mach-davinci/pm_domain.c @@ -53,7 +53,7 @@ static struct dev_pm_domain davinci_pm_domain = { static struct pm_clk_notifier_block platform_bus_notifier = { .pm_domain = &davinci_pm_domain, - .con_ids = { "fck", NULL, }, + .con_ids = { "fck", "master", "slave", NULL }, }; static int __init davinci_pm_runtime_init(void) 
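
The DaVinci changes above move the VPSS master/slave clocks away from explicit clk_add_alias() calls in the SoC init code: the clkdev table now binds them to a device named "vpss" via CLK("vpss", "master", ...) / CLK("vpss", "slave", ...), and "master" and "slave" are added to the platform-bus PM clock notifier's con_ids. A minimal sketch of a consumer driver under those assumptions follows; it is illustrative only, not the in-tree VPSS code, and the driver/function names are invented for the example. devm_clk_get() resolves "master" through the new clkdev entry, while runtime PM alone is enough to gate both clocks through the pm_clk domain.

/*
 * Illustrative sketch only -- not the in-tree VPSS driver.  Shows how a
 * driver bound to a platform device named "vpss" can reach the clocks
 * registered above as CLK("vpss", "master", ...) / CLK("vpss", "slave", ...).
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int vpss_sketch_probe(struct platform_device *pdev)
{
	struct clk *master;

	/* Resolved through the CLK("vpss", "master", &vpss_master_clk) lookup. */
	master = devm_clk_get(&pdev->dev, "master");
	if (IS_ERR(master))
		return PTR_ERR(master);

	/*
	 * Alternatively, because "master" and "slave" are now listed in the
	 * platform bus pm_clk notifier con_ids, runtime PM alone gates both
	 * clocks: pm_runtime_get_sync() enables them via the PM domain.
	 */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	return 0;
}

static int vpss_sketch_remove(struct platform_device *pdev)
{
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver vpss_sketch_driver = {
	.probe	= vpss_sketch_probe,
	.remove	= vpss_sketch_remove,
	.driver	= {
		.name	= "vpss",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(vpss_sketch_driver);

MODULE_LICENSE("GPL");

Either path relies on the same mechanism: clkdev matches on the device name plus connection ID, which is exactly what the con_ids list in platform_bus_notifier now enumerates, so no per-board clock aliasing is needed.
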
diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h index d2afb4d..b5cc77d 100644 --- a/arch/arm/mach-ep93xx/include/mach/uncompress.h +++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h @@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr) static inline void putc(int c) { - /* Transmit fifo not full? */ - while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF) - ; + int i; + + for (i = 0; i < 10000; i++) { + /* Transmit fifo not full? */ + if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)) + break; + } __raw_writeb(c, PHYS_UART_DATA); } diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index 70f94c8..d5dde07 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -72,10 +72,12 @@ config SOC_EXYNOS5440 bool "SAMSUNG EXYNOS5440" default y depends on ARCH_EXYNOS5 + select ARCH_HAS_OPP select ARM_ARCH_TIMER select AUTO_ZRELADDR select PINCTRL select PINCTRL_EXYNOS5440 + select PM_OPP help Enable EXYNOS5440 SoC support diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c index fcfe025..498a7a23 100644 --- a/arch/arm/mach-exynos/cpuidle.c +++ b/arch/arm/mach-exynos/cpuidle.c @@ -58,7 +58,6 @@ static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device); static struct cpuidle_driver exynos4_idle_driver = { .name = "exynos4_idle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, }; /* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */ diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c index 497fcb7..d28c7fb 100644 --- a/arch/arm/mach-exynos/mach-universal_c210.c +++ b/arch/arm/mach-exynos/mach-universal_c210.c @@ -97,6 +97,19 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = { static struct regulator_consumer_supply max8952_consumer = REGULATOR_SUPPLY("vdd_arm", NULL); +static struct regulator_init_data universal_max8952_reg_data = { + .constraints = { + .name = "VARM_1.2V", + .min_uV = 770000, + .max_uV = 1400000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, + .always_on = 1, + .boot_on = 1, + }, + .num_consumer_supplies = 1, + .consumer_supplies = &max8952_consumer, +}; + static struct max8952_platform_data universal_max8952_pdata __initdata = { .gpio_vid0 = EXYNOS4_GPX0(3), .gpio_vid1 = EXYNOS4_GPX0(4), @@ -105,19 +118,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = { .dvs_mode = { 48, 32, 28, 18 }, /* 1.25, 1.20, 1.05, 0.95V */ .sync_freq = 0, /* default: fastest */ .ramp_speed = 0, /* default: fastest */ - - .reg_data = { - .constraints = { - .name = "VARM_1.2V", - .min_uV = 770000, - .max_uV = 1400000, - .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, - .always_on = 1, - .boot_on = 1, - }, - .num_consumer_supplies = 1, - .consumer_supplies = &max8952_consumer, - }, + .reg_data = &universal_max8952_reg_data, }; static struct regulator_consumer_supply lp3974_buck1_consumer = diff --git a/arch/arm/mach-exynos/setup-usb-phy.c b/arch/arm/mach-exynos/setup-usb-phy.c index b81cc56..6af4066 100644 --- a/arch/arm/mach-exynos/setup-usb-phy.c +++ b/arch/arm/mach-exynos/setup-usb-phy.c @@ -204,9 +204,9 @@ static int exynos4210_usb_phy1_exit(struct platform_device *pdev) int s5p_usb_phy_init(struct platform_device *pdev, int type) { - if (type == S5P_USB_PHY_DEVICE) + if (type == USB_PHY_TYPE_DEVICE) return exynos4210_usb_phy0_init(pdev); - else if (type == S5P_USB_PHY_HOST) + else if (type == USB_PHY_TYPE_HOST) return 
exynos4210_usb_phy1_init(pdev); return -EINVAL; @@ -214,9 +214,9 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type) int s5p_usb_phy_exit(struct platform_device *pdev, int type) { - if (type == S5P_USB_PHY_DEVICE) + if (type == USB_PHY_TYPE_DEVICE) return exynos4210_usb_phy0_exit(pdev); - else if (type == S5P_USB_PHY_HOST) + else if (type == USB_PHY_TYPE_HOST) return exynos4210_usb_phy1_exit(pdev); return -EINVAL; diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index abda5a1..0f2111a 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig @@ -67,6 +67,7 @@ config ARCH_NETWINDER select ISA select ISA_DMA select PCI + select VIRT_TO_BUS help Say Y here if you intend to run this kernel on the Rebel.COM NetWinder. Information about this machine can be found at: diff --git a/arch/arm/mach-gemini/idle.c b/arch/arm/mach-gemini/idle.c index 92bbd6b..87dff4f 100644 --- a/arch/arm/mach-gemini/idle.c +++ b/arch/arm/mach-gemini/idle.c @@ -13,9 +13,11 @@ static void gemini_idle(void) * will never wakeup... Acctualy it is not very good to enable * interrupts first since scheduler can miss a tick, but there is * no other way around this. Platforms that needs it for power saving - * should call enable_hlt() in init code, since by default it is + * should enable it in init code, since by default it is * disabled. */ + + /* FIXME: Enabling interrupts here is racy! */ local_irq_enable(); cpu_do_idle(); } diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c index 020852d..6d8f6d1 100644 --- a/arch/arm/mach-gemini/irq.c +++ b/arch/arm/mach-gemini/irq.c @@ -15,6 +15,8 @@ #include <linux/stddef.h> #include <linux/list.h> #include <linux/sched.h> +#include <linux/cpu.h> + #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/system_misc.h> @@ -77,7 +79,7 @@ void __init gemini_init_irq(void) * Disable the idle handler by default since it is buggy * For more info see arch/arm/mach-gemini/idle.c */ - disable_hlt(); + cpu_idle_poll_ctrl(true); request_resource(&iomem_resource, &irq_resource); diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c index f30c528..890cae2 100644 --- a/arch/arm/mach-highbank/hotplug.c +++ b/arch/arm/mach-highbank/hotplug.c @@ -28,13 +28,11 @@ extern void secondary_startup(void); */ void __ref highbank_cpu_die(unsigned int cpu) { - flush_cache_all(); - highbank_set_cpu_jump(cpu, phys_to_virt(0)); - highbank_set_core_pwr(); - cpu_do_idle(); + flush_cache_louis(); + highbank_set_core_pwr(); - /* We should never return from idle */ - panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu); + while (1) + cpu_do_idle(); } diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index c4ce090..cb70961 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -30,7 +30,7 @@ obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o obj-$(CONFIG_CPU_FREQ_IMX) += cpufreq.o ifeq ($(CONFIG_CPU_IDLE),y) -obj-y += cpuidle.o +obj-$(CONFIG_SOC_IMX5) += cpuidle-imx5.o obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o endif diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c index 1ab91b5..85b728c 100644 --- a/arch/arm/mach-imx/clk-busy.c +++ b/arch/arm/mach-imx/clk-busy.c @@ -169,7 +169,7 @@ struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift, busy->mux.reg = reg; busy->mux.shift = shift; - busy->mux.width = width; + busy->mux.mask = BIT(width) - 1; busy->mux.lock = &imx_ccm_lock; busy->mux_ops = &clk_mux_ops; diff 
--git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index 74e3a34..2193c83 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c @@ -257,6 +257,7 @@ int __init mx35_clocks_init(void) clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); + clk_register_clkdev(clk[admux_gate], "audmux", NULL); clk_prepare_enable(clk[spba_gate]); clk_prepare_enable(clk[gpio1_gate]); @@ -264,6 +265,8 @@ int __init mx35_clocks_init(void) clk_prepare_enable(clk[gpio3_gate]); clk_prepare_enable(clk[iim_gate]); clk_prepare_enable(clk[emi_gate]); + clk_prepare_enable(clk[max_gate]); + clk_prepare_enable(clk[iomuxc_gate]); /* * SCC is needed to boot via mmc after a watchdog reset. The clock code diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 7b025ee..d38e54f 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m" static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; -static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; +static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; @@ -172,7 +172,7 @@ static struct clk *clk[clk_max]; static struct clk_onecell_data clk_data; static enum mx6q_clks const clks_init_on[] __initconst = { - mmdc_ch0_axi, rom, + mmdc_ch0_axi, rom, pll1_sys, }; static struct clk_div_table clk_enet_ref_table[] = { @@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void) clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); - clk_register_clkdev(clk[twd], NULL, "smp_twd"); clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); clk_register_clkdev(clk[ahb], "ahb", NULL); clk_register_clkdev(clk[cko1], "cko1", NULL); diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 5a800bf..5bf4a97 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *); extern void imx_enable_cpu(int cpu, bool enable); extern void imx_set_cpu_jump(int cpu, void *jump_addr); +extern u32 imx_get_cpu_arg(int cpu); +extern void imx_set_cpu_arg(int cpu, u32 arg); extern void v7_cpu_resume(void); extern u32 *pl310_get_save_ptr(void); #ifdef CONFIG_SMP diff --git a/arch/arm/mach-imx/cpufreq.c b/arch/arm/mach-imx/cpufreq.c index d8c75c3..387dc4c 100644 --- a/arch/arm/mach-imx/cpufreq.c +++ b/arch/arm/mach-imx/cpufreq.c @@ -87,13 +87,12 @@ static int mxc_set_target(struct cpufreq_policy *policy, freqs.old = clk_get_rate(cpu_clk) / 1000; freqs.new = freq_Hz / 1000; - freqs.cpu = 0; freqs.flags = 0; - cpufreq_notify_transition(&freqs, 
CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); ret = set_cpu_freq(freq_Hz); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return ret; } @@ -145,14 +144,11 @@ static int mxc_cpufreq_init(struct cpufreq_policy *policy) imx_freq_table[i].frequency = CPUFREQ_TABLE_END; policy->cur = clk_get_rate(cpu_clk) / 1000; - policy->min = policy->cpuinfo.min_freq = cpu_freq_khz_min; - policy->max = policy->cpuinfo.max_freq = cpu_freq_khz_max; /* Manual states, that PLL stabilizes in two CLK32 periods */ policy->cpuinfo.transition_latency = 2 * NANOSECOND / CLK32_FREQ; ret = cpufreq_frequency_table_cpuinfo(policy, imx_freq_table); - if (ret < 0) { printk(KERN_ERR "%s: failed to register i.MXC CPUfreq with error code %d\n", __func__, ret); diff --git a/arch/arm/mach-imx/cpuidle-imx5.c b/arch/arm/mach-imx/cpuidle-imx5.c new file mode 100644 index 0000000..5a47e3c --- /dev/null +++ b/arch/arm/mach-imx/cpuidle-imx5.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2012 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/cpuidle.h> +#include <linux/module.h> +#include <asm/system_misc.h> + +static int imx5_cpuidle_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + arm_pm_idle(); + return index; +} + +static struct cpuidle_driver imx5_cpuidle_driver = { + .name = "imx5_cpuidle", + .owner = THIS_MODULE, + .states[0] = { + .enter = imx5_cpuidle_enter, + .exit_latency = 2, + .target_residency = 1, + .flags = CPUIDLE_FLAG_TIME_VALID, + .name = "IMX5 SRPG", + .desc = "CPU state retained,powered off", + }, + .state_count = 1, +}; + +int __init imx5_cpuidle_init(void) +{ + return cpuidle_register(&imx5_cpuidle_driver, NULL); +} diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c index d533e26..23ddfb6 100644 --- a/arch/arm/mach-imx/cpuidle-imx6q.c +++ b/arch/arm/mach-imx/cpuidle-imx6q.c @@ -6,7 +6,6 @@ * published by the Free Software Foundation. */ -#include <linux/clockchips.h> #include <linux/cpuidle.h> #include <linux/module.h> #include <asm/cpuidle.h> @@ -21,10 +20,6 @@ static DEFINE_SPINLOCK(master_lock); static int imx6q_enter_wait(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - int cpu = dev->cpu; - - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); - if (atomic_inc_return(&master) == num_online_cpus()) { /* * With this lock, we prevent other cpu to exit and enter @@ -43,26 +38,13 @@ idle: cpu_do_idle(); done: atomic_dec(&master); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); return index; } -/* - * For each cpu, setup the broadcast timer because local timer - * stops for the states other than WFI. 
- */ -static void imx6q_setup_broadcast_timer(void *arg) -{ - int cpu = smp_processor_id(); - - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu); -} - static struct cpuidle_driver imx6q_cpuidle_driver = { .name = "imx6q_cpuidle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .states = { /* WFI */ ARM_CPUIDLE_WFI_STATE, @@ -70,7 +52,8 @@ static struct cpuidle_driver imx6q_cpuidle_driver = { { .exit_latency = 50, .target_residency = 75, - .flags = CPUIDLE_FLAG_TIME_VALID, + .flags = CPUIDLE_FLAG_TIME_VALID | + CPUIDLE_FLAG_TIMER_STOP, .enter = imx6q_enter_wait, .name = "WAIT", .desc = "Clock off", @@ -88,8 +71,5 @@ int __init imx6q_cpuidle_init(void) /* Set chicken bit to get a reliable WAIT mode support */ imx6q_set_chicken_bit(); - /* Configure the broadcast timer on each cpu */ - on_each_cpu(imx6q_setup_broadcast_timer, NULL, 1); - - return imx_cpuidle_init(&imx6q_cpuidle_driver); + return cpuidle_register(&imx6q_cpuidle_driver, NULL); } diff --git a/arch/arm/mach-imx/cpuidle.c b/arch/arm/mach-imx/cpuidle.c deleted file mode 100644 index d4cb511..0000000 --- a/arch/arm/mach-imx/cpuidle.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2012 Freescale Semiconductor, Inc. - * Copyright 2012 Linaro Ltd. - * - * The code contained herein is licensed under the GNU General Public - * License. You may obtain a copy of the GNU General Public License - * Version 2 or later at the following locations: - * - * http://www.opensource.org/licenses/gpl-license.html - * http://www.gnu.org/copyleft/gpl.html - */ - -#include <linux/cpuidle.h> -#include <linux/err.h> -#include <linux/hrtimer.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/slab.h> - -static struct cpuidle_device __percpu * imx_cpuidle_devices; - -static void __init imx_cpuidle_devices_uninit(void) -{ - int cpu_id; - struct cpuidle_device *dev; - - for_each_possible_cpu(cpu_id) { - dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id); - cpuidle_unregister_device(dev); - } - - free_percpu(imx_cpuidle_devices); -} - -int __init imx_cpuidle_init(struct cpuidle_driver *drv) -{ - struct cpuidle_device *dev; - int cpu_id, ret; - - if (drv->state_count > CPUIDLE_STATE_MAX) { - pr_err("%s: state_count exceeds maximum\n", __func__); - return -EINVAL; - } - - ret = cpuidle_register_driver(drv); - if (ret) { - pr_err("%s: Failed to register cpuidle driver with error: %d\n", - __func__, ret); - return ret; - } - - imx_cpuidle_devices = alloc_percpu(struct cpuidle_device); - if (imx_cpuidle_devices == NULL) { - ret = -ENOMEM; - goto unregister_drv; - } - - /* initialize state data for each cpuidle_device */ - for_each_possible_cpu(cpu_id) { - dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id); - dev->cpu = cpu_id; - dev->state_count = drv->state_count; - - ret = cpuidle_register_device(dev); - if (ret) { - pr_err("%s: Failed to register cpu %u, error: %d\n", - __func__, cpu_id, ret); - goto uninit; - } - } - - return 0; - -uninit: - imx_cpuidle_devices_uninit(); - -unregister_drv: - cpuidle_unregister_driver(drv); - return ret; -} diff --git a/arch/arm/mach-imx/cpuidle.h b/arch/arm/mach-imx/cpuidle.h index e092d13..786f98e 100644 --- a/arch/arm/mach-imx/cpuidle.h +++ b/arch/arm/mach-imx/cpuidle.h @@ -10,18 +10,16 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include <linux/cpuidle.h> - #ifdef CONFIG_CPU_IDLE -extern int imx_cpuidle_init(struct cpuidle_driver *drv); +extern int imx5_cpuidle_init(void); extern int imx6q_cpuidle_init(void); #else -static inline int imx_cpuidle_init(struct cpuidle_driver *drv) +static inline int 
imx5_cpuidle_init(void) { - return -ENODEV; + return 0; } static inline int imx6q_cpuidle_init(void) { - return -ENODEV; + return 0; } #endif diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S index 921fc15..a58c8b0 100644 --- a/arch/arm/mach-imx/headsmp.S +++ b/arch/arm/mach-imx/headsmp.S @@ -26,16 +26,16 @@ ENDPROC(v7_secondary_startup) #ifdef CONFIG_PM /* - * The following code is located into the .data section. This is to - * allow phys_l2x0_saved_regs to be accessed with a relative load - * as we are running on physical address here. + * The following code must assume it is running from physical address + * where absolute virtual addresses to the data section have to be + * turned into relative ones. */ - .data - .align #ifdef CONFIG_CACHE_L2X0 .macro pl310_resume - ldr r2, phys_l2x0_saved_regs + adr r0, l2x0_saved_regs_offset + ldr r2, [r0] + add r2, r2, r0 ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0 ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl @@ -43,9 +43,9 @@ ENDPROC(v7_secondary_startup) str r1, [r0, #L2X0_CTRL] @ re-enable L2 .endm - .globl phys_l2x0_saved_regs -phys_l2x0_saved_regs: - .long 0 +l2x0_saved_regs_offset: + .word l2x0_saved_regs - . + #else .macro pl310_resume .endm diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 7bc5fe1..361a253 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void) void imx_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); + /* + * We use the cpu jumping argument register to sync with + * imx_cpu_kill() which is running on cpu0 and waiting for + * the register being cleared to kill the cpu. + */ + imx_set_cpu_arg(cpu, ~0); cpu_do_idle(); } int imx_cpu_kill(unsigned int cpu) { + unsigned long timeout = jiffies + msecs_to_jiffies(50); + + while (imx_get_cpu_arg(cpu) == 0) + if (time_after(jiffies, timeout)) + return 0; imx_enable_cpu(cpu, false); + imx_set_cpu_arg(cpu, 0); return 1; } diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c index 03b65e5..8234839 100644 --- a/arch/arm/mach-imx/imx25-dt.c +++ b/arch/arm/mach-imx/imx25-dt.c @@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = { NULL }; +static void __init imx25_timer_init(void) +{ + mx25_clocks_init_dt(); +} + DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)") .map_io = mx25_map_io, .init_early = imx25_init_early, diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c index f67fd7e..82e79c6 100644 --- a/arch/arm/mach-imx/pm-imx5.c +++ b/arch/arm/mach-imx/pm-imx5.c @@ -149,33 +149,6 @@ static void imx5_pm_idle(void) imx5_cpu_do_idle(); } -static int imx5_cpuidle_enter(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int idx) -{ - int ret; - - ret = imx5_cpu_do_idle(); - if (ret < 0) - return ret; - - return idx; -} - -static struct cpuidle_driver imx5_cpuidle_driver = { - .name = "imx5_cpuidle", - .owner = THIS_MODULE, - .en_core_tk_irqen = 1, - .states[0] = { - .enter = imx5_cpuidle_enter, - .exit_latency = 2, - .target_residency = 1, - .flags = CPUIDLE_FLAG_TIME_VALID, - .name = "IMX5 SRPG", - .desc = "CPU state retained,powered off", - }, - .state_count = 1, -}; - static int __init imx5_pm_common_init(void) { int ret; @@ -193,8 +166,7 @@ static int __init imx5_pm_common_init(void) /* Set the registers to the default cpu idle state. 
*/ mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE); - imx_cpuidle_init(&imx5_cpuidle_driver); - return 0; + return imx5_cpuidle_init(); } void __init imx51_pm_init(void) diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c index ee42d20..5faba7a 100644 --- a/arch/arm/mach-imx/pm-imx6q.c +++ b/arch/arm/mach-imx/pm-imx6q.c @@ -22,8 +22,6 @@ #include "common.h" #include "hardware.h" -extern unsigned long phys_l2x0_saved_regs; - static int imx6q_suspend_finish(unsigned long val) { cpu_do_idle(); @@ -57,18 +55,5 @@ static const struct platform_suspend_ops imx6q_pm_ops = { void __init imx6q_pm_init(void) { - /* - * The l2x0 core code provides an infrastucture to save and restore - * l2x0 registers across suspend/resume cycle. But because imx6q - * retains L2 content during suspend and needs to resume L2 before - * MMU is enabled, it can only utilize register saving support and - * have to take care of restoring on its own. So we save physical - * address of the data structure used by l2x0 core to save registers, - * and later restore the necessary ones in imx6q resume entry. - */ -#ifdef CONFIG_CACHE_L2X0 - phys_l2x0_saved_regs = __pa(&l2x0_saved_regs); -#endif - suspend_set_ops(&imx6q_pm_ops); } diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index e15f155..09a742f 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr) src_base + SRC_GPR1 + cpu * 8); } +u32 imx_get_cpu_arg(int cpu) +{ + cpu = cpu_logical_map(cpu); + return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); +} + +void imx_set_cpu_arg(int cpu, u32 arg) +{ + cpu = cpu_logical_map(cpu); + writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); +} + void imx_src_prepare_restart(void) { u32 val; diff --git a/arch/arm/mach-integrator/Makefile b/arch/arm/mach-integrator/Makefile index 5521d18..d14d6b7 100644 --- a/arch/arm/mach-integrator/Makefile +++ b/arch/arm/mach-integrator/Makefile @@ -9,5 +9,4 @@ obj-$(CONFIG_ARCH_INTEGRATOR_AP) += integrator_ap.o obj-$(CONFIG_ARCH_INTEGRATOR_CP) += integrator_cp.o obj-$(CONFIG_PCI) += pci_v3.o pci.o -obj-$(CONFIG_CPU_FREQ_INTEGRATOR) += cpu.o obj-$(CONFIG_INTEGRATOR_IMPD1) += impd1.o diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c deleted file mode 100644 index 590c192..0000000 --- a/arch/arm/mach-integrator/cpu.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * linux/arch/arm/mach-integrator/cpu.c - * - * Copyright (C) 2001-2002 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * CPU support functions - */ -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/cpufreq.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/init.h> -#include <linux/io.h> - -#include <mach/hardware.h> -#include <mach/platform.h> -#include <asm/mach-types.h> -#include <asm/hardware/icst.h> - -static struct cpufreq_driver integrator_driver; - -#define CM_ID __io_address(INTEGRATOR_HDR_ID) -#define CM_OSC __io_address(INTEGRATOR_HDR_OSC) -#define CM_STAT __io_address(INTEGRATOR_HDR_STAT) -#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK) - -static const struct icst_params lclk_params = { - .ref = 24000000, - .vco_max = ICST525_VCO_MAX_5V, - .vco_min = ICST525_VCO_MIN, - .vd_min = 8, - .vd_max = 132, - .rd_min = 24, - .rd_max = 24, - .s2div = icst525_s2div, - .idx2s = icst525_idx2s, -}; - -static const struct icst_params cclk_params = { - .ref = 24000000, - .vco_max = ICST525_VCO_MAX_5V, - .vco_min = ICST525_VCO_MIN, - .vd_min = 12, - .vd_max = 160, - .rd_min = 24, - .rd_max = 24, - .s2div = icst525_s2div, - .idx2s = icst525_idx2s, -}; - -/* - * Validate the speed policy. - */ -static int integrator_verify_policy(struct cpufreq_policy *policy) -{ - struct icst_vco vco; - - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); - policy->max = icst_hz(&cclk_params, vco) / 1000; - - vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); - policy->min = icst_hz(&cclk_params, vco) / 1000; - - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - return 0; -} - - -static int integrator_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - cpumask_t cpus_allowed; - int cpu = policy->cpu; - struct icst_vco vco; - struct cpufreq_freqs freqs; - u_int cm_osc; - - /* - * Save this threads cpus_allowed mask. - */ - cpus_allowed = current->cpus_allowed; - - /* - * Bind to the specified CPU. When this call returns, - * we should be running on the right CPU. - */ - set_cpus_allowed(current, cpumask_of_cpu(cpu)); - BUG_ON(cpu != smp_processor_id()); - - /* get current setting */ - cm_osc = __raw_readl(CM_OSC); - - if (machine_is_integrator()) { - vco.s = (cm_osc >> 8) & 7; - } else if (machine_is_cintegrator()) { - vco.s = 1; - } - vco.v = cm_osc & 255; - vco.r = 22; - freqs.old = icst_hz(&cclk_params, vco) / 1000; - - /* icst_hz_to_vco rounds down -- so we need the next - * larger freq in case of CPUFREQ_RELATION_L. - */ - if (relation == CPUFREQ_RELATION_L) - target_freq += 999; - if (target_freq > policy->max) - target_freq = policy->max; - vco = icst_hz_to_vco(&cclk_params, target_freq * 1000); - freqs.new = icst_hz(&cclk_params, vco) / 1000; - - freqs.cpu = policy->cpu; - - if (freqs.old == freqs.new) { - set_cpus_allowed(current, cpus_allowed); - return 0; - } - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - cm_osc = __raw_readl(CM_OSC); - - if (machine_is_integrator()) { - cm_osc &= 0xfffff800; - cm_osc |= vco.s << 8; - } else if (machine_is_cintegrator()) { - cm_osc &= 0xffffff00; - } - cm_osc |= vco.v; - - __raw_writel(0xa05f, CM_LOCK); - __raw_writel(cm_osc, CM_OSC); - __raw_writel(0, CM_LOCK); - - /* - * Restore the CPUs allowed mask. 
- */ - set_cpus_allowed(current, cpus_allowed); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static unsigned int integrator_get(unsigned int cpu) -{ - cpumask_t cpus_allowed; - unsigned int current_freq; - u_int cm_osc; - struct icst_vco vco; - - cpus_allowed = current->cpus_allowed; - - set_cpus_allowed(current, cpumask_of_cpu(cpu)); - BUG_ON(cpu != smp_processor_id()); - - /* detect memory etc. */ - cm_osc = __raw_readl(CM_OSC); - - if (machine_is_integrator()) { - vco.s = (cm_osc >> 8) & 7; - } else { - vco.s = 1; - } - vco.v = cm_osc & 255; - vco.r = 22; - - current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */ - - set_cpus_allowed(current, cpus_allowed); - - return current_freq; -} - -static int integrator_cpufreq_init(struct cpufreq_policy *policy) -{ - - /* set default policy and cpuinfo */ - policy->cpuinfo.max_freq = 160000; - policy->cpuinfo.min_freq = 12000; - policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ - policy->cur = policy->min = policy->max = integrator_get(policy->cpu); - - return 0; -} - -static struct cpufreq_driver integrator_driver = { - .verify = integrator_verify_policy, - .target = integrator_set_target, - .get = integrator_get, - .init = integrator_cpufreq_init, - .name = "integrator", -}; - -static int __init integrator_cpu_init(void) -{ - return cpufreq_register_driver(&integrator_driver); -} - -static void __exit integrator_cpu_exit(void) -{ - cpufreq_unregister_driver(&integrator_driver); -} - -MODULE_AUTHOR ("Russell M. King"); -MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs"); -MODULE_LICENSE ("GPL"); - -module_init(integrator_cpu_init); -module_exit(integrator_cpu_exit); diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 1dbeb7c..6600cff 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -29,6 +29,7 @@ #include <linux/io.h> #include <linux/export.h> #include <linux/gpio.h> +#include <linux/cpu.h> #include <mach/udc.h> #include <mach/hardware.h> @@ -239,7 +240,7 @@ void __init ixp4xx_init_irq(void) * ixp4xx does not implement the XScale PWRMODE register * so it must not call cpu_do_idle(). 
*/ - disable_hlt(); + cpu_idle_poll_ctrl(true); /* Route all sources to IRQ instead of FIQ */ *IXP4XX_ICLR = 0x0; diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c index d42730a..d599e35 100644 --- a/arch/arm/mach-ixp4xx/vulcan-setup.c +++ b/arch/arm/mach-ixp4xx/vulcan-setup.c @@ -163,6 +163,7 @@ static struct platform_device vulcan_max6369 = { static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = { .pin = 14, + .ext_pullup_enable_pin = -EINVAL, }; static struct platform_device vulcan_w1_gpio = { diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index 2e73e9d..d367aa6 100644 --- a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -41,16 +41,12 @@ static void __init kirkwood_legacy_clk_init(void) struct device_node *np = of_find_compatible_node( NULL, NULL, "marvell,kirkwood-gating-clock"); - struct of_phandle_args clkspec; + struct clk *clk; clkspec.np = np; clkspec.args_count = 1; - clkspec.args[0] = CGC_BIT_GE0; - orion_clkdev_add(NULL, "mv643xx_eth_port.0", - of_clk_get_from_provider(&clkspec)); - clkspec.args[0] = CGC_BIT_PEX0; orion_clkdev_add("0", "pcie", of_clk_get_from_provider(&clkspec)); @@ -59,9 +55,24 @@ static void __init kirkwood_legacy_clk_init(void) orion_clkdev_add("1", "pcie", of_clk_get_from_provider(&clkspec)); - clkspec.args[0] = CGC_BIT_GE1; - orion_clkdev_add(NULL, "mv643xx_eth_port.1", + clkspec.args[0] = CGC_BIT_SDIO; + orion_clkdev_add(NULL, "mvsdio", of_clk_get_from_provider(&clkspec)); + + /* + * The ethernet interfaces forget the MAC address assigned by + * u-boot if the clocks are turned off. Until proper DT support + * is available we always enable them for now. + */ + clkspec.args[0] = CGC_BIT_GE0; + clk = of_clk_get_from_provider(&clkspec); + orion_clkdev_add(NULL, "mv643xx_eth_port.0", clk); + clk_prepare_enable(clk); + + clkspec.args[0] = CGC_BIT_GE1; + clk = of_clk_get_from_provider(&clkspec); + orion_clkdev_add(NULL, "mv643xx_eth_port.1", clk); + clk_prepare_enable(clk); } static void __init kirkwood_of_clk_init(void) diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c index f655b26..e5f7041 100644 --- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c +++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c @@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = { .duplex = DUPLEX_FULL, }; +static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(11), +}; + void __init iomega_ix2_200_init(void) { /* * Basic setup. Needs to be called early. 
*/ - kirkwood_ge01_init(&iomega_ix2_200_ge00_data); + kirkwood_ge00_init(&iomega_ix2_200_ge00_data); + kirkwood_ge01_init(&iomega_ix2_200_ge01_data); } diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c index 1c6e736..08dd739 100644 --- a/arch/arm/mach-kirkwood/guruplug-setup.c +++ b/arch/arm/mach-kirkwood/guruplug-setup.c @@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = { static struct mvsdio_platform_data guruplug_mvsdio_data = { /* unfortunately the CD signal has not been connected */ + .gpio_card_detect = -1, + .gpio_write_protect = -1, }; static struct gpio_led guruplug_led_pins[] = { diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c index 8ddd69f..6a6eb54 100644 --- a/arch/arm/mach-kirkwood/openrd-setup.c +++ b/arch/arm/mach-kirkwood/openrd-setup.c @@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = { static struct mvsdio_platform_data openrd_mvsdio_data = { .gpio_card_detect = 29, /* MPP29 used as SD card detect */ + .gpio_write_protect = -1, }; static unsigned int openrd_mpp_config[] __initdata = { diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index c7d93b4..d242231 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c @@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = { static struct mvsdio_platform_data rd88f6281_mvsdio_data = { .gpio_card_detect = 28, + .gpio_write_protect = -1, }; static unsigned int rd88f6281_mpp_config[] __initdata = { diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c index 9f64d56..76901f4 100644 --- a/arch/arm/mach-mmp/aspenite.c +++ b/arch/arm/mach-mmp/aspenite.c @@ -223,13 +223,7 @@ static struct pxa27x_keypad_platform_data aspenite_keypad_info __initdata = { }; #if defined(CONFIG_USB_EHCI_MV) -static char *pxa168_sph_clock_name[] = { - [0] = "PXA168-USBCLK", -}; - static struct mv_usb_platform_data pxa168_sph_pdata = { - .clknum = 1, - .clkname = pxa168_sph_clock_name, .mode = MV_USB_MODE_HOST, .phy_init = pxa_usb_phy_init, .phy_deinit = pxa_usb_phy_deinit, diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index d1e2d59..f62b68d 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c @@ -9,6 +9,7 @@ */ #include <linux/init.h> +#include <linux/platform_device.h> #include <linux/gpio.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c index 22a9058..6528a5f 100644 --- a/arch/arm/mach-mmp/ttc_dkb.c +++ b/arch/arm/mach-mmp/ttc_dkb.c @@ -162,13 +162,7 @@ static struct i2c_board_info ttc_dkb_i2c_info[] = { #ifdef CONFIG_USB_SUPPORT #if defined(CONFIG_USB_MV_UDC) || defined(CONFIG_USB_EHCI_MV_U2O) -static char *pxa910_usb_clock_name[] = { - [0] = "U2OCLK", -}; - static struct mv_usb_platform_data ttc_usb_pdata = { - .clknum = 1, - .clkname = pxa910_usb_clock_name, .vbus = NULL, .mode = MV_USB_MODE_OTG, .otg_force_a_bus_req = 1, diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 2969027..f9fd77e 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles, { u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); - writel_relaxed(0, event_base + TIMER_CLEAR); + ctrl &= ~TIMER_ENABLE_EN; + writel_relaxed(ctrl, event_base + TIMER_ENABLE); + + writel_relaxed(ctrl, event_base + TIMER_CLEAR); 
writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); return 0; diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c index 274ff58..830139a 100644 --- a/arch/arm/mach-mvebu/irq-armada-370-xp.c +++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c @@ -44,6 +44,8 @@ #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) +#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) + #define ACTIVE_DOORBELLS (8) static DEFINE_RAW_SPINLOCK(irq_controller_lock); @@ -55,40 +57,30 @@ static struct irq_domain *armada_370_xp_mpic_domain; /* * In SMP mode: * For shared global interrupts, mask/unmask global enable bit - * For CPU interrtups, mask/unmask the calling CPU's bit + * For CPU interrupts, mask/unmask the calling CPU's bit */ static void armada_370_xp_irq_mask(struct irq_data *d) { -#ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) writel(hwirq, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); else writel(hwirq, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#endif } static void armada_370_xp_irq_unmask(struct irq_data *d) { -#ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) writel(hwirq, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); else writel(hwirq, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#endif } #ifdef CONFIG_SMP @@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { armada_370_xp_irq_mask(irq_get_irq_data(virq)); - writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); + if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + writel(hw, per_cpu_int_base + + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + else + writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); irq_set_status_flags(virq, IRQ_LEVEL); - if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { + if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { irq_set_percpu_devid(virq); irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, handle_percpu_devid_irq); diff --git a/arch/arm/mach-mxs/icoll.c b/arch/arm/mach-mxs/icoll.c index 8fb23af..e26eeba 100644 --- a/arch/arm/mach-mxs/icoll.c +++ b/arch/arm/mach-mxs/icoll.c @@ -100,7 +100,7 @@ static struct irq_domain_ops icoll_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; -void __init icoll_of_init(struct device_node *np, +static void __init icoll_of_init(struct device_node *np, struct device_node *interrupt_parent) { /* diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 0521867..e7b781d 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = { .lower_margin = 4, .hsync_len = 1, .vsync_len = 1, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, }, }; @@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = { .lower_margin = 10, .hsync_len = 10, .vsync_len = 10, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, }, }; @@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = { .lower_margin = 45, .hsync_len = 1, .vsync_len = 1, - .sync = 
FB_SYNC_DATA_ENABLE_HIGH_ACT, }, }; @@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = { .lower_margin = 13, .hsync_len = 48, .vsync_len = 3, - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | - FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, + .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; @@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = { .lower_margin = 0x15, .hsync_len = 64, .vsync_len = 4, - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | - FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, + .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; @@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = { .lower_margin = 2, .hsync_len = 15, .vsync_len = 15, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT }, }; @@ -259,6 +249,8 @@ static void __init imx23_evk_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes); mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; } static inline void enable_clk_enet_out(void) @@ -278,6 +270,8 @@ static void __init imx28_evk_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes); mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); } @@ -297,6 +291,7 @@ static void __init m28evk_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes); mxsfb_pdata.default_bpp = 16; mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; } static void __init sc_sps1_init(void) @@ -322,6 +317,8 @@ static void __init apx4devkit_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes); mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; } #define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0) @@ -402,17 +399,18 @@ static void __init cfa10049_init(void) { enable_clk_enet_out(); update_fec_mac_prop(OUI_CRYSTALFONTZ); + + mxsfb_pdata.mode_list = cfa10049_video_modes; + mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); + mxsfb_pdata.default_bpp = 32; + mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; } static void __init cfa10037_init(void) { enable_clk_enet_out(); update_fec_mac_prop(OUI_CRYSTALFONTZ); - - mxsfb_pdata.mode_list = cfa10049_video_modes; - mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); - mxsfb_pdata.default_bpp = 32; - mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; } static void __init apf28_init(void) @@ -423,6 +421,8 @@ static void __init apf28_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes); mxsfb_pdata.default_bpp = 16; mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; } static void __init mxs_machine_init(void) diff --git a/arch/arm/mach-mxs/mm.c b/arch/arm/mach-mxs/mm.c index a4294aa..e63b7d8 100644 --- a/arch/arm/mach-mxs/mm.c +++ b/arch/arm/mach-mxs/mm.c @@ -18,6 +18,7 @@ #include <mach/mx23.h> #include <mach/mx28.h> +#include <mach/common.h> /* * Define the MX23 memory map. 
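The mach-mxs hunks above move the data-enable and dot-clock polarity bits out of fb_videomode.sync (which keeps only the standard FB_SYNC_* flags) into the new mxsfb_platform_data.sync field. As a rough illustration of the resulting board-file pattern -- a minimal sketch assembled from those hunks, not part of any patch in this series; the example_* names are hypothetical and the <linux/mxsfb.h> header path for the MXSFB_SYNC_*/STMLCDIF_* definitions is an assumption:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/mxsfb.h>	/* assumed location of MXSFB_SYNC_* and STMLCDIF_* */

/* Hypothetical board: only generic FB_SYNC_* bits stay in the mode entry. */
static struct fb_videomode example_video_modes[] = {
	{
		.name		= "example-wvga",
		.refresh	= 60,
		.xres		= 800,
		.yres		= 480,
		.pixclock	= 29851,	/* ps, example value */
		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
};

static struct mxsfb_platform_data example_mxsfb_pdata;

/* Controller-specific data-enable/dot-clock polarity moves to platform data. */
static void __init example_display_init(void)
{
	example_mxsfb_pdata.mode_list	  = example_video_modes;
	example_mxsfb_pdata.mode_count	  = ARRAY_SIZE(example_video_modes);
	example_mxsfb_pdata.default_bpp	  = 32;
	example_mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
	example_mxsfb_pdata.sync	  = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
					    MXSFB_SYNC_DOTCLK_FAILING_ACT;
}

The split presumably keeps bits that are specific to the MXS LCD controller out of the generic fbdev mode definition, so the mode tables carry only flags the framebuffer core understands.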
diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c index 54add60..1dff467 100644 --- a/arch/arm/mach-mxs/ocotp.c +++ b/arch/arm/mach-mxs/ocotp.c @@ -19,6 +19,7 @@ #include <asm/processor.h> /* for cpu_relax() */ #include <mach/mxs.h> +#include <mach/common.h> #define OCOTP_WORD_OFFSET 0x20 #define OCOTP_WORD_COUNT 0x20 diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c index 27c2cb7..1504b68 100644 --- a/arch/arm/mach-netx/generic.c +++ b/arch/arm/mach-netx/generic.c @@ -168,7 +168,7 @@ void __init netx_init_irq(void) { int irq; - vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0); + vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0); for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) { irq_set_chip_and_handler(irq, &netx_hif_chip, diff --git a/arch/arm/mach-netx/include/mach/irqs.h b/arch/arm/mach-netx/include/mach/irqs.h index 6ce914d..8f74a84 100644 --- a/arch/arm/mach-netx/include/mach/irqs.h +++ b/arch/arm/mach-netx/include/mach/irqs.h @@ -17,42 +17,42 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define NETX_IRQ_VIC_START 0 -#define NETX_IRQ_SOFTINT 0 -#define NETX_IRQ_TIMER0 1 -#define NETX_IRQ_TIMER1 2 -#define NETX_IRQ_TIMER2 3 -#define NETX_IRQ_SYSTIME_NS 4 -#define NETX_IRQ_SYSTIME_S 5 -#define NETX_IRQ_GPIO_15 6 -#define NETX_IRQ_WATCHDOG 7 -#define NETX_IRQ_UART0 8 -#define NETX_IRQ_UART1 9 -#define NETX_IRQ_UART2 10 -#define NETX_IRQ_USB 11 -#define NETX_IRQ_SPI 12 -#define NETX_IRQ_I2C 13 -#define NETX_IRQ_LCD 14 -#define NETX_IRQ_HIF 15 -#define NETX_IRQ_GPIO_0_14 16 -#define NETX_IRQ_XPEC0 17 -#define NETX_IRQ_XPEC1 18 -#define NETX_IRQ_XPEC2 19 -#define NETX_IRQ_XPEC3 20 -#define NETX_IRQ_XPEC(no) (17 + (no)) -#define NETX_IRQ_MSYNC0 21 -#define NETX_IRQ_MSYNC1 22 -#define NETX_IRQ_MSYNC2 23 -#define NETX_IRQ_MSYNC3 24 -#define NETX_IRQ_IRQ_PHY 25 -#define NETX_IRQ_ISO_AREA 26 +#define NETX_IRQ_VIC_START 64 +#define NETX_IRQ_SOFTINT (NETX_IRQ_VIC_START + 0) +#define NETX_IRQ_TIMER0 (NETX_IRQ_VIC_START + 1) +#define NETX_IRQ_TIMER1 (NETX_IRQ_VIC_START + 2) +#define NETX_IRQ_TIMER2 (NETX_IRQ_VIC_START + 3) +#define NETX_IRQ_SYSTIME_NS (NETX_IRQ_VIC_START + 4) +#define NETX_IRQ_SYSTIME_S (NETX_IRQ_VIC_START + 5) +#define NETX_IRQ_GPIO_15 (NETX_IRQ_VIC_START + 6) +#define NETX_IRQ_WATCHDOG (NETX_IRQ_VIC_START + 7) +#define NETX_IRQ_UART0 (NETX_IRQ_VIC_START + 8) +#define NETX_IRQ_UART1 (NETX_IRQ_VIC_START + 9) +#define NETX_IRQ_UART2 (NETX_IRQ_VIC_START + 10) +#define NETX_IRQ_USB (NETX_IRQ_VIC_START + 11) +#define NETX_IRQ_SPI (NETX_IRQ_VIC_START + 12) +#define NETX_IRQ_I2C (NETX_IRQ_VIC_START + 13) +#define NETX_IRQ_LCD (NETX_IRQ_VIC_START + 14) +#define NETX_IRQ_HIF (NETX_IRQ_VIC_START + 15) +#define NETX_IRQ_GPIO_0_14 (NETX_IRQ_VIC_START + 16) +#define NETX_IRQ_XPEC0 (NETX_IRQ_VIC_START + 17) +#define NETX_IRQ_XPEC1 (NETX_IRQ_VIC_START + 18) +#define NETX_IRQ_XPEC2 (NETX_IRQ_VIC_START + 19) +#define NETX_IRQ_XPEC3 (NETX_IRQ_VIC_START + 20) +#define NETX_IRQ_XPEC(no) (NETX_IRQ_VIC_START + 17 + (no)) +#define NETX_IRQ_MSYNC0 (NETX_IRQ_VIC_START + 21) +#define NETX_IRQ_MSYNC1 (NETX_IRQ_VIC_START + 22) +#define NETX_IRQ_MSYNC2 (NETX_IRQ_VIC_START + 23) +#define NETX_IRQ_MSYNC3 (NETX_IRQ_VIC_START + 24) +#define NETX_IRQ_IRQ_PHY (NETX_IRQ_VIC_START + 25) +#define NETX_IRQ_ISO_AREA (NETX_IRQ_VIC_START + 26) /* int 27 is reserved */ /* int 28 is reserved */ -#define NETX_IRQ_TIMER3 29 -#define NETX_IRQ_TIMER4 30 +#define NETX_IRQ_TIMER3 (NETX_IRQ_VIC_START + 29) +#define NETX_IRQ_TIMER4 
(NETX_IRQ_VIC_START + 30) /* int 31 is reserved */ -#define NETX_IRQS 32 +#define NETX_IRQS (NETX_IRQ_VIC_START + 32) /* for multiplexed irqs on gpio 0..14 */ #define NETX_IRQ_GPIO(x) (NETX_IRQS + (x)) diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index cb7c6ae..6c4f766 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c @@ -543,15 +543,6 @@ static struct clk usb_dc_ck = { /* Direct from ULPD, no parent */ .rate = 48000000, .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), - .enable_bit = USB_REQ_EN_SHIFT, -}; - -static struct clk usb_dc_ck7xx = { - .name = "usb_dc_ck", - .ops = &clkops_generic, - /* Direct from ULPD, no parent */ - .rate = 48000000, - .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, }; @@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = { CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), - CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX), - CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX), + CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX), CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), CLK(NULL, "mclk", &mclk_16xx, CK_16XX), CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h index fb18831..14f7e99 100644 --- a/arch/arm/mach-omap1/common.h +++ b/arch/arm/mach-omap1/common.h @@ -31,6 +31,8 @@ #include <plat/i2c.h> +#include <mach/irqs.h> + #if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850) void omap7xx_map_io(void); #else diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c index 7a7690a..db37f49 100644 --- a/arch/arm/mach-omap1/pm.c +++ b/arch/arm/mach-omap1/pm.c @@ -43,6 +43,7 @@ #include <linux/module.h> #include <linux/io.h> #include <linux/atomic.h> +#include <linux/cpu.h> #include <asm/fncpy.h> #include <asm/system_misc.h> @@ -584,8 +585,7 @@ static void omap_pm_init_proc(void) static int omap_pm_prepare(void) { /* We cannot sleep in idle until we have resumed */ - disable_hlt(); - + cpu_idle_poll_ctrl(true); return 0; } @@ -621,7 +621,7 @@ static int omap_pm_enter(suspend_state_t state) static void omap_pm_finish(void) { - enable_hlt(); + cpu_idle_poll_ctrl(false); } diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 49ac3df..8111cd9 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -311,9 +311,6 @@ config MACH_OMAP_ZOOM2 default y select OMAP_PACKAGE_CBB select REGULATOR_FIXED_VOLTAGE if REGULATOR - select SERIAL_8250 - select SERIAL_8250_CONSOLE - select SERIAL_CORE_CONSOLE config MACH_OMAP_ZOOM3 bool "OMAP3630 Zoom3 board" @@ -321,9 +318,6 @@ config MACH_OMAP_ZOOM3 default y select OMAP_PACKAGE_CBP select REGULATOR_FIXED_VOLTAGE if REGULATOR - select SERIAL_8250 - select SERIAL_8250_CONSOLE - select SERIAL_CORE_CONSOLE config MACH_CM_T35 bool "CompuLab CM-T35/CM-T3730 modules" diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 0274ff7..e54a480 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -102,6 +102,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)") .init_irq = omap_intc_of_init, .handle_irq = omap3_intc_handle_irq, .init_machine = omap_generic_init, + .init_late = omap3_init_late, .init_time = omap3_sync32k_timer_init, .dt_compat = omap3_boards_compat, .restart = 
omap3xxx_restart, @@ -119,6 +120,7 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)") .init_irq = omap_intc_of_init, .handle_irq = omap3_intc_handle_irq, .init_machine = omap_generic_init, + .init_late = omap3_init_late, .init_time = omap3_secure_sync32k_timer_init, .dt_compat = omap3_gp_boards_compat, .restart = omap3xxx_restart, diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c index f7c4616..d2ea68e 100644 --- a/arch/arm/mach-omap2/board-rx51.c +++ b/arch/arm/mach-omap2/board-rx51.c @@ -17,6 +17,7 @@ #include <linux/io.h> #include <linux/gpio.h> #include <linux/leds.h> +#include <linux/usb/phy.h> #include <linux/usb/musb.h> #include <linux/platform_data/spi-omap2-mcspi.h> @@ -98,6 +99,7 @@ static void __init rx51_init(void) sdrc_params = nokia_get_sdram_timings(); omap_sdrc_init(sdrc_params, sdrc_params); + usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb"); usb_musb_init(&musb_board_data); rx51_peripherals_init(); diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index 3d58f33..0c6834a 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c @@ -52,6 +52,13 @@ */ #define OMAP4_DPLL_ABE_DEFFREQ 98304000 +/* + * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section + * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred + * locked frequency for the USB DPLL is 960MHz. + */ +#define OMAP4_DPLL_USB_DEFFREQ 960000000 + /* Root clocks */ DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); @@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel, OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); +DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0, + OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, + OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL); + DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); @@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = { CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), + CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X), CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), @@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void) if (rc) pr_err("%s: failed to configure ABE DPLL!\n", __func__); + /* + * Lock USB DPLL on OMAP4 devices so that the L3INIT power + * domain can transition to retention state when not in use. 
+ */ + rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ); + if (rc) + pr_err("%s: failed to configure USB DPLL!\n", __func__); + return 0; } diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 0a6b9c7..14522d0 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -108,7 +108,6 @@ void omap35xx_init_late(void); void omap3630_init_late(void); void am35xx_init_late(void); void ti81xx_init_late(void); -void omap4430_init_late(void); int omap2_common_pm_late_init(void); #if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430) @@ -250,7 +249,6 @@ extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); extern int omap4_finish_suspend(unsigned long cpu_state); extern void omap4_cpu_resume(void); extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); -extern u32 omap4_mpuss_read_prev_context_state(void); #else static inline int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) @@ -278,10 +276,6 @@ static inline int omap4_finish_suspend(unsigned long cpu_state) static inline void omap4_cpu_resume(void) {} -static inline u32 omap4_mpuss_read_prev_context_state(void) -{ - return 0; -} #endif struct omap_sdrc_params; @@ -294,5 +288,8 @@ extern void omap_reserve(void); struct omap_hwmod; extern int omap_dss_reset(struct omap_hwmod *); +/* SoC specific clock initializer */ +extern int (*omap_clk_init)(void); + #endif /* __ASSEMBLER__ */ #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index 80392fc..cca045c 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c @@ -26,6 +26,7 @@ #include <linux/cpuidle.h> #include <linux/export.h> #include <linux/cpu_pm.h> +#include <asm/cpuidle.h> #include "powerdomain.h" #include "clockdomain.h" @@ -99,11 +100,15 @@ static struct omap3_idle_statedata omap3_idle_data[] = { }, }; -/* Private functions */ - -static int __omap3_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) +/** + * omap3_enter_idle - Programs OMAP3 to enter the specified state + * @dev: cpuidle device + * @drv: cpuidle driver + * @index: the index of state to be entered + */ +static int omap3_enter_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) { struct omap3_idle_statedata *cx = &omap3_idle_data[index]; @@ -149,22 +154,6 @@ return_sleep_time: } /** - * omap3_enter_idle - Programs OMAP3 to enter the specified state - * @dev: cpuidle device - * @drv: cpuidle driver - * @index: the index of state to be entered - * - * Called from the CPUidle framework to program the device to the - * specified target state selected by the governor. 
- */ -static inline int omap3_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) -{ - return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle); -} - -/** * next_valid_state - Find next valid C-state * @dev: cpuidle device * @drv: cpuidle driver @@ -271,11 +260,9 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, return ret; } -static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev); - static struct cpuidle_driver omap3_idle_driver = { - .name = "omap3_idle", - .owner = THIS_MODULE, + .name = "omap3_idle", + .owner = THIS_MODULE, .states = { { .enter = omap3_enter_idle_bm, @@ -348,8 +335,6 @@ static struct cpuidle_driver omap3_idle_driver = { */ int __init omap3_idle_init(void) { - struct cpuidle_device *dev; - mpu_pd = pwrdm_lookup("mpu_pwrdm"); core_pd = pwrdm_lookup("core_pwrdm"); per_pd = pwrdm_lookup("per_pwrdm"); @@ -358,16 +343,5 @@ int __init omap3_idle_init(void) if (!mpu_pd || !core_pd || !per_pd || !cam_pd) return -ENODEV; - cpuidle_register_driver(&omap3_idle_driver); - - dev = &per_cpu(omap3_idle_dev, smp_processor_id()); - dev->cpu = 0; - - if (cpuidle_register_device(dev)) { - printk(KERN_ERR "%s: CPUidle register device failed\n", - __func__); - return -EIO; - } - - return 0; + return cpuidle_register(&omap3_idle_driver, NULL); } diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index d639aef..5a286b5 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -1,7 +1,7 @@ /* - * OMAP4 CPU idle Routines + * OMAP4+ CPU idle Routines * - * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2011-2013 Texas Instruments, Inc. * Santosh Shilimkar <santosh.shilimkar@ti.com> * Rajendra Nayak <rnayak@ti.com> * @@ -14,8 +14,8 @@ #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/export.h> -#include <linux/clockchips.h> +#include <asm/cpuidle.h> #include <asm/proc-fns.h> #include "common.h" @@ -24,13 +24,13 @@ #include "clockdomain.h" /* Machine specific information */ -struct omap4_idle_statedata { +struct idle_statedata { u32 cpu_state; u32 mpu_logic_state; u32 mpu_state; }; -static struct omap4_idle_statedata omap4_idle_data[] = { +static struct idle_statedata omap4_idle_data[] = { { .cpu_state = PWRDM_POWER_ON, .mpu_state = PWRDM_POWER_ON, @@ -53,11 +53,12 @@ static struct clockdomain *cpu_clkdm[NR_CPUS]; static atomic_t abort_barrier; static bool cpu_done[NR_CPUS]; +static struct idle_statedata *state_ptr = &omap4_idle_data[0]; /* Private functions */ /** - * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions + * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions * @dev: cpuidle device * @drv: cpuidle driver * @index: the index of state to be entered @@ -66,7 +67,7 @@ static bool cpu_done[NR_CPUS]; * specified low power state selected by the governor. * Returns the amount of time spent in the low power state. 
*/ -static int omap4_enter_idle_simple(struct cpuidle_device *dev, +static int omap_enter_idle_simple(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { @@ -77,12 +78,11 @@ static int omap4_enter_idle_simple(struct cpuidle_device *dev, return index; } -static int omap4_enter_idle_coupled(struct cpuidle_device *dev, +static int omap_enter_idle_coupled(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - struct omap4_idle_statedata *cx = &omap4_idle_data[index]; - int cpu_id = smp_processor_id(); + struct idle_statedata *cx = state_ptr + index; local_fiq_disable(); @@ -109,8 +109,6 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev, } } - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); - /* * Call idle CPU PM enter notifier chain so that * VFP and per CPU interrupt context is saved. @@ -149,11 +147,10 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev, * Call idle CPU cluster PM exit notifier chain * to restore GIC and wakeupgen context. */ - if (omap4_mpuss_read_prev_context_state()) + if ((cx->mpu_state == PWRDM_POWER_RET) && + (cx->mpu_logic_state == PWRDM_POWER_OFF)) cpu_cluster_pm_exit(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); - fail: cpuidle_coupled_parallel_barrier(dev, &abort_barrier); cpu_done[dev->cpu] = false; @@ -163,49 +160,38 @@ fail: return index; } -/* - * For each cpu, setup the broadcast timer because local timers - * stops for the states above C1. - */ -static void omap_setup_broadcast_timer(void *arg) -{ - int cpu = smp_processor_id(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu); -} - -static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev); - static struct cpuidle_driver omap4_idle_driver = { .name = "omap4_idle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .states = { { /* C1 - CPU0 ON + CPU1 ON + MPU ON */ .exit_latency = 2 + 2, .target_residency = 5, .flags = CPUIDLE_FLAG_TIME_VALID, - .enter = omap4_enter_idle_simple, + .enter = omap_enter_idle_simple, .name = "C1", - .desc = "MPUSS ON" + .desc = "CPUx ON, MPUSS ON" }, { /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ .exit_latency = 328 + 440, .target_residency = 960, - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, - .enter = omap4_enter_idle_coupled, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED | + CPUIDLE_FLAG_TIMER_STOP, + .enter = omap_enter_idle_coupled, .name = "C2", - .desc = "MPUSS CSWR", + .desc = "CPUx OFF, MPUSS CSWR", }, { /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ .exit_latency = 460 + 518, .target_residency = 1100, - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, - .enter = omap4_enter_idle_coupled, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED | + CPUIDLE_FLAG_TIMER_STOP, + .enter = omap_enter_idle_coupled, .name = "C3", - .desc = "MPUSS OSWR", + .desc = "CPUx OFF, MPUSS OSWR", }, }, .state_count = ARRAY_SIZE(omap4_idle_data), @@ -215,16 +201,13 @@ static struct cpuidle_driver omap4_idle_driver = { /* Public functions */ /** - * omap4_idle_init - Init routine for OMAP4 idle + * omap4_idle_init - Init routine for OMAP4+ idle * - * Registers the OMAP4 specific cpuidle driver to the cpuidle + * Registers the OMAP4+ specific cpuidle driver to the cpuidle * framework with the valid set of states. 
*/ int __init omap4_idle_init(void) { - struct cpuidle_device *dev; - unsigned int cpu_id = 0; - mpu_pd = pwrdm_lookup("mpu_pwrdm"); cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm"); cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm"); @@ -236,22 +219,5 @@ int __init omap4_idle_init(void) if (!cpu_clkdm[0] || !cpu_clkdm[1]) return -ENODEV; - /* Configure the broadcast timer on each cpu */ - on_each_cpu(omap_setup_broadcast_timer, NULL, 1); - - for_each_cpu(cpu_id, cpu_online_mask) { - dev = &per_cpu(omap4_idle_dev, cpu_id); - dev->cpu = cpu_id; -#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED - dev->coupled_cpus = *cpu_online_mask; -#endif - cpuidle_register_driver(&omap4_idle_driver); - - if (cpuidle_register_device(dev)) { - pr_err("%s: CPUidle register failed\n", __func__); - return -EIO; - } - } - - return 0; + return cpuidle_register(&omap4_idle_driver, cpu_online_mask); } diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index e4b16c8..410e1ba 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1122,9 +1122,6 @@ int gpmc_calc_timings(struct gpmc_timings *gpmc_t, /* TODO: remove, see function definition */ gpmc_convert_ps_to_ns(gpmc_t); - /* Now the GPMC is initialised, unreserve the chip-selects */ - gpmc_cs_map = 0; - return 0; } @@ -1383,6 +1380,9 @@ static int gpmc_probe(struct platform_device *pdev) if (IS_ERR_VALUE(gpmc_setup_irq())) dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); + /* Now the GPMC is initialised, unreserve the chip-selects */ + gpmc_cs_map = 0; + rc = gpmc_probe_dt(pdev); if (rc < 0) { clk_disable_unprepare(gpmc_l3_clk); diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 2c3fdd6..5c445ca 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -55,6 +55,12 @@ #include "prm44xx.h" /* + * omap_clk_init: points to a function that does the SoC-specific + * clock initializations + */ +int (*omap_clk_init)(void); + +/* * The machine specific code may provide the extra mapping besides the * default mapping provided here. 
*/ @@ -397,7 +403,7 @@ void __init omap2420_init_early(void) omap242x_clockdomains_init(); omap2420_hwmod_init(); omap_hwmod_init_postsetup(); - omap2420_clk_init(); + omap_clk_init = omap2420_clk_init; } void __init omap2420_init_late(void) @@ -427,7 +433,7 @@ void __init omap2430_init_early(void) omap243x_clockdomains_init(); omap2430_hwmod_init(); omap_hwmod_init_postsetup(); - omap2430_clk_init(); + omap_clk_init = omap2430_clk_init; } void __init omap2430_init_late(void) @@ -462,7 +468,7 @@ void __init omap3_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap3xxx_clk_init(); + omap_clk_init = omap3xxx_clk_init; } void __init omap3430_init_early(void) @@ -500,7 +506,7 @@ void __init ti81xx_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap3xxx_clk_init(); + omap_clk_init = omap3xxx_clk_init; } void __init omap3_init_late(void) @@ -568,7 +574,7 @@ void __init am33xx_init_early(void) am33xx_clockdomains_init(); am33xx_hwmod_init(); omap_hwmod_init_postsetup(); - am33xx_clk_init(); + omap_clk_init = am33xx_clk_init; } #endif @@ -593,7 +599,7 @@ void __init omap4430_init_early(void) omap44xx_clockdomains_init(); omap44xx_hwmod_init(); omap_hwmod_init_postsetup(); - omap4xxx_clk_init(); + omap_clk_init = omap4xxx_clk_init; } void __init omap4430_init_late(void) diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index 6a217c9..f82cf87 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -211,8 +211,6 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition, return -EINVAL; } - pr_err("%s: Could not find signal %s\n", __func__, muxname); - return -ENODEV; } @@ -234,6 +232,8 @@ int __init omap_mux_get_by_name(const char *muxname, return mux_mode; } + pr_err("%s: Could not find signal %s\n", __func__, muxname); + return -ENODEV; } @@ -739,8 +739,9 @@ static void __init omap_mux_dbg_create_entry( list_for_each_entry(e, &partition->muxmodes, node) { struct omap_mux *m = &e->mux; - (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir, - m, &omap_mux_dbg_signal_fops); + (void)debugfs_create_file(m->muxnames[0], S_IWUSR | S_IRUGO, + mux_dbg_dir, m, + &omap_mux_dbg_signal_fops); } } diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 8bcb64b..e80327b 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -139,20 +139,6 @@ static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id) } } -/** - * omap4_mpuss_read_prev_context_state: - * Function returns the MPUSS previous context state - */ -u32 omap4_mpuss_read_prev_context_state(void) -{ - u32 reg; - - reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, - OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); - reg &= OMAP4430_LOSTCONTEXT_DFF_MASK; - return reg; -} - /* * Store the CPU cluster state for L2X0 low power operations. 
*/ diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index c2c798c..e512253 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -138,6 +138,7 @@ #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/bootmem.h> +#include <linux/cpu.h> #include <asm/system_misc.h> @@ -1368,7 +1369,9 @@ static void _enable_sysc(struct omap_hwmod *oh) } if (sf & SYSC_HAS_MIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { + if (oh->flags & HWMOD_FORCE_MSTANDBY) { + idlemode = HWMOD_IDLEMODE_FORCE; + } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) { idlemode = HWMOD_IDLEMODE_NO; } else { if (sf & SYSC_HAS_ENAWAKEUP) @@ -1440,7 +1443,8 @@ static void _idle_sysc(struct omap_hwmod *oh) } if (sf & SYSC_HAS_MIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { + if ((oh->flags & HWMOD_SWSUP_MSTANDBY) || + (oh->flags & HWMOD_FORCE_MSTANDBY)) { idlemode = HWMOD_IDLEMODE_FORCE; } else { if (sf & SYSC_HAS_ENAWAKEUP) @@ -2154,7 +2158,7 @@ static int _enable(struct omap_hwmod *oh) if (soc_ops.enable_module) soc_ops.enable_module(oh); if (oh->flags & HWMOD_BLOCK_WFI) - disable_hlt(); + cpu_idle_poll_ctrl(true); if (soc_ops.update_context_lost) soc_ops.update_context_lost(oh); @@ -2218,7 +2222,7 @@ static int _idle(struct omap_hwmod *oh) _del_initiator_dep(oh, mpu_oh); if (oh->flags & HWMOD_BLOCK_WFI) - enable_hlt(); + cpu_idle_poll_ctrl(false); if (soc_ops.disable_module) soc_ops.disable_module(oh); @@ -2328,7 +2332,7 @@ static int _shutdown(struct omap_hwmod *oh) _del_initiator_dep(oh, mpu_oh); /* XXX what about the other system initiators here? dma, dsp */ if (oh->flags & HWMOD_BLOCK_WFI) - enable_hlt(); + cpu_idle_poll_ctrl(false); if (soc_ops.disable_module) soc_ops.disable_module(oh); _disable_clocks(oh); diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index d43d9b6..d5dc935 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm { * * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out * of idle, rather than relying on module smart-idle - * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out - * of standby, rather than relying on module smart-standby + * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and + * out of standby, rather than relying on module smart-standby * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for * SDRAM controller, etc. XXX probably belongs outside the main hwmod file * XXX Should be HWMOD_SETUP_NO_RESET @@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm { * correctly, or this is being abused to deal with some PM latency * issues -- but we're currently suffering from a shortage of * folks who are able to track these issues down properly. + * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device + * is kept in force-standby mode. Failing to do so causes PM problems + * with musb on OMAP3630 at least. Note that musb has a dedicated register + * to control MSTANDBY signal when MIDLEMODE is set to force-standby. 
*/ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_16BIT_REG (1 << 8) #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) #define HWMOD_BLOCK_WFI (1 << 10) +#define HWMOD_FORCE_MSTANDBY (1 << 11) /* * omap_hwmod._int_flags definitions diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index ac7e03e..5112d04 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = { * Erratum ID: i479 idle_req / idle_ack mechanism potentially * broken when autoidle is enabled * workaround is to disable the autoidle bit at module level. + * + * Enabling the device in any other MIDLEMODE setting but force-idle + * causes core_pwrdm not enter idle states at least on OMAP3630. + * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY + * signal when MIDLEMODE is set to force-idle. */ .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE - | HWMOD_SWSUP_MSTANDBY, + | HWMOD_FORCE_MSTANDBY, }; /* usb_otg_hs */ diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 0e47d2e..eaba9dc 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2719,7 +2719,17 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { .name = "ocp2scp_usb_phy", .class = &omap44xx_ocp2scp_hwmod_class, .clkdm_name = "l3_init_clkdm", - .main_clk = "func_48m_fclk", + /* + * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP + * block as an "optional clock," and normally should never be + * specified as the main_clk for an OMAP IP block. However it + * turns out that this clock is actually the main clock for + * the ocp2scp_usb_phy IP block: + * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html + * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems + * to be the best workaround. 
+ */ + .main_clk = "ocp2scp_usb_phy_phy_48m", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET, diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 673a4c1..e742118 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -218,7 +218,7 @@ static int omap_pm_enter(suspend_state_t suspend_state) static int omap_pm_begin(suspend_state_t state) { - disable_hlt(); + cpu_idle_poll_ctrl(true); if (cpu_is_omap34xx()) omap_prcm_irq_prepare(); return 0; @@ -226,8 +226,7 @@ static int omap_pm_begin(suspend_state_t state) static void omap_pm_end(void) { - enable_hlt(); - return; + cpu_idle_poll_ctrl(false); } static void omap_pm_finish(void) @@ -265,6 +264,12 @@ static void __init omap4_init_voltages(void) omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva"); } +static inline void omap_init_cpufreq(void) +{ + struct platform_device_info devinfo = { .name = "omap-cpufreq", }; + platform_device_register_full(&devinfo); +} + static int __init omap2_common_pm_init(void) { if (!of_have_populated_dt()) @@ -294,6 +299,9 @@ int __init omap2_common_pm_late_init(void) /* Smartreflex device init */ omap_devinit_smartreflex(); + + /* cpufreq dummy device instantiation */ + omap_init_cpufreq(); } #ifdef CONFIG_SUSPEND diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index ea62e75..152a10c 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -126,8 +126,8 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) * omap_default_idle - OMAP4 default ilde routine.' * * Implements OMAP4 memory, IO ordering requirements which can't be addressed - * with default cpu_do_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and - * by secondary CPU with CONFIG_CPUIDLE. + * with default cpu_do_idle() hook. Used by all CPUs with !CONFIG_CPU_IDLE and + * by secondary CPU with CONFIG_CPU_IDLE. 
*/ static void omap_default_idle(void) { diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 2bdd4cf..f62b509 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void) clksrc_nr, clksrc_src) \ void __init omap##name##_gptimer_timer_init(void) \ { \ + if (omap_clk_init) \ + omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ @@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \ clksrc_nr, clksrc_src) \ void __init omap##name##_sync32k_timer_init(void) \ { \ + if (omap_clk_init) \ + omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ /* Enable the use of clocksource="gp_timer" kernel parameter */ \ diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c index 35a8014..94fbb81 100644 --- a/arch/arm/mach-orion5x/board-dt.c +++ b/arch/arm/mach-orion5x/board-dt.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/cpu.h> #include <asm/system_misc.h> #include <asm/mach/arch.h> #include <mach/orion5x.h> @@ -52,7 +53,7 @@ static void __init orion5x_dt_init(void) */ if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) { printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n"); - disable_hlt(); + cpu_idle_poll_ctrl(true); } if (of_machine_is_compatible("lacie,ethernet-disk-mini-v2")) diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index d068f14..ad71c8a 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -293,7 +293,7 @@ void __init orion5x_init(void) */ if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) { printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n"); - disable_hlt(); + cpu_idle_poll_ctrl(true); } /* diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index 12c5005..648867a 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile @@ -7,12 +7,6 @@ obj-y += clock.o devices.o generic.o irq.o \ time.o reset.o obj-$(CONFIG_PM) += pm.o sleep.o standby.o -ifeq ($(CONFIG_CPU_FREQ),y) -obj-$(CONFIG_PXA25x) += cpufreq-pxa2xx.o -obj-$(CONFIG_PXA27x) += cpufreq-pxa2xx.o -obj-$(CONFIG_PXA3xx) += cpufreq-pxa3xx.o -endif - # Generic drivers that other drivers may depend upon # SoC-specific code diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c deleted file mode 100644 index 6a7aeab..0000000 --- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c +++ /dev/null @@ -1,494 +0,0 @@ -/* - * linux/arch/arm/mach-pxa/cpufreq-pxa2xx.c - * - * Copyright (C) 2002,2003 Intrinsyc Software - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * History: - * 31-Jul-2002 : Initial version [FB] - * 29-Jan-2003 : added PXA255 support [FB] - * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.) - * - * Note: - * This driver may change the memory bus clock rate, but will not do any - * platform specific access timing changes... for example if you have flash - * memory connected to CS0, you will need to register a platform specific - * notifier which will adjust the memory access strobes to maintain a - * minimum strobe width. - * - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/err.h> -#include <linux/regulator/consumer.h> -#include <linux/io.h> - -#include <mach/pxa2xx-regs.h> -#include <mach/smemc.h> - -#ifdef DEBUG -static unsigned int freq_debug; -module_param(freq_debug, uint, 0); -MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0"); -#else -#define freq_debug 0 -#endif - -static struct regulator *vcc_core; - -static unsigned int pxa27x_maxfreq; -module_param(pxa27x_maxfreq, uint, 0); -MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz" - "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)"); - -typedef struct { - unsigned int khz; - unsigned int membus; - unsigned int cccr; - unsigned int div2; - unsigned int cclkcfg; - int vmin; - int vmax; -} pxa_freqs_t; - -/* Define the refresh period in mSec for the SDRAM and the number of rows */ -#define SDRAM_TREF 64 /* standard 64ms SDRAM */ -static unsigned int sdram_rows; - -#define CCLKCFG_TURBO 0x1 -#define CCLKCFG_FCS 0x2 -#define CCLKCFG_HALFTURBO 0x4 -#define CCLKCFG_FASTBUS 0x8 -#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2) -#define MDREFR_DRI_MASK 0xFFF - -#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3) -#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3) - -/* - * PXA255 definitions - */ -/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */ -#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS - -static pxa_freqs_t pxa255_run_freqs[] = -{ - /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ - { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ - {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */ - {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */ - {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */ - {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */ - {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */ -}; - -/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */ -static pxa_freqs_t pxa255_turbo_freqs[] = -{ - /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ - { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ - {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */ - {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */ - {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */ - {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */ -}; - -#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs) -#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs) - -static struct cpufreq_frequency_table - pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1]; -static struct cpufreq_frequency_table - 
pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1]; - -static unsigned int pxa255_turbo_table; -module_param(pxa255_turbo_table, uint, 0); -MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)"); - -/* - * PXA270 definitions - * - * For the PXA27x: - * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG. - * - * A = 0 => memory controller clock from table 3-7, - * A = 1 => memory controller clock = system bus clock - * Run mode frequency = 13 MHz * L - * Turbo mode frequency = 13 MHz * L * N - * System bus frequency = 13 MHz * L / (B + 1) - * - * In CCCR: - * A = 1 - * L = 16 oscillator to run mode ratio - * 2N = 6 2 * (turbo mode to run mode ratio) - * - * In CCLKCFG: - * B = 1 Fast bus mode - * HT = 0 Half-Turbo mode - * T = 1 Turbo mode - * - * For now, just support some of the combinations in table 3-7 of - * PXA27x Processor Family Developer's Manual to simplify frequency - * change sequences. - */ -#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L) -#define CCLKCFG2(B, HT, T) \ - (CCLKCFG_FCS | \ - ((B) ? CCLKCFG_FASTBUS : 0) | \ - ((HT) ? CCLKCFG_HALFTURBO : 0) | \ - ((T) ? CCLKCFG_TURBO : 0)) - -static pxa_freqs_t pxa27x_freqs[] = { - {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 }, - {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 }, - {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 }, - {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 }, - {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 }, - {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 }, - {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 } -}; - -#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs) -static struct cpufreq_frequency_table - pxa27x_freq_table[NUM_PXA27x_FREQS+1]; - -extern unsigned get_clk_frequency_khz(int info); - -#ifdef CONFIG_REGULATOR - -static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) -{ - int ret = 0; - int vmin, vmax; - - if (!cpu_is_pxa27x()) - return 0; - - vmin = pxa_freq->vmin; - vmax = pxa_freq->vmax; - if ((vmin == -1) || (vmax == -1)) - return 0; - - ret = regulator_set_voltage(vcc_core, vmin, vmax); - if (ret) - pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n", - vmin, vmax); - return ret; -} - -static __init void pxa_cpufreq_init_voltages(void) -{ - vcc_core = regulator_get(NULL, "vcc_core"); - if (IS_ERR(vcc_core)) { - pr_info("cpufreq: Didn't find vcc_core regulator\n"); - vcc_core = NULL; - } else { - pr_info("cpufreq: Found vcc_core regulator\n"); - } -} -#else -static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) -{ - return 0; -} - -static __init void pxa_cpufreq_init_voltages(void) { } -#endif - -static void find_freq_tables(struct cpufreq_frequency_table **freq_table, - pxa_freqs_t **pxa_freqs) -{ - if (cpu_is_pxa25x()) { - if (!pxa255_turbo_table) { - *pxa_freqs = pxa255_run_freqs; - *freq_table = pxa255_run_freq_table; - } else { - *pxa_freqs = pxa255_turbo_freqs; - *freq_table = pxa255_turbo_freq_table; - } - } - if (cpu_is_pxa27x()) { - *pxa_freqs = pxa27x_freqs; - *freq_table = pxa27x_freq_table; - } -} - -static void pxa27x_guess_max_freq(void) -{ - if (!pxa27x_maxfreq) { - pxa27x_maxfreq = 416000; - printk(KERN_INFO "PXA CPU 27x max frequency not defined " - "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n", - pxa27x_maxfreq); - } else { - pxa27x_maxfreq *= 1000; - } -} - -static 
void init_sdram_rows(void) -{ - uint32_t mdcnfg = __raw_readl(MDCNFG); - unsigned int drac2 = 0, drac0 = 0; - - if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3)) - drac2 = MDCNFG_DRAC2(mdcnfg); - - if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1)) - drac0 = MDCNFG_DRAC0(mdcnfg); - - sdram_rows = 1 << (11 + max(drac0, drac2)); -} - -static u32 mdrefr_dri(unsigned int freq) -{ - u32 interval = freq * SDRAM_TREF / sdram_rows; - - return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32; -} - -/* find a valid frequency point */ -static int pxa_verify_policy(struct cpufreq_policy *policy) -{ - struct cpufreq_frequency_table *pxa_freqs_table; - pxa_freqs_t *pxa_freqs; - int ret; - - find_freq_tables(&pxa_freqs_table, &pxa_freqs); - ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table); - - if (freq_debug) - pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n", - policy->min, policy->max); - - return ret; -} - -static unsigned int pxa_cpufreq_get(unsigned int cpu) -{ - return get_clk_frequency_khz(0); -} - -static int pxa_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_frequency_table *pxa_freqs_table; - pxa_freqs_t *pxa_freq_settings; - struct cpufreq_freqs freqs; - unsigned int idx; - unsigned long flags; - unsigned int new_freq_cpu, new_freq_mem; - unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; - int ret = 0; - - /* Get the current policy */ - find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); - - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, pxa_freqs_table, - target_freq, relation, &idx)) { - return -EINVAL; - } - - new_freq_cpu = pxa_freq_settings[idx].khz; - new_freq_mem = pxa_freq_settings[idx].membus; - freqs.old = policy->cur; - freqs.new = new_freq_cpu; - freqs.cpu = policy->cpu; - - if (freq_debug) - pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", - freqs.new / 1000, (pxa_freq_settings[idx].div2) ? - (new_freq_mem / 2000) : (new_freq_mem / 1000)); - - if (vcc_core && freqs.new > freqs.old) - ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); - if (ret) - return ret; - /* - * Tell everyone what we're about to do... - * you should add a notify client with any platform specific - * Vcc changing capability - */ - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - /* Calculate the next MDREFR. If we're slowing down the SDRAM clock - * we need to preset the smaller DRI before the change. If we're - * speeding up we need to set the larger DRI value after the change. - */ - preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR); - if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) { - preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK); - preset_mdrefr |= mdrefr_dri(new_freq_mem); - } - postset_mdrefr = - (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem); - - /* If we're dividing the memory clock by two for the SDRAM clock, this - * must be set prior to the change. Clearing the divide must be done - * after the change. 
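The CCCR encoding in the PXA27x comment block above can be checked directly against the table it introduces; the arithmetic below is illustrative only and uses nothing beyond the formulas and table values already quoted in this file.

/*
 * Worked example: the fastest operating point uses PXA27x_CCCR(1, 16, 6)
 * and CCLKCFG2(1, 0, 1), i.e. A = 1, L = 16, 2N = 6 (so N = 3), fast bus,
 * turbo bit set.  Applying the formulas quoted above:
 *
 *   run mode   = 13 MHz * L     = 13 * 16     = 208 MHz
 *   turbo mode = 13 MHz * L * N = 13 * 16 * 3 = 624 MHz
 *
 * which matches the 624000 kHz entry of pxa27x_freqs[].
 */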
- */ - if (pxa_freq_settings[idx].div2) { - preset_mdrefr |= MDREFR_DB2_MASK; - postset_mdrefr |= MDREFR_DB2_MASK; - } else { - postset_mdrefr &= ~MDREFR_DB2_MASK; - } - - local_irq_save(flags); - - /* Set new the CCCR and prepare CCLKCFG */ - CCCR = pxa_freq_settings[idx].cccr; - cclkcfg = pxa_freq_settings[idx].cclkcfg; - - asm volatile(" \n\ - ldr r4, [%1] /* load MDREFR */ \n\ - b 2f \n\ - .align 5 \n\ -1: \n\ - str %3, [%1] /* preset the MDREFR */ \n\ - mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\ - str %4, [%1] /* postset the MDREFR */ \n\ - \n\ - b 3f \n\ -2: b 1b \n\ -3: nop \n\ - " - : "=&r" (unused) - : "r" (MDREFR), "r" (cclkcfg), - "r" (preset_mdrefr), "r" (postset_mdrefr) - : "r4", "r5"); - local_irq_restore(flags); - - /* - * Tell everyone what we've just done... - * you should add a notify client with any platform specific - * SDRAM refresh timer adjustments - */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - /* - * Even if voltage setting fails, we don't report it, as the frequency - * change succeeded. The voltage reduction is not a critical failure, - * only power savings will suffer from this. - * - * Note: if the voltage change fails, and a return value is returned, a - * bug is triggered (seems a deadlock). Should anybody find out where, - * the "return 0" should become a "return ret". - */ - if (vcc_core && freqs.new < freqs.old) - ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); - - return 0; -} - -static int pxa_cpufreq_init(struct cpufreq_policy *policy) -{ - int i; - unsigned int freq; - struct cpufreq_frequency_table *pxa255_freq_table; - pxa_freqs_t *pxa255_freqs; - - /* try to guess pxa27x cpu */ - if (cpu_is_pxa27x()) - pxa27x_guess_max_freq(); - - pxa_cpufreq_init_voltages(); - - init_sdram_rows(); - - /* set default policy and cpuinfo */ - policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ - policy->cur = get_clk_frequency_khz(0); /* current freq */ - policy->min = policy->max = policy->cur; - - /* Generate pxa25x the run cpufreq_frequency_table struct */ - for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { - pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz; - pxa255_run_freq_table[i].index = i; - } - pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END; - - /* Generate pxa25x the turbo cpufreq_frequency_table struct */ - for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) { - pxa255_turbo_freq_table[i].frequency = - pxa255_turbo_freqs[i].khz; - pxa255_turbo_freq_table[i].index = i; - } - pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END; - - pxa255_turbo_table = !!pxa255_turbo_table; - - /* Generate the pxa27x cpufreq_frequency_table struct */ - for (i = 0; i < NUM_PXA27x_FREQS; i++) { - freq = pxa27x_freqs[i].khz; - if (freq > pxa27x_maxfreq) - break; - pxa27x_freq_table[i].frequency = freq; - pxa27x_freq_table[i].index = i; - } - pxa27x_freq_table[i].index = i; - pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END; - - /* - * Set the policy's minimum and maximum frequencies from the tables - * just constructed. This sets cpuinfo.mxx_freq, min and max. - */ - if (cpu_is_pxa25x()) { - find_freq_tables(&pxa255_freq_table, &pxa255_freqs); - pr_info("PXA255 cpufreq using %s frequency table\n", - pxa255_turbo_table ? 
"turbo" : "run"); - cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table); - } - else if (cpu_is_pxa27x()) - cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table); - - printk(KERN_INFO "PXA CPU frequency change support initialized\n"); - - return 0; -} - -static struct cpufreq_driver pxa_cpufreq_driver = { - .verify = pxa_verify_policy, - .target = pxa_set_target, - .init = pxa_cpufreq_init, - .get = pxa_cpufreq_get, - .name = "PXA2xx", -}; - -static int __init pxa_cpu_init(void) -{ - int ret = -ENODEV; - if (cpu_is_pxa25x() || cpu_is_pxa27x()) - ret = cpufreq_register_driver(&pxa_cpufreq_driver); - return ret; -} - -static void __exit pxa_cpu_exit(void) -{ - cpufreq_unregister_driver(&pxa_cpufreq_driver); -} - - -MODULE_AUTHOR("Intrinsyc Software Inc."); -MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture"); -MODULE_LICENSE("GPL"); -module_init(pxa_cpu_init); -module_exit(pxa_cpu_exit); diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c deleted file mode 100644 index b85b4ab..0000000 --- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c +++ /dev/null @@ -1,258 +0,0 @@ -/* - * linux/arch/arm/mach-pxa/cpufreq-pxa3xx.c - * - * Copyright (C) 2008 Marvell International Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/slab.h> -#include <linux/io.h> - -#include <mach/pxa3xx-regs.h> - -#include "generic.h" - -#define HSS_104M (0) -#define HSS_156M (1) -#define HSS_208M (2) -#define HSS_312M (3) - -#define SMCFS_78M (0) -#define SMCFS_104M (2) -#define SMCFS_208M (5) - -#define SFLFS_104M (0) -#define SFLFS_156M (1) -#define SFLFS_208M (2) -#define SFLFS_312M (3) - -#define XSPCLK_156M (0) -#define XSPCLK_NONE (3) - -#define DMCFS_26M (0) -#define DMCFS_260M (3) - -struct pxa3xx_freq_info { - unsigned int cpufreq_mhz; - unsigned int core_xl : 5; - unsigned int core_xn : 3; - unsigned int hss : 2; - unsigned int dmcfs : 2; - unsigned int smcfs : 3; - unsigned int sflfs : 2; - unsigned int df_clkdiv : 3; - - int vcc_core; /* in mV */ - int vcc_sram; /* in mV */ -}; - -#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \ -{ \ - .cpufreq_mhz = cpufreq, \ - .core_xl = _xl, \ - .core_xn = _xn, \ - .hss = HSS_##_hss##M, \ - .dmcfs = DMCFS_##_dmc##M, \ - .smcfs = SMCFS_##_smc##M, \ - .sflfs = SFLFS_##_sfl##M, \ - .df_clkdiv = _dfi, \ - .vcc_core = vcore, \ - .vcc_sram = vsram, \ -} - -static struct pxa3xx_freq_info pxa300_freqs[] = { - /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */ - OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */ - OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */ - OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */ - OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */ -}; - -static struct pxa3xx_freq_info pxa320_freqs[] = { - /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */ - OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */ - OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */ - OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */ - OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */ - OP(806, 31, 2, 208, 260, 208, 312, 3, 
1400, 1400), /* 806MHz */ -}; - -static unsigned int pxa3xx_freqs_num; -static struct pxa3xx_freq_info *pxa3xx_freqs; -static struct cpufreq_frequency_table *pxa3xx_freqs_table; - -static int setup_freqs_table(struct cpufreq_policy *policy, - struct pxa3xx_freq_info *freqs, int num) -{ - struct cpufreq_frequency_table *table; - int i; - - table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL); - if (table == NULL) - return -ENOMEM; - - for (i = 0; i < num; i++) { - table[i].index = i; - table[i].frequency = freqs[i].cpufreq_mhz * 1000; - } - table[num].index = i; - table[num].frequency = CPUFREQ_TABLE_END; - - pxa3xx_freqs = freqs; - pxa3xx_freqs_num = num; - pxa3xx_freqs_table = table; - - return cpufreq_frequency_table_cpuinfo(policy, table); -} - -static void __update_core_freq(struct pxa3xx_freq_info *info) -{ - uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK; - uint32_t accr = ACCR; - uint32_t xclkcfg; - - accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK); - accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl); - - /* No clock until core PLL is re-locked */ - accr |= ACCR_XSPCLK(XSPCLK_NONE); - - xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */ - - ACCR = accr; - __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg)); - - while ((ACSR & mask) != (accr & mask)) - cpu_relax(); -} - -static void __update_bus_freq(struct pxa3xx_freq_info *info) -{ - uint32_t mask; - uint32_t accr = ACCR; - - mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK | - ACCR_DMCFS_MASK; - - accr &= ~mask; - accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) | - ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs); - - ACCR = accr; - - while ((ACSR & mask) != (accr & mask)) - cpu_relax(); -} - -static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table); -} - -static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) -{ - return pxa3xx_get_clk_frequency_khz(0); -} - -static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct pxa3xx_freq_info *next; - struct cpufreq_freqs freqs; - unsigned long flags; - int idx; - - if (policy->cpu != 0) - return -EINVAL; - - /* Lookup the next frequency */ - if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table, - target_freq, relation, &idx)) - return -EINVAL; - - next = &pxa3xx_freqs[idx]; - - freqs.old = policy->cur; - freqs.new = next->cpufreq_mhz * 1000; - freqs.cpu = policy->cpu; - - pr_debug("CPU frequency from %d MHz to %d MHz%s\n", - freqs.old / 1000, freqs.new / 1000, - (freqs.old == freqs.new) ? " (skipped)" : ""); - - if (freqs.old == target_freq) - return 0; - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - local_irq_save(flags); - __update_core_freq(next); - __update_bus_freq(next); - local_irq_restore(flags); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) -{ - int ret = -EINVAL; - - /* set default policy and cpuinfo */ - policy->cpuinfo.min_freq = 104000; - policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 
806000 : 624000; - policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ - policy->max = pxa3xx_get_clk_frequency_khz(0); - policy->cur = policy->min = policy->max; - - if (cpu_is_pxa300() || cpu_is_pxa310()) - ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs)); - - if (cpu_is_pxa320()) - ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs)); - - if (ret) { - pr_err("failed to setup frequency table\n"); - return ret; - } - - pr_info("CPUFREQ support for PXA3xx initialized\n"); - return 0; -} - -static struct cpufreq_driver pxa3xx_cpufreq_driver = { - .verify = pxa3xx_cpufreq_verify, - .target = pxa3xx_cpufreq_set, - .init = pxa3xx_cpufreq_init, - .get = pxa3xx_cpufreq_get, - .name = "pxa3xx-cpufreq", -}; - -static int __init cpufreq_init(void) -{ - if (cpu_is_pxa3xx()) - return cpufreq_register_driver(&pxa3xx_cpufreq_driver); - - return 0; -} -module_init(cpufreq_init); - -static void __exit cpufreq_exit(void) -{ - cpufreq_unregister_driver(&pxa3xx_cpufreq_driver); -} -module_exit(cpufreq_exit); - -MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx"); -MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-pxa/include/mach/generic.h b/arch/arm/mach-pxa/include/mach/generic.h new file mode 100644 index 0000000..665542e --- /dev/null +++ b/arch/arm/mach-pxa/include/mach/generic.h @@ -0,0 +1 @@ +#include "../../generic.h" diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index af41888..969b0ba 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -505,6 +505,7 @@ static struct w1_gpio_platform_data w1_gpio_platform_data = { .pin = GPIO_ONE_WIRE, .is_open_drain = 0, .enable_external_pullup = w1_enable_external_pullup, + .ext_pullup_enable_pin = -EINVAL, }; struct platform_device raumfeld_w1_gpio_device = { diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c index 04b87ec..1069b56 100644 --- a/arch/arm/mach-s3c24xx/clock-s3c2440.c +++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c @@ -123,6 +123,11 @@ static struct clk s3c2440_clk_ac97 = { .ctrlbit = S3C2440_CLKCON_AC97, }; +#define S3C24XX_VA_UART0 (S3C_VA_UART) +#define S3C24XX_VA_UART1 (S3C_VA_UART + 0x4000 ) +#define S3C24XX_VA_UART2 (S3C_VA_UART + 0x8000 ) +#define S3C24XX_VA_UART3 (S3C_VA_UART + 0xC000 ) + static unsigned long s3c2440_fclk_n_getrate(struct clk *clk) { unsigned long ucon0, ucon1, ucon2, divisor; diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c index 6bcf87f..92e6094 100644 --- a/arch/arm/mach-s3c24xx/common.c +++ b/arch/arm/mach-s3c24xx/common.c @@ -239,6 +239,11 @@ void __init s3c24xx_init_io(struct map_desc *mach_desc, int size) /* Serial port registrations */ +#define S3C2410_PA_UART0 (S3C24XX_PA_UART) +#define S3C2410_PA_UART1 (S3C24XX_PA_UART + 0x4000 ) +#define S3C2410_PA_UART2 (S3C24XX_PA_UART + 0x8000 ) +#define S3C2443_PA_UART3 (S3C24XX_PA_UART + 0xC000 ) + static struct resource s3c2410_uart0_resource[] = { [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K), [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \ diff --git a/arch/arm/mach-s3c24xx/cpufreq.c b/arch/arm/mach-s3c24xx/cpufreq.c index 5f181e7..3c0e78e 100644 --- a/arch/arm/mach-s3c24xx/cpufreq.c +++ b/arch/arm/mach-s3c24xx/cpufreq.c @@ -204,7 +204,6 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, freqs.old = cpu_cur.freq; freqs.new = cpu_new.freq; - freqs.freqs.cpu = 0; freqs.freqs.old = cpu_cur.freq.armclk / 1000; freqs.freqs.new = cpu_new.freq.armclk / 1000; @@ -218,9 +217,7 @@ static int 
s3c_cpufreq_settarget(struct cpufreq_policy *policy, s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk); /* start the frequency change */ - - if (policy) - cpufreq_notify_transition(&freqs.freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE); /* If hclk is staying the same, then we do not need to * re-write the IO or the refresh timings whilst we are changing @@ -264,8 +261,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, local_irq_restore(flags); /* notify everyone we've done this */ - if (policy) - cpufreq_notify_transition(&freqs.freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE); s3c_freq_dbg("%s: finished\n", __func__); return 0; diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h index b7a9f4d..1e73f5f 100644 --- a/arch/arm/mach-s3c24xx/include/mach/irqs.h +++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h @@ -188,10 +188,8 @@ #if defined(CONFIG_CPU_S3C2416) #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) -#elif defined(CONFIG_CPU_S3C2443) -#define NR_IRQS (IRQ_S3C2443_AC97+1) #else -#define NR_IRQS (IRQ_S3C2440_AC97+1) +#define NR_IRQS (IRQ_S3C2443_AC97 + 1) #endif /* compatibility define. */ diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c index cb9f5e0..d8ba9be 100644 --- a/arch/arm/mach-s3c24xx/irq.c +++ b/arch/arm/mach-s3c24xx/irq.c @@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np, base = (void *)0xfd000000; intc->reg_mask = base + 0xa4; - intc->reg_pending = base + 0x08; + intc->reg_pending = base + 0xa8; irq_num = 20; irq_start = S3C2410_IRQ(32); irq_offset = 4; diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c index ead5fab..3c8ab07 100644 --- a/arch/arm/mach-s3c64xx/cpuidle.c +++ b/arch/arm/mach-s3c64xx/cpuidle.c @@ -40,12 +40,9 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev, return index; } -static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device); - static struct cpuidle_driver s3c64xx_cpuidle_driver = { .name = "s3c64xx_cpuidle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .states = { { .enter = s3c64xx_enter_idle, @@ -61,16 +58,6 @@ static struct cpuidle_driver s3c64xx_cpuidle_driver = { static int __init s3c64xx_init_cpuidle(void) { - int ret; - - cpuidle_register_driver(&s3c64xx_cpuidle_driver); - - ret = cpuidle_register_device(&s3c64xx_cpuidle_device); - if (ret) { - pr_err("Failed to register cpuidle device: %d\n", ret); - return ret; - } - - return 0; + return cpuidle_register(&s3c64xx_cpuidle_driver, NULL); } device_initcall(s3c64xx_init_cpuidle); diff --git a/arch/arm/mach-s3c64xx/setup-usb-phy.c b/arch/arm/mach-s3c64xx/setup-usb-phy.c index c8174d9..ca960bd 100644 --- a/arch/arm/mach-s3c64xx/setup-usb-phy.c +++ b/arch/arm/mach-s3c64xx/setup-usb-phy.c @@ -76,7 +76,7 @@ static int s3c_usb_otgphy_exit(struct platform_device *pdev) int s5p_usb_phy_init(struct platform_device *pdev, int type) { - if (type == S5P_USB_PHY_DEVICE) + if (type == USB_PHY_TYPE_DEVICE) return s3c_usb_otgphy_init(pdev); return -EINVAL; @@ -84,7 +84,7 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type) int s5p_usb_phy_exit(struct platform_device *pdev, int type) { - if (type == S5P_USB_PHY_DEVICE) + if (type == USB_PHY_TYPE_DEVICE) return s3c_usb_otgphy_exit(pdev); return -EINVAL; diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c index fcdf52d..f051f53 100644 --- a/arch/arm/mach-s5pv210/clock.c +++ 
b/arch/arm/mach-s5pv210/clock.c @@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = { .name = "pcmcdclk", }; -static struct clk dummy_apb_pclk = { - .name = "apb_pclk", - .id = -1, -}; - static struct clk *clkset_vpllsrc_list[] = { [0] = &clk_fin_vpll, [1] = &clk_sclk_hdmi27m, @@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops = { static struct clk init_clocks_off[] = { { - .name = "dma", - .devname = "dma-pl330.0", - .parent = &clk_hclk_psys.clk, - .enable = s5pv210_clk_ip0_ctrl, - .ctrlbit = (1 << 3), - }, { - .name = "dma", - .devname = "dma-pl330.1", - .parent = &clk_hclk_psys.clk, - .enable = s5pv210_clk_ip0_ctrl, - .ctrlbit = (1 << 4), - }, { .name = "rot", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, @@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = { .ctrlbit = (1<<19), }; +static struct clk clk_pdma0 = { + .name = "pdma0", + .parent = &clk_hclk_psys.clk, + .enable = s5pv210_clk_ip0_ctrl, + .ctrlbit = (1 << 3), +}; + +static struct clk clk_pdma1 = { + .name = "pdma1", + .parent = &clk_hclk_psys.clk, + .enable = s5pv210_clk_ip0_ctrl, + .ctrlbit = (1 << 4), +}; + static struct clk *clkset_uart_list[] = { [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, @@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = { &clk_hsmmc1, &clk_hsmmc2, &clk_hsmmc3, + &clk_pdma0, + &clk_pdma1, }; /* Clock initialisation code */ @@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = { CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), + CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0), + CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1), }; void __init s5pv210_register_clocks(void) @@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void) for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) s3c_disable_clocks(clk_cdev[ptr], 1); - s3c24xx_register_clock(&dummy_apb_pclk); s3c_pwmclk_init(); } diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index 3a38f7b..e373de4 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c @@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = { .mux_id = 0, .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_VSYNC_ACTIVE_LOW, - .bus_type = FIMC_BUS_TYPE_ITU_601, + .fimc_bus_type = FIMC_BUS_TYPE_ITU_601, .board_info = &noon010pc30_board_info, .i2c_bus_num = 0, .clk_frequency = 16000000UL, diff --git a/arch/arm/mach-s5pv210/setup-usb-phy.c b/arch/arm/mach-s5pv210/setup-usb-phy.c index 356a090..b2ee533 100644 --- a/arch/arm/mach-s5pv210/setup-usb-phy.c +++ b/arch/arm/mach-s5pv210/setup-usb-phy.c @@ -80,7 +80,7 @@ static int s5pv210_usb_otgphy_exit(struct platform_device *pdev) int s5p_usb_phy_init(struct platform_device *pdev, int type) { - if (type == S5P_USB_PHY_DEVICE) + if (type == USB_PHY_TYPE_DEVICE) return s5pv210_usb_otgphy_init(pdev); return -EINVAL; @@ -88,7 +88,7 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type) int s5p_usb_phy_exit(struct platform_device *pdev, int type) { - if (type == S5P_USB_PHY_DEVICE) + if (type == USB_PHY_TYPE_DEVICE) return s5pv210_usb_otgphy_exit(pdev); return -EINVAL; diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig index ca14dbd..04f9784 100644 --- a/arch/arm/mach-sa1100/Kconfig +++ b/arch/arm/mach-sa1100/Kconfig @@ -4,7 +4,7 @@ menu "SA11x0 Implementations" config SA1100_ASSABET bool "Assabet" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ 
help Say Y here if you are using the Intel(R) StrongARM(R) SA-1110 Microprocessor Development Board (also known as the Assabet). @@ -20,7 +20,7 @@ config ASSABET_NEPONSET config SA1100_CERF bool "CerfBoard" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ help The Intrinsyc CerfBoard is based on the StrongARM 1110 (Discontinued). More information is available at: @@ -47,7 +47,7 @@ endchoice config SA1100_COLLIE bool "Sharp Zaurus SL5500" - # FIXME: select CPU_FREQ_SA11x0 + # FIXME: select ARM_SA11x0_CPUFREQ select SHARP_LOCOMO select SHARP_PARAM select SHARP_SCOOP @@ -56,7 +56,7 @@ config SA1100_COLLIE config SA1100_H3100 bool "Compaq iPAQ H3100" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ select HTC_EGPIO help Say Y here if you intend to run this kernel on the Compaq iPAQ @@ -67,7 +67,7 @@ config SA1100_H3100 config SA1100_H3600 bool "Compaq iPAQ H3600/H3700" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ select HTC_EGPIO help Say Y here if you intend to run this kernel on the Compaq iPAQ @@ -78,7 +78,7 @@ config SA1100_H3600 config SA1100_BADGE4 bool "HP Labs BadgePAD 4" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ select SA1111 help Say Y here if you want to build a kernel for the HP Laboratories @@ -86,7 +86,7 @@ config SA1100_BADGE4 config SA1100_JORNADA720 bool "HP Jornada 720" - # FIXME: select CPU_FREQ_SA11x0 + # FIXME: select ARM_SA11x0_CPUFREQ select SA1111 help Say Y here if you want to build a kernel for the HP Jornada 720 @@ -105,14 +105,14 @@ config SA1100_JORNADA720_SSP config SA1100_HACKKIT bool "HackKit Core CPU Board" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ help Say Y here to support the HackKit Core CPU Board <http://hackkit.eletztrick.de>; config SA1100_LART bool "LART" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ help Say Y here if you are using the Linux Advanced Radio Terminal (also known as the LART). See <http://www.lartmaker.nl/> for @@ -120,7 +120,7 @@ config SA1100_LART config SA1100_NANOENGINE bool "nanoEngine" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ select PCI select PCI_NANOENGINE help @@ -130,7 +130,7 @@ config SA1100_NANOENGINE config SA1100_PLEB bool "PLEB" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ help Say Y here if you are using version 1 of the Portable Linux Embedded Board (also known as PLEB). @@ -139,7 +139,7 @@ config SA1100_PLEB config SA1100_SHANNON bool "Shannon" - select CPU_FREQ_SA1100 + select ARM_SA1100_CPUFREQ help The Shannon (also known as a Tuxscreen, and also as a IS2630) was a limited edition webphone produced by Philips. The Shannon is a SA1100 @@ -148,7 +148,7 @@ config SA1100_SHANNON config SA1100_SIMPAD bool "Simpad" - select CPU_FREQ_SA1110 + select ARM_SA1110_CPUFREQ help The SIEMENS webpad SIMpad is based on the StrongARM 1110. There are two different versions CL4 and SL4. 
CL4 has 32MB RAM and 16MB diff --git a/arch/arm/mach-sa1100/Makefile b/arch/arm/mach-sa1100/Makefile index 1aed9e7..2732eef 100644 --- a/arch/arm/mach-sa1100/Makefile +++ b/arch/arm/mach-sa1100/Makefile @@ -8,9 +8,6 @@ obj-m := obj-n := obj- := -obj-$(CONFIG_CPU_FREQ_SA1100) += cpu-sa1100.o -obj-$(CONFIG_CPU_FREQ_SA1110) += cpu-sa1110.o - # Specific board support obj-$(CONFIG_SA1100_ASSABET) += assabet.o obj-$(CONFIG_ASSABET_NEPONSET) += neponset.o diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/arch/arm/mach-sa1100/cpu-sa1100.c deleted file mode 100644 index e8f4d1e..0000000 --- a/arch/arm/mach-sa1100/cpu-sa1100.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * cpu-sa1100.c: clock scaling for the SA1100 - * - * Copyright (C) 2000 2001, The Delft University of Technology - * - * Authors: - * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version - * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl): - * - major rewrite for linux-2.3.99 - * - rewritten for the more generic power management scheme in - * linux-2.4.5-rmk1 - * - * This software has been developed while working on the LART - * computing board (http://www.lartmaker.nl/), which is - * sponsored by the Mobile Multi-media Communications - * (http://www.mobimedia.org/) and Ubiquitous Communications - * (http://www.ubicom.tudelft.nl/) projects. - * - * The authors can be reached at: - * - * Erik Mouw - * Information and Communication Theory Group - * Faculty of Information Technology and Systems - * Delft University of Technology - * P.O. Box 5031 - * 2600 GA Delft - * The Netherlands - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * - * Theory of operations - * ==================== - * - * Clock scaling can be used to lower the power consumption of the CPU - * core. This will give you a somewhat longer running time. - * - * The SA-1100 has a single register to change the core clock speed: - * - * PPCR 0x90020014 PLL config - * - * However, the DRAM timings are closely related to the core clock - * speed, so we need to change these, too. The used registers are: - * - * MDCNFG 0xA0000000 DRAM config - * MDCAS0 0xA0000004 Access waveform - * MDCAS1 0xA0000008 Access waveform - * MDCAS2 0xA000000C Access waveform - * - * Care must be taken to change the DRAM parameters the correct way, - * because otherwise the DRAM becomes unusable and the kernel will - * crash. - * - * The simple solution to avoid a kernel crash is to put the actual - * clock change in ROM and jump to that code from the kernel. The main - * disadvantage is that the ROM has to be modified, which is not - * possible on all SA-1100 platforms. Another disadvantage is that - * jumping to ROM makes clock switching unnecessary complicated. - * - * The idea behind this driver is that the memory configuration can be - * changed while running from DRAM (even with interrupts turned on!) 
- * as long as all re-configuration steps yield a valid DRAM - * configuration. The advantages are clear: it will run on all SA-1100 - * platforms, and the code is very simple. - * - * If you really want to understand what is going on in - * sa1100_update_dram_timings(), you'll have to read sections 8.2, - * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor - * Developers Manual" (available for free from Intel). - * - */ - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/io.h> - -#include <asm/cputype.h> - -#include <mach/hardware.h> - -#include "generic.h" - -struct sa1100_dram_regs { - int speed; - u32 mdcnfg; - u32 mdcas0; - u32 mdcas1; - u32 mdcas2; -}; - - -static struct cpufreq_driver sa1100_driver; - -static struct sa1100_dram_regs sa1100_dram_settings[] = { - /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */ - { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */ - { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */ - { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */ - {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */ - {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */ - {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */ - {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */ - {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */ - {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */ - {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */ - {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */ - {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */ - {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */ - {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */ - {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */ - {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */ - { 0, 0, 0, 0, 0 } /* last entry */ -}; - -static void sa1100_update_dram_timings(int current_speed, int new_speed) -{ - struct sa1100_dram_regs *settings = sa1100_dram_settings; - - /* find speed */ - while (settings->speed != 0) { - if (new_speed == settings->speed) - break; - - settings++; - } - - if (settings->speed == 0) { - panic("%s: couldn't find dram setting for speed %d\n", - __func__, new_speed); - } - - /* No risk, no fun: run with interrupts on! */ - if (new_speed > current_speed) { - /* We're going FASTER, so first relax the memory - * timings before changing the core frequency - */ - - /* Half the memory access clock */ - MDCNFG |= MDCNFG_CDB2; - - /* The order of these statements IS important, keep 8 - * pulses!! - */ - MDCAS2 = settings->mdcas2; - MDCAS1 = settings->mdcas1; - MDCAS0 = settings->mdcas0; - MDCNFG = settings->mdcnfg; - } else { - /* We're going SLOWER: first decrease the core - * frequency and then tighten the memory settings. - */ - - /* Half the memory access clock */ - MDCNFG |= MDCNFG_CDB2; - - /* The order of these statements IS important, keep 8 - * pulses!! 
- */ - MDCAS0 = settings->mdcas0; - MDCAS1 = settings->mdcas1; - MDCAS2 = settings->mdcas2; - MDCNFG = settings->mdcnfg; - } -} - -static int sa1100_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int cur = sa11x0_getspeed(0); - unsigned int new_ppcr; - struct cpufreq_freqs freqs; - - new_ppcr = sa11x0_freq_to_ppcr(target_freq); - switch (relation) { - case CPUFREQ_RELATION_L: - if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max) - new_ppcr--; - break; - case CPUFREQ_RELATION_H: - if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) && - (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min)) - new_ppcr--; - break; - } - - freqs.old = cur; - freqs.new = sa11x0_ppcr_to_freq(new_ppcr); - freqs.cpu = 0; - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - if (freqs.new > cur) - sa1100_update_dram_timings(cur, freqs.new); - - PPCR = new_ppcr; - - if (freqs.new < cur) - sa1100_update_dram_timings(cur, freqs.new); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static int __init sa1100_cpu_init(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - policy->cur = policy->min = policy->max = sa11x0_getspeed(0); - policy->cpuinfo.min_freq = 59000; - policy->cpuinfo.max_freq = 287000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return 0; -} - -static struct cpufreq_driver sa1100_driver __refdata = { - .flags = CPUFREQ_STICKY, - .verify = sa11x0_verify_speed, - .target = sa1100_target, - .get = sa11x0_getspeed, - .init = sa1100_cpu_init, - .name = "sa1100", -}; - -static int __init sa1100_dram_init(void) -{ - if (cpu_is_sa1100()) - return cpufreq_register_driver(&sa1100_driver); - else - return -ENODEV; -} - -arch_initcall(sa1100_dram_init); diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c deleted file mode 100644 index 48c45b0..0000000 --- a/arch/arm/mach-sa1100/cpu-sa1110.c +++ /dev/null @@ -1,408 +0,0 @@ -/* - * linux/arch/arm/mach-sa1100/cpu-sa1110.c - * - * Copyright (C) 2001 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Note: there are two erratas that apply to the SA1110 here: - * 7 - SDRAM auto-power-up failure (rev A0) - * 13 - Corruption of internal register reads/writes following - * SDRAM reads (rev A0, B0, B1) - * - * We ignore rev. A0 and B0 devices; I don't think they're worth supporting. 
- * - * The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type - */ -#include <linux/cpufreq.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/moduleparam.h> -#include <linux/types.h> - -#include <asm/cputype.h> -#include <asm/mach-types.h> - -#include <mach/hardware.h> - -#include "generic.h" - -#undef DEBUG - -struct sdram_params { - const char name[20]; - u_char rows; /* bits */ - u_char cas_latency; /* cycles */ - u_char tck; /* clock cycle time (ns) */ - u_char trcd; /* activate to r/w (ns) */ - u_char trp; /* precharge to activate (ns) */ - u_char twr; /* write recovery time (ns) */ - u_short refresh; /* refresh time for array (us) */ -}; - -struct sdram_info { - u_int mdcnfg; - u_int mdrefr; - u_int mdcas[3]; -}; - -static struct sdram_params sdram_tbl[] __initdata = { - { /* Toshiba TC59SM716 CL2 */ - .name = "TC59SM716-CL2", - .rows = 12, - .tck = 10, - .trcd = 20, - .trp = 20, - .twr = 10, - .refresh = 64000, - .cas_latency = 2, - }, { /* Toshiba TC59SM716 CL3 */ - .name = "TC59SM716-CL3", - .rows = 12, - .tck = 8, - .trcd = 20, - .trp = 20, - .twr = 8, - .refresh = 64000, - .cas_latency = 3, - }, { /* Samsung K4S641632D TC75 */ - .name = "K4S641632D", - .rows = 14, - .tck = 9, - .trcd = 27, - .trp = 20, - .twr = 9, - .refresh = 64000, - .cas_latency = 3, - }, { /* Samsung K4S281632B-1H */ - .name = "K4S281632B-1H", - .rows = 12, - .tck = 10, - .trp = 20, - .twr = 10, - .refresh = 64000, - .cas_latency = 3, - }, { /* Samsung KM416S4030CT */ - .name = "KM416S4030CT", - .rows = 13, - .tck = 8, - .trcd = 24, /* 3 CLKs */ - .trp = 24, /* 3 CLKs */ - .twr = 16, /* Trdl: 2 CLKs */ - .refresh = 64000, - .cas_latency = 3, - }, { /* Winbond W982516AH75L CL3 */ - .name = "W982516AH75L", - .rows = 16, - .tck = 8, - .trcd = 20, - .trp = 20, - .twr = 8, - .refresh = 64000, - .cas_latency = 3, - }, { /* Micron MT48LC8M16A2TG-75 */ - .name = "MT48LC8M16A2TG-75", - .rows = 12, - .tck = 8, - .trcd = 20, - .trp = 20, - .twr = 8, - .refresh = 64000, - .cas_latency = 3, - }, -}; - -static struct sdram_params sdram_params; - -/* - * Given a period in ns and frequency in khz, calculate the number of - * cycles of frequency in period. Note that we round up to the next - * cycle, even if we are only slightly over. - */ -static inline u_int ns_to_cycles(u_int ns, u_int khz) -{ - return (ns * khz + 999999) / 1000000; -} - -/* - * Create the MDCAS register bit pattern. - */ -static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd) -{ - u_int shift; - - rcd = 2 * rcd - 1; - shift = delayed + 1 + rcd; - - mdcas[0] = (1 << rcd) - 1; - mdcas[0] |= 0x55555555 << shift; - mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1); -} - -static void -sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz, - struct sdram_params *sdram) -{ - u_int mem_khz, sd_khz, trp, twr; - - mem_khz = cpu_khz / 2; - sd_khz = mem_khz; - - /* - * If SDCLK would invalidate the SDRAM timings, - * run SDCLK at half speed. - * - * CPU steppings prior to B2 must either run the memory at - * half speed or use delayed read latching (errata 13). 
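The round-up in ns_to_cycles() above is easiest to see with numbers. The helper is repeated below with plain unsigned types as a standalone illustration, and the example uses the 20 ns tRP figure shared by several sdram_tbl[] entries together with an assumed 206.4 MHz core clock (so mem_khz = cpu_khz / 2 = 103200); the clock value is only a convenient example, not something fixed by this code.

static inline unsigned int ns_to_cycles(unsigned int ns, unsigned int khz)
{
	return (ns * khz + 999999) / 1000000;	/* round up to the next whole cycle */
}

/*
 * ns_to_cycles(20, 103200) = (20 * 103200 + 999999) / 1000000
 *                          = 3063999 / 1000000
 *                          = 3 cycles
 *
 * sdram_calculate_timing() below then programs 3 - 1 = 2 into the tRP
 * fields of MDCNFG, clamping the value so it never drops below 1.
 */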
- */ - if ((ns_to_cycles(sdram->tck, sd_khz) > 1) || - (CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000)) - sd_khz /= 2; - - sd->mdcnfg = MDCNFG & 0x007f007f; - - twr = ns_to_cycles(sdram->twr, mem_khz); - - /* trp should always be >1 */ - trp = ns_to_cycles(sdram->trp, mem_khz) - 1; - if (trp < 1) - trp = 1; - - sd->mdcnfg |= trp << 8; - sd->mdcnfg |= trp << 24; - sd->mdcnfg |= sdram->cas_latency << 12; - sd->mdcnfg |= sdram->cas_latency << 28; - sd->mdcnfg |= twr << 14; - sd->mdcnfg |= twr << 30; - - sd->mdrefr = MDREFR & 0xffbffff0; - sd->mdrefr |= 7; - - if (sd_khz != mem_khz) - sd->mdrefr |= MDREFR_K1DB2; - - /* initial number of '1's in MDCAS + 1 */ - set_mdcas(sd->mdcas, sd_khz >= 62000, - ns_to_cycles(sdram->trcd, mem_khz)); - -#ifdef DEBUG - printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n", - sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1], - sd->mdcas[2]); -#endif -} - -/* - * Set the SDRAM refresh rate. - */ -static inline void sdram_set_refresh(u_int dri) -{ - MDREFR = (MDREFR & 0xffff000f) | (dri << 4); - (void) MDREFR; -} - -/* - * Update the refresh period. We do this such that we always refresh - * the SDRAMs within their permissible period. The refresh period is - * always a multiple of the memory clock (fixed at cpu_clock / 2). - * - * FIXME: we don't currently take account of burst accesses here, - * but neither do Intels DM nor Angel. - */ -static void -sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram) -{ - u_int ns_row = (sdram->refresh * 1000) >> sdram->rows; - u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32; - -#ifdef DEBUG - mdelay(250); - printk(KERN_DEBUG "new dri value = %d\n", dri); -#endif - - sdram_set_refresh(dri); -} - -/* - * Ok, set the CPU frequency. - */ -static int sa1110_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct sdram_params *sdram = &sdram_params; - struct cpufreq_freqs freqs; - struct sdram_info sd; - unsigned long flags; - unsigned int ppcr, unused; - - switch (relation) { - case CPUFREQ_RELATION_L: - ppcr = sa11x0_freq_to_ppcr(target_freq); - if (sa11x0_ppcr_to_freq(ppcr) > policy->max) - ppcr--; - break; - case CPUFREQ_RELATION_H: - ppcr = sa11x0_freq_to_ppcr(target_freq); - if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) && - (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min)) - ppcr--; - break; - default: - return -EINVAL; - } - - freqs.old = sa11x0_getspeed(0); - freqs.new = sa11x0_ppcr_to_freq(ppcr); - freqs.cpu = 0; - - sdram_calculate_timing(&sd, freqs.new, sdram); - -#if 0 - /* - * These values are wrong according to the SA1110 documentation - * and errata, but they seem to work. Need to get a storage - * scope on to the SDRAM signals to work out why. - */ - if (policy->max < 147500) { - sd.mdrefr |= MDREFR_K1DB2; - sd.mdcas[0] = 0xaaaaaa7f; - } else { - sd.mdrefr &= ~MDREFR_K1DB2; - sd.mdcas[0] = 0xaaaaaa9f; - } - sd.mdcas[1] = 0xaaaaaaaa; - sd.mdcas[2] = 0xaaaaaaaa; -#endif - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - /* - * The clock could be going away for some time. Set the SDRAMs - * to refresh rapidly (every 64 memory clock cycles). To get - * through the whole array, we need to wait 262144 mclk cycles. - * We wait 20ms to be safe. - */ - sdram_set_refresh(2); - if (!irqs_disabled()) - msleep(20); - else - mdelay(20); - - /* - * Reprogram the DRAM timings with interrupts disabled, and - * ensure that we are doing this within a complete cache line. 
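The refresh side can be checked the same way. Taking the TC59SM716 parameters from sdram_tbl[] (refresh = 64000 us spread over a 2^12-row array) and the same assumed 206.4 MHz core clock, the calculation in sdram_update_refresh() above works out as follows; the SDRAM figures come from the table, the clock choice is only an assumed example.

/*
 *   ns_row = (64000 * 1000) >> 12              = 15625 ns per row
 *   dri    = ns_to_cycles(15625, 103200) / 32  = 1613 / 32 = 50
 *
 * so the DRI field written into MDREFR schedules a refresh roughly every
 * 50 * 32 memory clock cycles, keeping the whole array within its 64 ms
 * refresh budget.
 */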
- * This means that we won't access SDRAM for the duration of - * the programming. - */ - local_irq_save(flags); - asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); - udelay(10); - __asm__ __volatile__("\n\ - b 2f \n\ - .align 5 \n\ -1: str %3, [%1, #0] @ MDCNFG \n\ - str %4, [%1, #28] @ MDREFR \n\ - str %5, [%1, #4] @ MDCAS0 \n\ - str %6, [%1, #8] @ MDCAS1 \n\ - str %7, [%1, #12] @ MDCAS2 \n\ - str %8, [%2, #0] @ PPCR \n\ - ldr %0, [%1, #0] \n\ - b 3f \n\ -2: b 1b \n\ -3: nop \n\ - nop" - : "=&r" (unused) - : "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg), - "r" (sd.mdrefr), "r" (sd.mdcas[0]), - "r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr)); - local_irq_restore(flags); - - /* - * Now, return the SDRAM refresh back to normal. - */ - sdram_update_refresh(freqs.new, sdram); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static int __init sa1110_cpu_init(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - policy->cur = policy->min = policy->max = sa11x0_getspeed(0); - policy->cpuinfo.min_freq = 59000; - policy->cpuinfo.max_freq = 287000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return 0; -} - -/* sa1110_driver needs __refdata because it must remain after init registers - * it with cpufreq_register_driver() */ -static struct cpufreq_driver sa1110_driver __refdata = { - .flags = CPUFREQ_STICKY, - .verify = sa11x0_verify_speed, - .target = sa1110_target, - .get = sa11x0_getspeed, - .init = sa1110_cpu_init, - .name = "sa1110", -}; - -static struct sdram_params *sa1110_find_sdram(const char *name) -{ - struct sdram_params *sdram; - - for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl); - sdram++) - if (strcmp(name, sdram->name) == 0) - return sdram; - - return NULL; -} - -static char sdram_name[16]; - -static int __init sa1110_clk_init(void) -{ - struct sdram_params *sdram; - const char *name = sdram_name; - - if (!cpu_is_sa1110()) - return -ENODEV; - - if (!name[0]) { - if (machine_is_assabet()) - name = "TC59SM716-CL3"; - if (machine_is_pt_system3()) - name = "K4S641632D"; - if (machine_is_h3100()) - name = "KM416S4030CT"; - if (machine_is_jornada720()) - name = "K4S281632B-1H"; - if (machine_is_nanoengine()) - name = "MT48LC8M16A2TG-75"; - } - - sdram = sa1110_find_sdram(name); - if (sdram) { - printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d" - " twr: %d refresh: %d cas_latency: %d\n", - sdram->tck, sdram->trcd, sdram->trp, - sdram->twr, sdram->refresh, sdram->cas_latency); - - memcpy(&sdram_params, sdram, sizeof(sdram_params)); - - return cpufreq_register_driver(&sa1110_driver); - } - - return 0; -} - -module_param_string(sdram, sdram_name, sizeof(sdram_name), 0); -arch_initcall(sa1110_clk_init); diff --git a/arch/arm/mach-sa1100/include/mach/generic.h b/arch/arm/mach-sa1100/include/mach/generic.h new file mode 100644 index 0000000..665542e --- /dev/null +++ b/arch/arm/mach-sa1100/include/mach/generic.h @@ -0,0 +1 @@ +#include "../../generic.h" diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c index b63dec8..1535557 100644 --- a/arch/arm/mach-shark/core.c +++ b/arch/arm/mach-shark/core.c @@ -10,6 +10,7 @@ #include <linux/sched.h> #include <linux/serial_8250.h> #include <linux/io.h> +#include <linux/cpu.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -130,7 +131,7 @@ static void __init shark_timer_init(void) static void shark_init_early(void) { - disable_hlt(); + cpu_idle_poll_ctrl(true); } MACHINE_START(SHARK, "Shark") diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c 
b/arch/arm/mach-shmobile/board-armadillo800eva.c index f2ec077..ff8b7ba 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -169,7 +169,7 @@ static int usbhsf_get_id(struct platform_device *pdev) return USBHS_GADGET; } -static void usbhsf_power_ctrl(struct platform_device *pdev, +static int usbhsf_power_ctrl(struct platform_device *pdev, void __iomem *base, int enable) { struct usbhsf_private *priv = usbhsf_get_priv(pdev); @@ -223,6 +223,8 @@ static void usbhsf_power_ctrl(struct platform_device *pdev, clk_disable(priv->pci); /* usb work around */ clk_disable(priv->usb24); /* usb work around */ } + + return 0; } static int usbhsf_get_vbus(struct platform_device *pdev) @@ -239,7 +241,7 @@ static irqreturn_t usbhsf_interrupt(int irq, void *data) return IRQ_HANDLED; } -static void usbhsf_hardware_exit(struct platform_device *pdev) +static int usbhsf_hardware_exit(struct platform_device *pdev) { struct usbhsf_private *priv = usbhsf_get_priv(pdev); @@ -264,6 +266,8 @@ static void usbhsf_hardware_exit(struct platform_device *pdev) priv->usbh_base = NULL; free_irq(IRQ7, pdev); + + return 0; } static int usbhsf_hardware_init(struct platform_device *pdev) diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index 7f3a6b7..a385f57 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c @@ -155,12 +155,14 @@ static int usbhs_get_vbus(struct platform_device *pdev) return !((1 << 7) & __raw_readw(priv->cr2)); } -static void usbhs_phy_reset(struct platform_device *pdev) +static int usbhs_phy_reset(struct platform_device *pdev) { struct usbhs_private *priv = usbhs_get_priv(pdev); /* init phy */ __raw_writew(0x8a0a, priv->cr2); + + return 0; } static int usbhs_get_id(struct platform_device *pdev) @@ -202,7 +204,7 @@ static int usbhs_hardware_init(struct platform_device *pdev) return 0; } -static void usbhs_hardware_exit(struct platform_device *pdev) +static int usbhs_hardware_exit(struct platform_device *pdev) { struct usbhs_private *priv = usbhs_get_priv(pdev); @@ -210,6 +212,8 @@ static void usbhs_hardware_exit(struct platform_device *pdev) __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy); free_irq(IRQ15, pdev); + + return 0; } static u32 usbhs_pipe_cfg[] = { diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index db968a5..979237c 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -596,12 +596,14 @@ static int usbhs_get_vbus(struct platform_device *pdev) return usbhs_is_connected(usbhs_get_priv(pdev)); } -static void usbhs_phy_reset(struct platform_device *pdev) +static int usbhs_phy_reset(struct platform_device *pdev) { struct usbhs_private *priv = usbhs_get_priv(pdev); /* init phy */ __raw_writew(0x8a0a, priv->usbcrcaddr); + + return 0; } static int usbhs0_get_id(struct platform_device *pdev) @@ -628,11 +630,13 @@ static int usbhs0_hardware_init(struct platform_device *pdev) return 0; } -static void usbhs0_hardware_exit(struct platform_device *pdev) +static int usbhs0_hardware_exit(struct platform_device *pdev) { struct usbhs_private *priv = usbhs_get_priv(pdev); cancel_delayed_work_sync(&priv->work); + + return 0; } static struct usbhs_private usbhs0_private = { @@ -735,7 +739,7 @@ static int usbhs1_hardware_init(struct platform_device *pdev) return 0; } -static void usbhs1_hardware_exit(struct platform_device *pdev) +static int usbhs1_hardware_exit(struct 
platform_device *pdev) { struct usbhs_private *priv = usbhs_get_priv(pdev); @@ -743,6 +747,8 @@ static void usbhs1_hardware_exit(struct platform_device *pdev) __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr); free_irq(IRQ8, pdev); + + return 0; } static int usbhs1_get_id(struct platform_device *pdev) diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c index cdcb799..fec49eb 100644 --- a/arch/arm/mach-shmobile/board-marzen.c +++ b/arch/arm/mach-shmobile/board-marzen.c @@ -32,6 +32,7 @@ #include <linux/smsc911x.h> #include <linux/spi/spi.h> #include <linux/spi/sh_hspi.h> +#include <linux/mmc/host.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mfd/tmio.h> #include <linux/usb/otg.h> diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c index 9e05026..0afeb5c 100644 --- a/arch/arm/mach-shmobile/cpuidle.c +++ b/arch/arm/mach-shmobile/cpuidle.c @@ -16,39 +16,22 @@ #include <asm/cpuidle.h> #include <asm/io.h> -int shmobile_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv, - int index) -{ - cpu_do_idle(); - return 0; -} - -static struct cpuidle_device shmobile_cpuidle_dev; static struct cpuidle_driver shmobile_cpuidle_default_driver = { .name = "shmobile_cpuidle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .states[0] = ARM_CPUIDLE_WFI_STATE, - .states[0].enter = shmobile_enter_wfi, .safe_state_index = 0, /* C1 */ .state_count = 1, }; static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver; -void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv) +void __init shmobile_cpuidle_set_driver(struct cpuidle_driver *drv) { cpuidle_drv = drv; } -int shmobile_cpuidle_init(void) +int __init shmobile_cpuidle_init(void) { - struct cpuidle_device *dev = &shmobile_cpuidle_dev; - - cpuidle_register_driver(cpuidle_drv); - - dev->state_count = cpuidle_drv->state_count; - cpuidle_register_device(dev); - - return 0; + return cpuidle_register(cpuidle_drv, NULL); } diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h index e48606d..362f9b2 100644 --- a/arch/arm/mach-shmobile/include/mach/common.h +++ b/arch/arm/mach-shmobile/include/mach/common.h @@ -13,9 +13,6 @@ extern int shmobile_clk_init(void); extern void shmobile_handle_irq_intc(struct pt_regs *); extern struct platform_suspend_ops shmobile_suspend_ops; struct cpuidle_driver; -struct cpuidle_device; -extern int shmobile_enter_wfi(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index); extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv); extern void sh7372_init_irq(void); diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index a0826a4..dec9293 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -410,11 +410,9 @@ static int sh7372_enter_a4s(struct cpuidle_device *dev, static struct cpuidle_driver sh7372_cpuidle_driver = { .name = "sh7372_cpuidle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .state_count = 5, .safe_state_index = 0, /* C1 */ .states[0] = ARM_CPUIDLE_WFI_STATE, - .states[0].enter = shmobile_enter_wfi, .states[1] = { .name = "C2", .desc = "Core Standby Mode", @@ -450,12 +448,12 @@ static struct cpuidle_driver sh7372_cpuidle_driver = { }, }; -static void sh7372_cpuidle_init(void) +static void __init sh7372_cpuidle_init(void) { shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver); } #else -static void sh7372_cpuidle_init(void) {} +static void __init 
sh7372_cpuidle_init(void) {} #endif #ifdef CONFIG_SUSPEND diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c index 47d83f7..5d92b5d 100644 --- a/arch/arm/mach-shmobile/suspend.c +++ b/arch/arm/mach-shmobile/suspend.c @@ -12,6 +12,8 @@ #include <linux/suspend.h> #include <linux/module.h> #include <linux/err.h> +#include <linux/cpu.h> + #include <asm/io.h> #include <asm/system_misc.h> @@ -23,13 +25,13 @@ static int shmobile_suspend_default_enter(suspend_state_t suspend_state) static int shmobile_suspend_begin(suspend_state_t state) { - disable_hlt(); + cpu_idle_poll_ctrl(true); return 0; } static void shmobile_suspend_end(void) { - enable_hlt(); + cpu_idle_poll_ctrl(false); } struct platform_suspend_ops shmobile_suspend_ops = { diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c index f9d754f..d2b3937 100644 --- a/arch/arm/mach-spear3xx/spear3xx.c +++ b/arch/arm/mach-spear3xx/spear3xx.c @@ -14,7 +14,7 @@ #define pr_fmt(fmt) "SPEAr3xx: " fmt #include <linux/amba/pl022.h> -#include <linux/amba/pl08x.h> +#include <linux/amba/pl080.h> #include <linux/io.h> #include <plat/pl080.h> #include <mach/generic.h> diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index d1c4893..dbc653e 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -18,8 +18,8 @@ config ARCH_TEGRA_2x_SOC select PL310_ERRATA_727915 if CACHE_L2X0 select PL310_ERRATA_769419 if CACHE_L2X0 select USB_ARCH_HAS_EHCI if USB_SUPPORT - select USB_ULPI if USB - select USB_ULPI_VIEWPORT if USB_SUPPORT + select USB_ULPI if USB_PHY + select USB_ULPI_VIEWPORT if USB_PHY help Support for NVIDIA Tegra AP20 and T20 processors, based on the ARM CortexA9MP CPU and the ARM PL310 L2 cache controller @@ -37,8 +37,8 @@ config ARCH_TEGRA_3x_SOC select PINCTRL_TEGRA30 select PL310_ERRATA_769419 if CACHE_L2X0 select USB_ARCH_HAS_EHCI if USB_SUPPORT - select USB_ULPI if USB - select USB_ULPI_VIEWPORT if USB_SUPPORT + select USB_ULPI if USB_PHY + select USB_ULPI_VIEWPORT if USB_PHY help Support for NVIDIA Tegra T30 processor family, based on the ARM CortexA9MP CPU and the ARM PL310 L2 cache controller diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index f6b46ae..09b578f 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile @@ -24,7 +24,6 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += cpuidle-tegra30.o endif obj-$(CONFIG_SMP) += platsmp.o headsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o -obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o obj-$(CONFIG_TEGRA_PCI) += pcie.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += board-dt-tegra20.o diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c deleted file mode 100644 index e3d6e15..0000000 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ /dev/null @@ -1,293 +0,0 @@ -/* - * arch/arm/mach-tegra/cpu-tegra.c - * - * Copyright (C) 2010 Google, Inc. - * - * Author: - * Colin Cross <ccross@google.com> - * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/cpufreq.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/err.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/suspend.h> - -/* Frequency table index must be sequential starting at 0 */ -static struct cpufreq_frequency_table freq_table[] = { - { 0, 216000 }, - { 1, 312000 }, - { 2, 456000 }, - { 3, 608000 }, - { 4, 760000 }, - { 5, 816000 }, - { 6, 912000 }, - { 7, 1000000 }, - { 8, CPUFREQ_TABLE_END }, -}; - -#define NUM_CPUS 2 - -static struct clk *cpu_clk; -static struct clk *pll_x_clk; -static struct clk *pll_p_clk; -static struct clk *emc_clk; - -static unsigned long target_cpu_speed[NUM_CPUS]; -static DEFINE_MUTEX(tegra_cpu_lock); -static bool is_suspended; - -static int tegra_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, freq_table); -} - -static unsigned int tegra_getspeed(unsigned int cpu) -{ - unsigned long rate; - - if (cpu >= NUM_CPUS) - return 0; - - rate = clk_get_rate(cpu_clk) / 1000; - return rate; -} - -static int tegra_cpu_clk_set_rate(unsigned long rate) -{ - int ret; - - /* - * Take an extra reference to the main pll so it doesn't turn - * off when we move the cpu off of it - */ - clk_prepare_enable(pll_x_clk); - - ret = clk_set_parent(cpu_clk, pll_p_clk); - if (ret) { - pr_err("Failed to switch cpu to clock pll_p\n"); - goto out; - } - - if (rate == clk_get_rate(pll_p_clk)) - goto out; - - ret = clk_set_rate(pll_x_clk, rate); - if (ret) { - pr_err("Failed to change pll_x to %lu\n", rate); - goto out; - } - - ret = clk_set_parent(cpu_clk, pll_x_clk); - if (ret) { - pr_err("Failed to switch cpu to clock pll_x\n"); - goto out; - } - -out: - clk_disable_unprepare(pll_x_clk); - return ret; -} - -static int tegra_update_cpu_speed(unsigned long rate) -{ - int ret = 0; - struct cpufreq_freqs freqs; - - freqs.old = tegra_getspeed(0); - freqs.new = rate; - - if (freqs.old == freqs.new) - return ret; - - /* - * Vote on memory bus frequency based on cpu frequency - * This sets the minimum frequency, display or avp may request higher - */ - if (rate >= 816000) - clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ - else if (rate >= 456000) - clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */ - else - clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ - - for_each_online_cpu(freqs.cpu) - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - -#ifdef CONFIG_CPU_FREQ_DEBUG - printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n", - freqs.old, freqs.new); -#endif - - ret = tegra_cpu_clk_set_rate(freqs.new * 1000); - if (ret) { - pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n", - freqs.new); - return ret; - } - - for_each_online_cpu(freqs.cpu) - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static unsigned long tegra_cpu_highest_speed(void) -{ - unsigned long rate = 0; - int i; - - for_each_online_cpu(i) - rate = max(rate, target_cpu_speed[i]); - return rate; -} - -static int tegra_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int idx; - unsigned int freq; - int ret = 0; - - mutex_lock(&tegra_cpu_lock); - - if (is_suspended) { - ret = -EBUSY; - goto out; - } - - cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &idx); - - freq = freq_table[idx].frequency; - - target_cpu_speed[policy->cpu] = freq; - - ret = 
tegra_update_cpu_speed(tegra_cpu_highest_speed()); - -out: - mutex_unlock(&tegra_cpu_lock); - return ret; -} - -static int tegra_pm_notify(struct notifier_block *nb, unsigned long event, - void *dummy) -{ - mutex_lock(&tegra_cpu_lock); - if (event == PM_SUSPEND_PREPARE) { - is_suspended = true; - pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n", - freq_table[0].frequency); - tegra_update_cpu_speed(freq_table[0].frequency); - } else if (event == PM_POST_SUSPEND) { - is_suspended = false; - } - mutex_unlock(&tegra_cpu_lock); - - return NOTIFY_OK; -} - -static struct notifier_block tegra_cpu_pm_notifier = { - .notifier_call = tegra_pm_notify, -}; - -static int tegra_cpu_init(struct cpufreq_policy *policy) -{ - if (policy->cpu >= NUM_CPUS) - return -EINVAL; - - clk_prepare_enable(emc_clk); - clk_prepare_enable(cpu_clk); - - cpufreq_frequency_table_cpuinfo(policy, freq_table); - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - policy->cur = tegra_getspeed(policy->cpu); - target_cpu_speed[policy->cpu] = policy->cur; - - /* FIXME: what's the actual transition time? */ - policy->cpuinfo.transition_latency = 300 * 1000; - - cpumask_copy(policy->cpus, cpu_possible_mask); - - if (policy->cpu == 0) - register_pm_notifier(&tegra_cpu_pm_notifier); - - return 0; -} - -static int tegra_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_cpuinfo(policy, freq_table); - clk_disable_unprepare(emc_clk); - return 0; -} - -static struct freq_attr *tegra_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver tegra_cpufreq_driver = { - .verify = tegra_verify_speed, - .target = tegra_target, - .get = tegra_getspeed, - .init = tegra_cpu_init, - .exit = tegra_cpu_exit, - .name = "tegra", - .attr = tegra_cpufreq_attr, -}; - -static int __init tegra_cpufreq_init(void) -{ - cpu_clk = clk_get_sys(NULL, "cpu"); - if (IS_ERR(cpu_clk)) - return PTR_ERR(cpu_clk); - - pll_x_clk = clk_get_sys(NULL, "pll_x"); - if (IS_ERR(pll_x_clk)) - return PTR_ERR(pll_x_clk); - - pll_p_clk = clk_get_sys(NULL, "pll_p_cclk"); - if (IS_ERR(pll_p_clk)) - return PTR_ERR(pll_p_clk); - - emc_clk = clk_get_sys("cpu", "emc"); - if (IS_ERR(emc_clk)) { - clk_put(cpu_clk); - return PTR_ERR(emc_clk); - } - - return cpufreq_register_driver(&tegra_cpufreq_driver); -} - -static void __exit tegra_cpufreq_exit(void) -{ - cpufreq_unregister_driver(&tegra_cpufreq_driver); - clk_put(emc_clk); - clk_put(cpu_clk); -} - - -MODULE_AUTHOR("Colin Cross <ccross@android.com>"); -MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2"); -MODULE_LICENSE("GPL"); -module_init(tegra_cpufreq_init); -module_exit(tegra_cpufreq_exit); diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c index 0f4e8c4..1d1c602 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra114.c +++ b/arch/arm/mach-tegra/cpuidle-tegra114.c @@ -23,39 +23,13 @@ static struct cpuidle_driver tegra_idle_driver = { .name = "tegra_idle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .state_count = 1, .states = { [0] = ARM_CPUIDLE_WFI_STATE_PWR(600), }, }; -static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device); - int __init tegra114_cpuidle_init(void) { - int ret; - unsigned int cpu; - struct cpuidle_device *dev; - struct cpuidle_driver *drv = &tegra_idle_driver; - - ret = cpuidle_register_driver(&tegra_idle_driver); - if (ret) { - pr_err("CPUidle driver registration failed\n"); - return ret; - } - - for_each_possible_cpu(cpu) { - dev = &per_cpu(tegra_idle_device, cpu); - 
dev->cpu = cpu; - - dev->state_count = drv->state_count; - ret = cpuidle_register_device(dev); - if (ret) { - pr_err("CPU%u: CPUidle device registration failed\n", - cpu); - return ret; - } - } - return 0; + return cpuidle_register(&tegra_idle_driver, NULL); } diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c index 825ced4..590ec25 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra20.c +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c @@ -43,32 +43,33 @@ static atomic_t abort_barrier; static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); +#define TEGRA20_MAX_STATES 2 +#else +#define TEGRA20_MAX_STATES 1 #endif -static struct cpuidle_state tegra_idle_states[] = { - [0] = ARM_CPUIDLE_WFI_STATE_PWR(600), -#ifdef CONFIG_PM_SLEEP - [1] = { - .enter = tegra20_idle_lp2_coupled, - .exit_latency = 5000, - .target_residency = 10000, - .power_usage = 0, - .flags = CPUIDLE_FLAG_TIME_VALID | - CPUIDLE_FLAG_COUPLED, - .name = "powered-down", - .desc = "CPU power gated", - }, -#endif -}; - static struct cpuidle_driver tegra_idle_driver = { .name = "tegra_idle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, + .states = { + ARM_CPUIDLE_WFI_STATE_PWR(600), +#ifdef CONFIG_PM_SLEEP + { + .enter = tegra20_idle_lp2_coupled, + .exit_latency = 5000, + .target_residency = 10000, + .power_usage = 0, + .flags = CPUIDLE_FLAG_TIME_VALID | + CPUIDLE_FLAG_COUPLED, + .name = "powered-down", + .desc = "CPU power gated", + }, +#endif + }, + .state_count = TEGRA20_MAX_STATES, + .safe_state_index = 0, }; -static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device); - #ifdef CONFIG_PM_SLEEP #ifdef CONFIG_SMP static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); @@ -217,39 +218,8 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, int __init tegra20_cpuidle_init(void) { - int ret; - unsigned int cpu; - struct cpuidle_device *dev; - struct cpuidle_driver *drv = &tegra_idle_driver; - #ifdef CONFIG_PM_SLEEP tegra_tear_down_cpu = tegra20_tear_down_cpu; #endif - - drv->state_count = ARRAY_SIZE(tegra_idle_states); - memcpy(drv->states, tegra_idle_states, - drv->state_count * sizeof(drv->states[0])); - - ret = cpuidle_register_driver(&tegra_idle_driver); - if (ret) { - pr_err("CPUidle driver registration failed\n"); - return ret; - } - - for_each_possible_cpu(cpu) { - dev = &per_cpu(tegra_idle_device, cpu); - dev->cpu = cpu; -#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED - dev->coupled_cpus = *cpu_possible_mask; -#endif - - dev->state_count = drv->state_count; - ret = cpuidle_register_device(dev); - if (ret) { - pr_err("CPU%u: CPUidle device registration failed\n", - cpu); - return ret; - } - } - return 0; + return cpuidle_register(&tegra_idle_driver, cpu_possible_mask); } diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c index 8b50cf4..36dc2be 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra30.c +++ b/arch/arm/mach-tegra/cpuidle-tegra30.c @@ -43,7 +43,6 @@ static int tegra30_idle_lp2(struct cpuidle_device *dev, static struct cpuidle_driver tegra_idle_driver = { .name = "tegra_idle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, #ifdef CONFIG_PM_SLEEP .state_count = 2, #else @@ -65,8 +64,6 @@ static struct cpuidle_driver tegra_idle_driver = { }, }; -static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device); - #ifdef CONFIG_PM_SLEEP static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev, struct cpuidle_driver *drv, @@ -157,32 +154,8 @@ static int tegra30_idle_lp2(struct 
cpuidle_device *dev, int __init tegra30_cpuidle_init(void) { - int ret; - unsigned int cpu; - struct cpuidle_device *dev; - struct cpuidle_driver *drv = &tegra_idle_driver; - #ifdef CONFIG_PM_SLEEP tegra_tear_down_cpu = tegra30_tear_down_cpu; #endif - - ret = cpuidle_register_driver(&tegra_idle_driver); - if (ret) { - pr_err("CPUidle driver registration failed\n"); - return ret; - } - - for_each_possible_cpu(cpu) { - dev = &per_cpu(tegra_idle_device, cpu); - dev->cpu = cpu; - - dev->state_count = drv->state_count; - ret = cpuidle_register_device(dev); - if (ret) { - pr_err("CPU%u: CPUidle device registration failed\n", - cpu); - return ret; - } - } - return 0; + return cpuidle_register(&tegra_idle_driver, NULL); } diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c index 2a17bc5..ff3c9f0 100644 --- a/arch/arm/mach-ux500/board-mop500-regulators.c +++ b/arch/arm/mach-ux500/board-mop500-regulators.c @@ -5,6 +5,7 @@ * * Authors: Sundar Iyer <sundar.iyer@stericsson.com> * Bengt Jonsson <bengt.g.jonsson@stericsson.com> + * Daniel Willerud <daniel.willerud@stericsson.com> * * MOP500 board specific initialization for regulators */ @@ -12,6 +13,7 @@ #include <linux/regulator/machine.h> #include <linux/regulator/ab8500.h> #include "board-mop500-regulators.h" +#include "id.h" static struct regulator_consumer_supply gpio_en_3v3_consumers[] = { REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), @@ -53,21 +55,37 @@ struct regulator_init_data tps61052_regulator = { }; static struct regulator_consumer_supply ab8500_vaux1_consumers[] = { - /* External displays, connector on board 2v5 power supply */ - REGULATOR_SUPPLY("vaux12v5", "mcde.0"), + /* Main display, u8500 R3 uib */ + REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"), + /* Main display, u8500 uib and ST uib */ + REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"), + /* Secondary display, ST uib */ + REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"), /* SFH7741 proximity sensor */ REGULATOR_SUPPLY("vcc", "gpio-keys.0"), /* BH1780GLS ambient light sensor */ REGULATOR_SUPPLY("vcc", "2-0029"), /* lsm303dlh accelerometer */ - REGULATOR_SUPPLY("vdd", "3-0018"), + REGULATOR_SUPPLY("vdd", "2-0018"), + /* lsm303dlhc accelerometer */ + REGULATOR_SUPPLY("vdd", "2-0019"), /* lsm303dlh magnetometer */ - REGULATOR_SUPPLY("vdd", "3-001e"), + REGULATOR_SUPPLY("vdd", "2-001e"), /* Rohm BU21013 Touchscreen devices */ REGULATOR_SUPPLY("avdd", "3-005c"), REGULATOR_SUPPLY("avdd", "3-005d"), /* Synaptics RMI4 Touchscreen device */ REGULATOR_SUPPLY("vdd", "3-004b"), + /* L3G4200D Gyroscope device */ + REGULATOR_SUPPLY("vdd", "2-0068"), + /* Ambient light sensor device */ + REGULATOR_SUPPLY("vdd", "3-0029"), + /* Pressure sensor device */ + REGULATOR_SUPPLY("vdd", "2-005c"), + /* Cypress TrueTouch Touchscreen device */ + REGULATOR_SUPPLY("vcpin", "spi8.0"), + /* Camera device */ + REGULATOR_SUPPLY("vaux12v5", "mmio_camera"), }; static struct regulator_consumer_supply ab8500_vaux2_consumers[] = { @@ -75,18 +93,50 @@ static struct regulator_consumer_supply ab8500_vaux2_consumers[] = { REGULATOR_SUPPLY("vmmc", "sdi4"), /* AB8500 audio codec */ REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"), + /* AB8500 accessory detect 1 */ + REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"), + /* AB8500 Tv-out device */ + REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"), + /* AV8100 HDMI device */ + REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"), }; static struct regulator_consumer_supply ab8500_vaux3_consumers[] = { + REGULATOR_SUPPLY("v-SD-STM", 
"stm"), /* External MMC slot power */ REGULATOR_SUPPLY("vmmc", "sdi0"), }; +static struct regulator_consumer_supply ab8505_vaux4_consumers[] = { +}; + +static struct regulator_consumer_supply ab8505_vaux5_consumers[] = { +}; + +static struct regulator_consumer_supply ab8505_vaux6_consumers[] = { +}; + +static struct regulator_consumer_supply ab8505_vaux8_consumers[] = { + /* AB8500 audio codec device */ + REGULATOR_SUPPLY("v-aux8", NULL), +}; + +static struct regulator_consumer_supply ab8505_vadc_consumers[] = { + /* Internal general-purpose ADC */ + REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"), + /* ADC for charger */ + REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"), +}; + static struct regulator_consumer_supply ab8500_vtvout_consumers[] = { /* TV-out DENC supply */ REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"), /* Internal general-purpose ADC */ REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"), + /* ADC for charger */ + REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"), + /* AB8500 Tv-out device */ + REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"), }; static struct regulator_consumer_supply ab8500_vaud_consumers[] = { @@ -114,77 +164,90 @@ static struct regulator_consumer_supply ab8500_vintcore_consumers[] = { REGULATOR_SUPPLY("v-intcore", NULL), /* USB Transceiver */ REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"), + /* Handled by abx500 clk driver */ + REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"), +}; + +static struct regulator_consumer_supply ab8505_usb_consumers[] = { + /* HS USB OTG physical interface */ + REGULATOR_SUPPLY("v-ape", NULL), }; static struct regulator_consumer_supply ab8500_vana_consumers[] = { - /* External displays, connector on board, 1v8 power supply */ - REGULATOR_SUPPLY("vsmps2", "mcde.0"), + /* DB8500 DSI */ + REGULATOR_SUPPLY("vdddsi1v2", "mcde"), + REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"), + REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"), + REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"), + REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"), + REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"), + /* DB8500 CSI */ + REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"), }; /* ab8500 regulator register initialization */ -struct ab8500_regulator_reg_init -ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = { +static struct ab8500_regulator_reg_init ab8500_reg_init[] = { /* * VanaRequestCtrl = HP/LP depending on VxRequest * VextSupply1RequestCtrl = HP/LP depending on VxRequest */ - INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00), /* * VextSupply2RequestCtrl = HP/LP depending on VxRequest * VextSupply3RequestCtrl = HP/LP depending on VxRequest * Vaux1RequestCtrl = HP/LP depending on VxRequest * Vaux2RequestCtrl = HP/LP depending on VxRequest */ - INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00), /* * Vaux3RequestCtrl = HP/LP depending on VxRequest * SwHPReq = Control through SWValid disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00), /* * VanaSysClkReq1HPValid = disabled * Vaux1SysClkReq1HPValid = disabled * Vaux2SysClkReq1HPValid = disabled * Vaux3SysClkReq1HPValid = disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00), /* * VextSupply1SysClkReq1HPValid = disabled * VextSupply2SysClkReq1HPValid = disabled * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled */ - 
INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x40), + INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40), /* * VanaHwHPReq1Valid = disabled * Vaux1HwHPreq1Valid = disabled * Vaux2HwHPReq1Valid = disabled * Vaux3HwHPReqValid = disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00), /* * VextSupply1HwHPReq1Valid = disabled * VextSupply2HwHPReq1Valid = disabled * VextSupply3HwHPReq1Valid = disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00), /* * VanaHwHPReq2Valid = disabled * Vaux1HwHPReq2Valid = disabled * Vaux2HwHPReq2Valid = disabled * Vaux3HwHPReq2Valid = disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00), /* * VextSupply1HwHPReq2Valid = disabled * VextSupply2HwHPReq2Valid = disabled * VextSupply3HwHPReq2Valid = HWReq2 controlled */ - INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x04), + INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04), /* * VanaSwHPReqValid = disabled * Vaux1SwHPReqValid = disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00), /* * Vaux2SwHPReqValid = disabled * Vaux3SwHPReqValid = disabled @@ -192,7 +255,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = { * VextSupply2SwHPReqValid = disabled * VextSupply3SwHPReqValid = disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00), /* * SysClkReq2Valid1 = SysClkReq2 controlled * SysClkReq3Valid1 = disabled @@ -202,7 +265,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = { * SysClkReq7Valid1 = disabled * SysClkReq8Valid1 = disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0x2a), + INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a), /* * SysClkReq2Valid2 = disabled * SysClkReq3Valid2 = disabled @@ -212,7 +275,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = { * SysClkReq7Valid2 = disabled * SysClkReq8Valid2 = disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0x20), + INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20), /* * VTVoutEna = disabled * Vintcore12Ena = disabled @@ -220,66 +283,62 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = { * Vintcore12LP = inactive (HP) * VTVoutLP = inactive (HP) */ - INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x10), + INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10), /* * VaudioEna = disabled * VdmicEna = disabled * Vamic1Ena = disabled * Vamic2Ena = disabled */ - INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x00), + INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00), /* * Vamic1_dzout = high-Z when Vamic1 is disabled * Vamic2_dzout = high-Z when Vamic2 is disabled */ - INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00), /* - * VPll = Hw controlled + * VPll = Hw controlled (NOTE! 
PRCMU bits) * VanaRegu = force off */ - INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x02), + INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02), /* * VrefDDREna = disabled * VrefDDRSleepMode = inactive (no pulldown) */ - INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x00), + INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00), /* - * VextSupply1Regu = HW control - * VextSupply2Regu = HW control - * VextSupply3Regu = HW control + * VextSupply1Regu = force LP + * VextSupply2Regu = force OFF + * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP) * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0 * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0 */ - INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0x2a), + INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13), /* * Vaux1Regu = force HP * Vaux2Regu = force off */ - INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01), + INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01), /* - * Vaux3regu = force off + * Vaux3Regu = force off */ - INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x00), + INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00), /* - * Vsmps1 = 1.15V + * Vaux1Sel = 2.8 V */ - INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x24), - /* - * Vaux1Sel = 2.5 V - */ - INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x08), + INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C), /* * Vaux2Sel = 2.9 V */ - INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0d), + INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d), /* * Vaux3Sel = 2.91 V */ - INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07), + INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07), /* * VextSupply12LP = disabled (no LP) */ - INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00), /* * Vaux1Disch = short discharge time * Vaux2Disch = short discharge time @@ -288,33 +347,26 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = { * VTVoutDisch = short discharge time * VaudioDisch = short discharge time */ - INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00), /* * VanaDisch = short discharge time * VdmicPullDownEna = pulldown disabled when Vdmic is disabled * VdmicDisch = short discharge time */ - INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x00), + INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00), }; /* AB8500 regulators */ -struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { +static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { /* supplies to the display/camera */ [AB8500_LDO_AUX1] = { .constraints = { .name = "V-DISPLAY", - .min_uV = 2500000, - .max_uV = 2900000, + .min_uV = 2800000, + .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS, .boot_on = 1, /* display is on at boot */ - /* - * This voltage cannot be disabled right now because - * it is somehow affecting the external MMC - * functionality, though that typically will use - * AUX3. 
- */ - .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers), .consumer_supplies = ab8500_vaux1_consumers, @@ -326,7 +378,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { .min_uV = 1100000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | - REGULATOR_CHANGE_STATUS, + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, }, .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers), .consumer_supplies = ab8500_vaux2_consumers, @@ -338,7 +393,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { .min_uV = 1100000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | - REGULATOR_CHANGE_STATUS, + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, }, .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers), .consumer_supplies = ab8500_vaux3_consumers, @@ -392,18 +450,614 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { [AB8500_LDO_INTCORE] = { .constraints = { .name = "V-INTCORE", - .valid_ops_mask = REGULATOR_CHANGE_STATUS, + .min_uV = 1250000, + .max_uV = 1350000, + .input_uV = 1800000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE | + REGULATOR_CHANGE_DRMS, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, }, .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers), .consumer_supplies = ab8500_vintcore_consumers, }, - /* supply for U8500 CSI/DSI, VANA LDO */ + /* supply for U8500 CSI-DSI, VANA LDO */ [AB8500_LDO_ANA] = { .constraints = { - .name = "V-CSI/DSI", + .name = "V-CSI-DSI", .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers), .consumer_supplies = ab8500_vana_consumers, }, }; + +/* supply for VextSupply3 */ +static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = { + /* SIM supply for 3 V SIM cards */ + REGULATOR_SUPPLY("vinvsim", "sim-detect.0"), +}; + +/* extended configuration for VextSupply2, only used for HREFP_V20 boards */ +static struct ab8500_ext_regulator_cfg ab8500_ext_supply2 = { + .hwreq = true, +}; + +/* + * AB8500 external regulators + */ +static struct regulator_init_data ab8500_ext_regulators[] = { + /* fixed Vbat supplies VSMPS1_EXT_1V8 */ + [AB8500_EXT_SUPPLY1] = { + .constraints = { + .name = "ab8500-ext-supply1", + .min_uV = 1800000, + .max_uV = 1800000, + .initial_mode = REGULATOR_MODE_IDLE, + .boot_on = 1, + .always_on = 1, + }, + }, + /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */ + [AB8500_EXT_SUPPLY2] = { + .constraints = { + .name = "ab8500-ext-supply2", + .min_uV = 1360000, + .max_uV = 1360000, + }, + }, + /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */ + [AB8500_EXT_SUPPLY3] = { + .constraints = { + .name = "ab8500-ext-supply3", + .min_uV = 3400000, + .max_uV = 3400000, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + .boot_on = 1, + }, + .num_consumer_supplies = + ARRAY_SIZE(ab8500_ext_supply3_consumers), + .consumer_supplies = ab8500_ext_supply3_consumers, + }, +}; + +/* ab8505 regulator register initialization */ +static struct ab8500_regulator_reg_init ab8505_reg_init[] = { + /* + * VarmRequestCtrl + * VsmpsCRequestCtrl + * VsmpsARequestCtrl + * VsmpsBRequestCtrl + */ + INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL1, 0x00, 0x00), + /* + * VsafeRequestCtrl + * VpllRequestCtrl + * VanaRequestCtrl = HP/LP depending on 
VxRequest + */ + INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL2, 0x30, 0x00), + /* + * Vaux1RequestCtrl = HP/LP depending on VxRequest + * Vaux2RequestCtrl = HP/LP depending on VxRequest + */ + INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL3, 0xf0, 0x00), + /* + * Vaux3RequestCtrl = HP/LP depending on VxRequest + * SwHPReq = Control through SWValid disabled + */ + INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL4, 0x07, 0x00), + /* + * VsmpsASysClkReq1HPValid + * VsmpsBSysClkReq1HPValid + * VsafeSysClkReq1HPValid + * VanaSysClkReq1HPValid = disabled + * VpllSysClkReq1HPValid + * Vaux1SysClkReq1HPValid = disabled + * Vaux2SysClkReq1HPValid = disabled + * Vaux3SysClkReq1HPValid = disabled + */ + INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00), + /* + * VsmpsCSysClkReq1HPValid + * VarmSysClkReq1HPValid + * VbbSysClkReq1HPValid + * VsmpsMSysClkReq1HPValid + */ + INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID2, 0x00, 0x00), + /* + * VsmpsAHwHPReq1Valid + * VsmpsBHwHPReq1Valid + * VsafeHwHPReq1Valid + * VanaHwHPReq1Valid = disabled + * VpllHwHPReq1Valid + * Vaux1HwHPreq1Valid = disabled + * Vaux2HwHPReq1Valid = disabled + * Vaux3HwHPReqValid = disabled + */ + INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID1, 0xe8, 0x00), + /* + * VsmpsMHwHPReq1Valid + */ + INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID2, 0x00, 0x00), + /* + * VsmpsAHwHPReq2Valid + * VsmpsBHwHPReq2Valid + * VsafeHwHPReq2Valid + * VanaHwHPReq2Valid = disabled + * VpllHwHPReq2Valid + * Vaux1HwHPReq2Valid = disabled + * Vaux2HwHPReq2Valid = disabled + * Vaux3HwHPReq2Valid = disabled + */ + INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID1, 0xe8, 0x00), + /* + * VsmpsMHwHPReq2Valid + */ + INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID2, 0x00, 0x00), + /** + * VsmpsCSwHPReqValid + * VarmSwHPReqValid + * VsmpsASwHPReqValid + * VsmpsBSwHPReqValid + * VsafeSwHPReqValid + * VanaSwHPReqValid + * VanaSwHPReqValid = disabled + * VpllSwHPReqValid + * Vaux1SwHPReqValid = disabled + */ + INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID1, 0xa0, 0x00), + /* + * Vaux2SwHPReqValid = disabled + * Vaux3SwHPReqValid = disabled + * VsmpsMSwHPReqValid + */ + INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID2, 0x03, 0x00), + /* + * SysClkReq2Valid1 = SysClkReq2 controlled + * SysClkReq3Valid1 = disabled + * SysClkReq4Valid1 = SysClkReq4 controlled + */ + INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID1, 0x0e, 0x0a), + /* + * SysClkReq2Valid2 = disabled + * SysClkReq3Valid2 = disabled + * SysClkReq4Valid2 = disabled + */ + INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID2, 0x0e, 0x00), + /* + * Vaux4SwHPReqValid + * Vaux4HwHPReq2Valid + * Vaux4HwHPReq1Valid + * Vaux4SysClkReq1HPValid + */ + INIT_REGULATOR_REGISTER(AB8505_REGUVAUX4REQVALID, 0x00, 0x00), + /* + * VadcEna = disabled + * VintCore12Ena = disabled + * VintCore12Sel = 1.25 V + * VintCore12LP = inactive (HP) + * VadcLP = inactive (HP) + */ + INIT_REGULATOR_REGISTER(AB8505_REGUMISC1, 0xfe, 0x10), + /* + * VaudioEna = disabled + * Vaux8Ena = disabled + * Vamic1Ena = disabled + * Vamic2Ena = disabled + */ + INIT_REGULATOR_REGISTER(AB8505_VAUDIOSUPPLY, 0x1e, 0x00), + /* + * Vamic1_dzout = high-Z when Vamic1 is disabled + * Vamic2_dzout = high-Z when Vamic2 is disabled + */ + INIT_REGULATOR_REGISTER(AB8505_REGUCTRL1VAMIC, 0x03, 0x00), + /* + * VsmpsARegu + * VsmpsASelCtrl + * VsmpsAAutoMode + * VsmpsAPWMMode + */ + INIT_REGULATOR_REGISTER(AB8505_VSMPSAREGU, 0x00, 0x00), + /* + * VsmpsBRegu + * VsmpsBSelCtrl + * VsmpsBAutoMode + * VsmpsBPWMMode + */ + 
INIT_REGULATOR_REGISTER(AB8505_VSMPSBREGU, 0x00, 0x00), + /* + * VsafeRegu + * VsafeSelCtrl + * VsafeAutoMode + * VsafePWMMode + */ + INIT_REGULATOR_REGISTER(AB8505_VSAFEREGU, 0x00, 0x00), + /* + * VPll = Hw controlled (NOTE! PRCMU bits) + * VanaRegu = force off + */ + INIT_REGULATOR_REGISTER(AB8505_VPLLVANAREGU, 0x0f, 0x02), + /* + * VextSupply1Regu = force OFF (OTP_ExtSupply12LPnPolarity 1) + * VextSupply2Regu = force OFF (OTP_ExtSupply12LPnPolarity 1) + * VextSupply3Regu = force OFF (OTP_ExtSupply3LPnPolarity 0) + * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0 + * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0 + */ + INIT_REGULATOR_REGISTER(AB8505_EXTSUPPLYREGU, 0xff, 0x30), + /* + * Vaux1Regu = force HP + * Vaux2Regu = force off + */ + INIT_REGULATOR_REGISTER(AB8505_VAUX12REGU, 0x0f, 0x01), + /* + * Vaux3Regu = force off + */ + INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3REGU, 0x03, 0x00), + /* + * VsmpsASel1 + */ + INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL1, 0x00, 0x00), + /* + * VsmpsASel2 + */ + INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL2, 0x00, 0x00), + /* + * VsmpsASel3 + */ + INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL3, 0x00, 0x00), + /* + * VsmpsBSel1 + */ + INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL1, 0x00, 0x00), + /* + * VsmpsBSel2 + */ + INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL2, 0x00, 0x00), + /* + * VsmpsBSel3 + */ + INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL3, 0x00, 0x00), + /* + * VsafeSel1 + */ + INIT_REGULATOR_REGISTER(AB8505_VSAFESEL1, 0x00, 0x00), + /* + * VsafeSel2 + */ + INIT_REGULATOR_REGISTER(AB8505_VSAFESEL2, 0x00, 0x00), + /* + * VsafeSel3 + */ + INIT_REGULATOR_REGISTER(AB8505_VSAFESEL3, 0x00, 0x00), + /* + * Vaux1Sel = 2.8 V + */ + INIT_REGULATOR_REGISTER(AB8505_VAUX1SEL, 0x0f, 0x0C), + /* + * Vaux2Sel = 2.9 V + */ + INIT_REGULATOR_REGISTER(AB8505_VAUX2SEL, 0x0f, 0x0d), + /* + * Vaux3Sel = 2.91 V + */ + INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3SEL, 0x07, 0x07), + /* + * Vaux4RequestCtrl + */ + INIT_REGULATOR_REGISTER(AB8505_VAUX4REQCTRL, 0x00, 0x00), + /* + * Vaux4Regu + */ + INIT_REGULATOR_REGISTER(AB8505_VAUX4REGU, 0x00, 0x00), + /* + * Vaux4Sel + */ + INIT_REGULATOR_REGISTER(AB8505_VAUX4SEL, 0x00, 0x00), + /* + * Vaux1Disch = short discharge time + * Vaux2Disch = short discharge time + * Vaux3Disch = short discharge time + * Vintcore12Disch = short discharge time + * VTVoutDisch = short discharge time + * VaudioDisch = short discharge time + */ + INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH, 0xfc, 0x00), + /* + * VanaDisch = short discharge time + * Vaux8PullDownEna = pulldown disabled when Vaux8 is disabled + * Vaux8Disch = short discharge time + */ + INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH2, 0x16, 0x00), + /* + * Vaux4Disch = short discharge time + */ + INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH3, 0x01, 0x00), + /* + * Vaux5Sel + * Vaux5LP + * Vaux5Ena + * Vaux5Disch + * Vaux5DisSfst + * Vaux5DisPulld + */ + INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX5, 0x00, 0x00), + /* + * Vaux6Sel + * Vaux6LP + * Vaux6Ena + * Vaux6DisPulld + */ + INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX6, 0x00, 0x00), +}; + +struct regulator_init_data ab8505_regulators[AB8505_NUM_REGULATORS] = { + /* supplies to the display/camera */ + [AB8505_LDO_AUX1] = { + .constraints = { + .name = "V-DISPLAY", + .min_uV = 2800000, + .max_uV = 3300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS, + .boot_on = 1, /* display is on at boot */ + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers), + .consumer_supplies = ab8500_vaux1_consumers, + }, + 
/* supplies to the on-board eMMC */ + [AB8505_LDO_AUX2] = { + .constraints = { + .name = "V-eMMC1", + .min_uV = 1100000, + .max_uV = 3300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers), + .consumer_supplies = ab8500_vaux2_consumers, + }, + /* supply for VAUX3, supplies to SDcard slots */ + [AB8505_LDO_AUX3] = { + .constraints = { + .name = "V-MMC-SD", + .min_uV = 1100000, + .max_uV = 3300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers), + .consumer_supplies = ab8500_vaux3_consumers, + }, + /* supply for VAUX4, supplies to NFC and standalone secure element */ + [AB8505_LDO_AUX4] = { + .constraints = { + .name = "V-NFC-SE", + .min_uV = 1100000, + .max_uV = 3300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux4_consumers), + .consumer_supplies = ab8505_vaux4_consumers, + }, + /* supply for VAUX5, supplies to TBD */ + [AB8505_LDO_AUX5] = { + .constraints = { + .name = "V-AUX5", + .min_uV = 1050000, + .max_uV = 2790000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux5_consumers), + .consumer_supplies = ab8505_vaux5_consumers, + }, + /* supply for VAUX6, supplies to TBD */ + [AB8505_LDO_AUX6] = { + .constraints = { + .name = "V-AUX6", + .min_uV = 1050000, + .max_uV = 2790000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux6_consumers), + .consumer_supplies = ab8505_vaux6_consumers, + }, + /* supply for gpadc, ADC LDO */ + [AB8505_LDO_ADC] = { + .constraints = { + .name = "V-ADC", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8505_vadc_consumers), + .consumer_supplies = ab8505_vadc_consumers, + }, + /* supply for ab8500-vaudio, VAUDIO LDO */ + [AB8505_LDO_AUDIO] = { + .constraints = { + .name = "V-AUD", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers), + .consumer_supplies = ab8500_vaud_consumers, + }, + /* supply for v-anamic1 VAMic1-LDO */ + [AB8505_LDO_ANAMIC1] = { + .constraints = { + .name = "V-AMIC1", + .valid_ops_mask = REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers), + .consumer_supplies = ab8500_vamic1_consumers, + }, + /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */ + [AB8505_LDO_ANAMIC2] = { + .constraints = { + .name = "V-AMIC2", + .valid_ops_mask = REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers), + .consumer_supplies = ab8500_vamic2_consumers, + }, + /* supply for v-aux8, VAUX8 LDO */ + 
[AB8505_LDO_AUX8] = { + .constraints = { + .name = "V-AUX8", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux8_consumers), + .consumer_supplies = ab8505_vaux8_consumers, + }, + /* supply for v-intcore12, VINTCORE12 LDO */ + [AB8505_LDO_INTCORE] = { + .constraints = { + .name = "V-INTCORE", + .min_uV = 1250000, + .max_uV = 1350000, + .input_uV = 1800000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE | + REGULATOR_CHANGE_DRMS, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers), + .consumer_supplies = ab8500_vintcore_consumers, + }, + /* supply for LDO USB */ + [AB8505_LDO_USB] = { + .constraints = { + .name = "V-USB", + .valid_ops_mask = REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8505_usb_consumers), + .consumer_supplies = ab8505_usb_consumers, + }, + /* supply for U8500 CSI-DSI, VANA LDO */ + [AB8505_LDO_ANA] = { + .constraints = { + .name = "V-CSI-DSI", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers), + .consumer_supplies = ab8500_vana_consumers, + }, +}; + +struct ab8500_regulator_platform_data ab8500_regulator_plat_data = { + .reg_init = ab8500_reg_init, + .num_reg_init = ARRAY_SIZE(ab8500_reg_init), + .regulator = ab8500_regulators, + .num_regulator = ARRAY_SIZE(ab8500_regulators), + .ext_regulator = ab8500_ext_regulators, + .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators), +}; + +/* Use the AB8500 init settings for AB8505 as they are the same right now */ +struct ab8500_regulator_platform_data ab8505_regulator_plat_data = { + .reg_init = ab8505_reg_init, + .num_reg_init = ARRAY_SIZE(ab8505_reg_init), + .regulator = ab8505_regulators, + .num_regulator = ARRAY_SIZE(ab8505_regulators), +}; + +static void ab8500_modify_reg_init(int id, u8 mask, u8 value) +{ + int i; + + if (cpu_is_u8520()) { + for (i = ARRAY_SIZE(ab8505_reg_init) - 1; i >= 0; i--) { + if (ab8505_reg_init[i].id == id) { + u8 initval = ab8505_reg_init[i].value; + initval = (initval & ~mask) | (value & mask); + ab8505_reg_init[i].value = initval; + + BUG_ON(mask & ~ab8505_reg_init[i].mask); + return; + } + } + } else { + for (i = ARRAY_SIZE(ab8500_reg_init) - 1; i >= 0; i--) { + if (ab8500_reg_init[i].id == id) { + u8 initval = ab8500_reg_init[i].value; + initval = (initval & ~mask) | (value & mask); + ab8500_reg_init[i].value = initval; + + BUG_ON(mask & ~ab8500_reg_init[i].mask); + return; + } + } + } + + BUG_ON(1); +} + +void mop500_regulator_init(void) +{ + struct regulator_init_data *regulator; + + /* + * Temporarily turn on Vaux2 on 8520 machine + */ + if (cpu_is_u8520()) { + /* Vaux2 initialized to be on */ + ab8500_modify_reg_init(AB8505_VAUX12REGU, 0x0f, 0x05); + } + + /* + * Handle AB8500_EXT_SUPPLY2 on HREFP_V20_V50 boards (do it for + * all HREFP_V20 boards) + */ + if (cpu_is_u8500v20()) { + /* VextSupply2RequestCtrl = HP/OFF depending on VxRequest */ + ab8500_modify_reg_init(AB8500_REGUREQUESTCTRL3, 0x01, 0x01); + + /* VextSupply2SysClkReq1HPValid = SysClkReq1 controlled */ + ab8500_modify_reg_init(AB8500_REGUSYSCLKREQ1HPVALID2, + 0x20, 0x20); + + /* VextSupply2 = force HP at initialization */ + ab8500_modify_reg_init(AB8500_EXTSUPPLYREGU, 0x0c, 0x04); + + /* enable VextSupply2 during platform active */ + regulator = 
&ab8500_ext_regulators[AB8500_EXT_SUPPLY2]; + regulator->constraints.always_on = 1; + + /* disable VextSupply2 in suspend */ + regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2]; + regulator->constraints.state_mem.disabled = 1; + regulator->constraints.state_standby.disabled = 1; + + /* enable VextSupply2 HW control (used in suspend) */ + regulator->driver_data = (void *)&ab8500_ext_supply2; + } +} diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h index 78a0642..9bece38 100644 --- a/arch/arm/mach-ux500/board-mop500-regulators.h +++ b/arch/arm/mach-ux500/board-mop500-regulators.h @@ -14,10 +14,11 @@ #include <linux/regulator/machine.h> #include <linux/regulator/ab8500.h> -extern struct ab8500_regulator_reg_init -ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS]; -extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS]; +extern struct ab8500_regulator_platform_data ab8500_regulator_plat_data; +extern struct ab8500_regulator_platform_data ab8505_regulator_plat_data; extern struct regulator_init_data tps61052_regulator; extern struct regulator_init_data gpio_en_3v3_regulator; +void mop500_regulator_init(void); + #endif diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 051b62c..7f2cb6c 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c @@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = { #endif struct mmci_platform_data mop500_sdi0_data = { - .ios_handler = mop500_sdi0_ios_handler, .ocr_mask = MMC_VDD_29_30, .f_max = 50000000, .capabilities = MMC_CAP_4_BIT_DATA | diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index b034578..ce67237 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <linux/clk.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/platform_data/i2c-nomadik.h> @@ -198,10 +199,7 @@ static struct platform_device snowball_sbnet_dev = { struct ab8500_platform_data ab8500_platdata = { .irq_base = MOP500_AB8500_IRQ_BASE, - .regulator_reg_init = ab8500_regulator_reg_init, - .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init), - .regulator = ab8500_regulators, - .num_regulator = ARRAY_SIZE(ab8500_regulators), + .regulator = &ab8500_regulator_plat_data, .gpio = &ab8500_gpio_pdata, .codec = &ab8500_codec_pdata, }; @@ -439,6 +437,15 @@ static void mop500_prox_deactivate(struct device *dev) regulator_put(prox_regulator); } +void mop500_snowball_ethernet_clock_enable(void) +{ + struct clk *clk; + + clk = clk_get_sys("fsmc", NULL); + if (!IS_ERR(clk)) + clk_prepare_enable(clk); +} + static struct cryp_platform_data u8500_cryp1_platform_data = { .mem_to_engine = { .dir = STEDMA40_MEM_TO_PERIPH, @@ -683,6 +690,8 @@ static void __init snowball_init_machine(void) mop500_audio_init(parent); mop500_uart_init(parent); + mop500_snowball_ethernet_clock_enable(); + /* This board has full regulator constraints */ regulator_has_full_constraints(); } diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index eaa605f..d38951b 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h @@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void); void __init snowball_pinmaps_init(void); void __init hrefv60_pinmaps_init(void); void 
mop500_audio_init(struct device *parent); +void mop500_snowball_ethernet_clock_enable(void); int __init mop500_uib_init(void); void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 19235cf..f1a5818 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -312,9 +312,10 @@ static void __init u8500_init_machine(void) /* Pinmaps must be in place before devices register */ if (of_machine_is_compatible("st-ericsson,mop500")) mop500_pinmaps_init(); - else if (of_machine_is_compatible("calaosystems,snowball-a9500")) + else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { snowball_pinmaps_init(); - else if (of_machine_is_compatible("st-ericsson,hrefv60+")) + mop500_snowball_ethernet_clock_enable(); + } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) hrefv60_pinmaps_init(); else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} /* TODO: Add pinmaps for ccu9540 board. */ diff --git a/arch/arm/mach-ux500/cpuidle.c b/arch/arm/mach-ux500/cpuidle.c index ce91493..488e074 100644 --- a/arch/arm/mach-ux500/cpuidle.c +++ b/arch/arm/mach-ux500/cpuidle.c @@ -11,7 +11,6 @@ #include <linux/module.h> #include <linux/cpuidle.h> -#include <linux/clockchips.h> #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/smp.h> @@ -22,7 +21,6 @@ static atomic_t master = ATOMIC_INIT(0); static DEFINE_SPINLOCK(master_lock); -static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device); static inline int ux500_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -30,8 +28,6 @@ static inline int ux500_enter_idle(struct cpuidle_device *dev, int this_cpu = smp_processor_id(); bool recouple = false; - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu); - if (atomic_inc_return(&master) == num_online_cpus()) { /* With this lock, we prevent the other cpu to exit and enter @@ -91,22 +87,20 @@ out: spin_unlock(&master_lock); } - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu); - return index; } static struct cpuidle_driver ux500_idle_driver = { .name = "ux500_idle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .states = { ARM_CPUIDLE_WFI_STATE, { .enter = ux500_enter_idle, .exit_latency = 70, .target_residency = 260, - .flags = CPUIDLE_FLAG_TIME_VALID, + .flags = CPUIDLE_FLAG_TIME_VALID | + CPUIDLE_FLAG_TIMER_STOP, .name = "ApIdle", .desc = "ARM Retention", }, @@ -115,59 +109,13 @@ static struct cpuidle_driver ux500_idle_driver = { .state_count = 2, }; -/* - * For each cpu, setup the broadcast timer because we will - * need to migrate the timers for the states >= ApIdle. - */ -static void ux500_setup_broadcast_timer(void *arg) -{ - int cpu = smp_processor_id(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu); -} - int __init ux500_idle_init(void) { - int ret, cpu; - struct cpuidle_device *device; - /* Configure wake up reasons */ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | PRCMU_WAKEUP(ABB)); - /* - * Configure the timer broadcast for each cpu, that must - * be done from the cpu context, so we use a smp cross - * call with 'on_each_cpu'. 
- */ - on_each_cpu(ux500_setup_broadcast_timer, NULL, 1); - - ret = cpuidle_register_driver(&ux500_idle_driver); - if (ret) { - printk(KERN_ERR "failed to register ux500 idle driver\n"); - return ret; - } - - for_each_online_cpu(cpu) { - device = &per_cpu(ux500_cpuidle_device, cpu); - device->cpu = cpu; - ret = cpuidle_register_device(device); - if (ret) { - printk(KERN_ERR "Failed to register cpuidle " - "device for cpu%d\n", cpu); - goto out_unregister; - } - } -out: - return ret; - -out_unregister: - for_each_online_cpu(cpu) { - device = &per_cpu(ux500_cpuidle_device, cpu); - cpuidle_unregister_device(device); - } - - cpuidle_unregister_driver(&ux500_idle_driver); - goto out; + return cpuidle_register(&ux500_idle_driver, NULL); } device_initcall(ux500_idle_init); diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig index 52d315b..0f1c5e5 100644 --- a/arch/arm/mach-vexpress/Kconfig +++ b/arch/arm/mach-vexpress/Kconfig @@ -17,6 +17,9 @@ config ARCH_VEXPRESS select NO_IOPORT select PLAT_VERSATILE select PLAT_VERSATILE_CLCD + select POWER_RESET + select POWER_RESET_VEXPRESS + select POWER_SUPPLY select REGULATOR_FIXED_VOLTAGE if REGULATOR select VEXPRESS_CONFIG help diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile index 80b6497..42703e8 100644 --- a/arch/arm/mach-vexpress/Makefile +++ b/arch/arm/mach-vexpress/Makefile @@ -4,7 +4,7 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ -I$(srctree)/arch/arm/plat-versatile/include -obj-y := v2m.o reset.o +obj-y := v2m.o obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o obj-$(CONFIG_SMP) += platsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o diff --git a/arch/arm/mach-vexpress/reset.c b/arch/arm/mach-vexpress/reset.c deleted file mode 100644 index 465923a..0000000 --- a/arch/arm/mach-vexpress/reset.c +++ /dev/null @@ -1,141 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Copyright (C) 2012 ARM Limited - */ - -#include <linux/jiffies.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/platform_device.h> -#include <linux/stat.h> -#include <linux/vexpress.h> - -static void vexpress_reset_do(struct device *dev, const char *what) -{ - int err = -ENOENT; - struct vexpress_config_func *func = - vexpress_config_func_get_by_dev(dev); - - if (func) { - unsigned long timeout; - - err = vexpress_config_write(func, 0, 0); - - timeout = jiffies + HZ; - while (time_before(jiffies, timeout)) - cpu_relax(); - } - - dev_emerg(dev, "Unable to %s (%d)\n", what, err); -} - -static struct device *vexpress_power_off_device; - -void vexpress_power_off(void) -{ - vexpress_reset_do(vexpress_power_off_device, "power off"); -} - -static struct device *vexpress_restart_device; - -void vexpress_restart(char str, const char *cmd) -{ - vexpress_reset_do(vexpress_restart_device, "restart"); -} - -static ssize_t vexpress_reset_active_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", vexpress_restart_device == dev); -} - -static ssize_t vexpress_reset_active_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - long value; - int err = kstrtol(buf, 0, &value); - - if (!err && value) - vexpress_restart_device = dev; - - return err ? err : count; -} - -DEVICE_ATTR(active, S_IRUGO | S_IWUSR, vexpress_reset_active_show, - vexpress_reset_active_store); - - -enum vexpress_reset_func { FUNC_RESET, FUNC_SHUTDOWN, FUNC_REBOOT }; - -static struct of_device_id vexpress_reset_of_match[] = { - { - .compatible = "arm,vexpress-reset", - .data = (void *)FUNC_RESET, - }, { - .compatible = "arm,vexpress-shutdown", - .data = (void *)FUNC_SHUTDOWN - }, { - .compatible = "arm,vexpress-reboot", - .data = (void *)FUNC_REBOOT - }, - {} -}; - -static int vexpress_reset_probe(struct platform_device *pdev) -{ - enum vexpress_reset_func func; - const struct of_device_id *match = - of_match_device(vexpress_reset_of_match, &pdev->dev); - - if (match) - func = (enum vexpress_reset_func)match->data; - else - func = pdev->id_entry->driver_data; - - switch (func) { - case FUNC_SHUTDOWN: - vexpress_power_off_device = &pdev->dev; - break; - case FUNC_RESET: - if (!vexpress_restart_device) - vexpress_restart_device = &pdev->dev; - device_create_file(&pdev->dev, &dev_attr_active); - break; - case FUNC_REBOOT: - vexpress_restart_device = &pdev->dev; - device_create_file(&pdev->dev, &dev_attr_active); - break; - }; - - return 0; -} - -static const struct platform_device_id vexpress_reset_id_table[] = { - { .name = "vexpress-reset", .driver_data = FUNC_RESET, }, - { .name = "vexpress-shutdown", .driver_data = FUNC_SHUTDOWN, }, - { .name = "vexpress-reboot", .driver_data = FUNC_REBOOT, }, - {} -}; - -static struct platform_driver vexpress_reset_driver = { - .probe = vexpress_reset_probe, - .driver = { - .name = "vexpress-reset", - .of_match_table = vexpress_reset_of_match, - }, - .id_table = vexpress_reset_id_table, -}; - -static int __init vexpress_reset_init(void) -{ - return platform_driver_register(&vexpress_reset_driver); -} -device_initcall(vexpress_reset_init); diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c index 915683c..eb2b3a6 100644 --- a/arch/arm/mach-vexpress/v2m.c +++ b/arch/arm/mach-vexpress/v2m.c @@ -21,6 +21,8 @@ #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/vexpress.h> +#include <linux/clk-provider.h> +#include <linux/clkdev.h> 
#include <asm/arch_timer.h> #include <asm/mach-types.h> @@ -361,8 +363,6 @@ static void __init v2m_init(void) for (i = 0; i < ARRAY_SIZE(v2m_amba_devs); i++) amba_device_register(v2m_amba_devs[i], &iomem_resource); - pm_power_off = vexpress_power_off; - ct_desc->init_tile(); } @@ -374,7 +374,6 @@ MACHINE_START(VEXPRESS, "ARM-Versatile Express") .init_irq = v2m_init_irq, .init_time = v2m_timer_init, .init_machine = v2m_init, - .restart = vexpress_restart, MACHINE_END static struct map_desc v2m_rs1_io_desc __initdata = { @@ -433,7 +432,7 @@ static void __init v2m_dt_timer_init(void) { struct device_node *node = NULL; - vexpress_clk_of_init(); + of_clk_init(NULL); do { node = of_find_compatible_node(node, NULL, "arm,sp804"); @@ -441,6 +440,10 @@ static void __init v2m_dt_timer_init(void) if (node) { pr_info("Using SP804 '%s' as a clock & events source\n", node->full_name); + WARN_ON(clk_register_clkdev(of_clk_get_by_name(node, + "timclken1"), "v2m-timer0", "sp804")); + WARN_ON(clk_register_clkdev(of_clk_get_by_name(node, + "timclken2"), "v2m-timer1", "sp804")); v2m_sp804_init(of_iomap(node, 0), irq_of_parse_and_map(node, 0)); } @@ -464,7 +467,6 @@ static void __init v2m_dt_init(void) { l2x0_of_init(0x00400000, 0xfe0fffff); of_platform_populate(NULL, v2m_dt_bus_match, NULL, NULL); - pm_power_off = vexpress_power_off; } static const char * const v2m_dt_match[] __initconst = { @@ -481,5 +483,4 @@ DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express") .init_irq = irqchip_init, .init_time = v2m_dt_timer_init, .init_machine = v2m_dt_init, - .restart = vexpress_restart, MACHINE_END diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c index 7abdb96..e65a80a 100644 --- a/arch/arm/mach-w90x900/dev.c +++ b/arch/arm/mach-w90x900/dev.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/cpu.h> #include <linux/mtd/physmap.h> #include <linux/mtd/mtd.h> @@ -531,7 +532,7 @@ static struct platform_device *nuc900_public_dev[] __initdata = { void __init nuc900_board_init(struct platform_device **device, int size) { - disable_hlt(); + cpu_idle_poll_ctrl(true); platform_add_devices(device, size); platform_add_devices(nuc900_public_dev, ARRAY_SIZE(nuc900_public_dev)); spi_register_board_info(nuc900_spi_board_info, diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 025d173..4045c49 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -43,7 +43,7 @@ config CPU_ARM740T depends on !MMU select CPU_32v4T select CPU_ABRT_LV4T - select CPU_CACHE_V3 # although the core is v4t + select CPU_CACHE_V4 select CPU_CP15_MPU select CPU_PABRT_LEGACY help @@ -469,9 +469,6 @@ config CPU_PABRT_V7 bool # The cache model -config CPU_CACHE_V3 - bool - config CPU_CACHE_V4 bool diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 4e333fa..9e51be9 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o -obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index dd3d591..48bc3c0 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override) outer_cache.inv_range = feroceon_l2_inv_range; 
outer_cache.clean_range = feroceon_l2_clean_range; outer_cache.flush_range = feroceon_l2_flush_range; + outer_cache.inv_all = l2_inv_all; enable_l2(); diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c2f3739..c465fac 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id) int lockregs; int i; - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: lockregs = 8; break; @@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) if (cache_id_part_number_from_dt) cache_id = cache_id_part_number_from_dt; else - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) - & L2X0_CACHE_ID_PART_MASK; + cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); aux &= aux_mask; aux |= aux_val; /* Determine the number of ways */ - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: if (aux & (1 << 16)) ways = 16; @@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = { .flush_all = l2x0_flush_all, .inv_all = l2x0_inv_all, .disable = l2x0_disable, - .set_debug = pl310_set_debug, }, }; @@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) data->save(); of_init = true; - l2x0_init(l2x0_base, aux_val, aux_mask); - memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); + l2x0_init(l2x0_base, aux_val, aux_mask); return 0; } diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S deleted file mode 100644 index 8a3fade..0000000 --- a/arch/arm/mm/cache-v3.S +++ /dev/null @@ -1,137 +0,0 @@ -/* - * linux/arch/arm/mm/cache-v3.S - * - * Copyright (C) 1997-2002 Russell king - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/linkage.h> -#include <linux/init.h> -#include <asm/page.h> -#include "proc-macros.S" - -/* - * flush_icache_all() - * - * Unconditionally clean and invalidate the entire icache. - */ -ENTRY(v3_flush_icache_all) - mov pc, lr -ENDPROC(v3_flush_icache_all) - -/* - * flush_user_cache_all() - * - * Invalidate all cache entries in a particular address - * space. - * - * - mm - mm_struct describing address space - */ -ENTRY(v3_flush_user_cache_all) - /* FALLTHROUGH */ -/* - * flush_kern_cache_all() - * - * Clean and invalidate the entire cache. - */ -ENTRY(v3_flush_kern_cache_all) - /* FALLTHROUGH */ - -/* - * flush_user_cache_range(start, end, flags) - * - * Invalidate a range of cache entries in the specified - * address space. - * - * - start - start address (may not be aligned) - * - end - end address (exclusive, may not be aligned) - * - flags - vma_area_struct flags describing address space - */ -ENTRY(v3_flush_user_cache_range) - mov ip, #0 - mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache - mov pc, lr - -/* - * coherent_kern_range(start, end) - * - * Ensure coherency between the Icache and the Dcache in the - * region described by start. If you have non-snooping - * Harvard caches, you need to implement this function. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_coherent_kern_range) - /* FALLTHROUGH */ - -/* - * coherent_user_range(start, end) - * - * Ensure coherency between the Icache and the Dcache in the - * region described by start. 
If you have non-snooping - * Harvard caches, you need to implement this function. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_coherent_user_range) - mov r0, #0 - mov pc, lr - -/* - * flush_kern_dcache_area(void *page, size_t size) - * - * Ensure no D cache aliasing occurs, either with itself or - * the I cache - * - * - addr - kernel address - * - size - region size - */ -ENTRY(v3_flush_kern_dcache_area) - /* FALLTHROUGH */ - -/* - * dma_flush_range(start, end) - * - * Clean and invalidate the specified virtual address range. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_flush_range) - mov r0, #0 - mcr p15, 0, r0, c7, c0, 0 @ flush ID cache - mov pc, lr - -/* - * dma_unmap_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(v3_dma_unmap_area) - teq r2, #DMA_TO_DEVICE - bne v3_dma_flush_range - /* FALLTHROUGH */ - -/* - * dma_map_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(v3_dma_map_area) - mov pc, lr -ENDPROC(v3_dma_unmap_area) -ENDPROC(v3_dma_map_area) - - .globl v3_flush_kern_cache_louis - .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all - - __INITDATA - - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) - define_cache_functions v3 diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 43e5d77..a7ba68f 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all) ENTRY(v4_flush_user_cache_range) #ifdef CONFIG_CPU_CP15 mov ip, #0 - mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache + mcr p15, 0, ip, c7, c7, 0 @ flush ID cache mov pc, lr #else /* FALLTHROUGH */ diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 7a05111..2ac3737 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock); static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); -static DEFINE_PER_CPU(atomic64_t, active_asids); +DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; @@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid) return 0; } -static void new_context(struct mm_struct *mm, unsigned int cpu) +static u64 new_context(struct mm_struct *mm, unsigned int cpu) { - u64 asid = mm->context.id; + u64 asid = atomic64_read(&mm->context.id); u64 generation = atomic64_read(&asid_generation); if (asid != 0 && is_reserved_asid(asid)) { @@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu) cpumask_clear(mm_cpumask(mm)); } - mm->context.id = asid; + return asid; } void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) { unsigned long flags; unsigned int cpu = smp_processor_id(); + u64 asid; if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) __check_vmalloc_seq(mm); @@ -198,20 +199,27 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) */ cpu_set_reserved_ttbr0(); - if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) - && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id)) + asid = atomic64_read(&mm->context.id); + if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) + && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) goto 
switch_mm_fastpath; raw_spin_lock_irqsave(&cpu_asid_lock, flags); /* Check that our ASID belongs to the current generation. */ - if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) - new_context(mm, cpu); - - atomic64_set(&per_cpu(active_asids, cpu), mm->context.id); - cpumask_set_cpu(cpu, mm_cpumask(mm)); + asid = atomic64_read(&mm->context.id); + if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) { + asid = new_context(mm, cpu); + atomic64_set(&mm->context.id, asid); + } - if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) + if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { + local_flush_bp_all(); local_flush_tlb_all(); + dummy_flush_tlb_a15_erratum(); + } + + atomic64_set(&per_cpu(active_asids, cpu), asid); + cpumask_set_cpu(cpu, mm_cpumask(mm)); raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); switch_mm_fastpath: diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c7e3759..e9db6b4 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -342,6 +342,7 @@ static int __init atomic_pool_init(void) { struct dma_pool *pool = &atomic_pool; pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); + gfp_t gfp = GFP_KERNEL | GFP_DMA; unsigned long nr_pages = pool->size >> PAGE_SHIFT; unsigned long *bitmap; struct page *page; @@ -361,8 +362,8 @@ static int __init atomic_pool_init(void) ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, atomic_pool_init); else - ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, - &page, atomic_pool_init); + ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page, + atomic_pool_init); if (ptr) { int i; diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 2dffc01..5ee505c 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -141,6 +141,7 @@ void setup_mm_for_reboot(void) { /* Switch to the identity mapping. */ cpu_switch_mm(idmap_pgd, &init_mm); + local_flush_bp_all(); #ifdef CONFIG_CPU_HAS_ASID /* diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index ad722f1..9a5cdc0 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -99,6 +99,9 @@ void show_mem(unsigned int filter) printk("Mem-info:\n"); show_free_areas(filter); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; + for_each_bank (i, mi) { struct membank *bank = &mi->bank[i]; unsigned int pfn1, pfn2; @@ -424,24 +427,6 @@ void __init bootmem_init(void) max_pfn = max_high - PHYS_PFN_OFFSET; } -static inline int free_area(unsigned long pfn, unsigned long end, char *s) -{ - unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); - - for (; pfn < end; pfn++) { - struct page *page = pfn_to_page(pfn); - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - pages++; - } - - if (size && s) - printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); - - return pages; -} - /* * Poison init memory with an undefined instruction (ARM) or a branch to an * undefined instruction (Thumb). 
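/*
 * [Editor's aside, not part of the patch] In the context.c hunks above,
 * mm->context.id packs a rollover generation in the bits above ASID_BITS
 * and the hardware ASID in the low bits, so the xor-and-shift test is just
 * a comparison of the generation fields.  Equivalent sketch:
 */
static inline bool asid_is_from_old_generation(u64 asid, u64 generation)
{
	/* non-zero iff the parts above ASID_BITS differ */
	return ((asid ^ generation) >> ASID_BITS) != 0;
}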
@@ -534,6 +519,14 @@ static void __init free_unused_memmap(struct meminfo *mi) #endif } +#ifdef CONFIG_HIGHMEM +static inline void free_area_high(unsigned long pfn, unsigned long end) +{ + for (; pfn < end; pfn++) + free_highmem_page(pfn_to_page(pfn)); +} +#endif + static void __init free_highpages(void) { #ifdef CONFIG_HIGHMEM @@ -569,8 +562,7 @@ static void __init free_highpages(void) if (res_end > end) res_end = end; if (res_start != start) - totalhigh_pages += free_area(start, res_start, - NULL); + free_area_high(start, res_start); start = res_end; if (start == end) break; @@ -578,9 +570,8 @@ static void __init free_highpages(void) /* And now free anything which remains */ if (start < end) - totalhigh_pages += free_area(start, end, NULL); + free_area_high(start, end); } - totalram_pages += totalhigh_pages; #endif } @@ -609,8 +600,7 @@ void __init mem_init(void) #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ - totalram_pages += free_area(PHYS_PFN_OFFSET, - __phys_to_pfn(__pa(swapper_pg_dir)), NULL); + free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL); #endif free_highpages(); @@ -738,16 +728,12 @@ void free_initmem(void) extern char __tcm_start, __tcm_end; poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); - totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), - __phys_to_pfn(__pa(&__tcm_end)), - "TCM link"); + free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link"); #endif poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) - totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), - __phys_to_pfn(__pa(__init_end)), - "init"); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD @@ -758,9 +744,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { poison_init_mem((void *)start, PAGE_ALIGN(end) - start); - totalram_pages += free_area(__phys_to_pfn(__pa(start)), - __phys_to_pfn(__pa(end)), - "initrd"); + free_reserved_area(start, end, 0, "initrd"); } } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e95a996..a84ff76 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -34,6 +34,7 @@ #include <asm/mach/pci.h> #include "mm.h" +#include "tcm.h" /* * empty_zero_page is a special page that is used for @@ -598,39 +599,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } while (pte++, addr += PAGE_SIZE, addr != end); } -static void __init alloc_init_section(pud_t *pud, unsigned long addr, - unsigned long end, phys_addr_t phys, - const struct mem_type *type) +static void __init map_init_section(pmd_t *pmd, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) { - pmd_t *pmd = pmd_offset(pud, addr); - +#ifndef CONFIG_ARM_LPAE /* - * Try a section mapping - end, addr and phys must all be aligned - * to a section boundary. Note that PMDs refer to the individual - * L1 entries, whereas PGDs refer to a group of L1 entries making - * up one logical pointer to an L2 table. + * In classic MMU format, puds and pmds are folded in to + * the pgds. pmd_offset gives the PGD entry. PGDs refer to a + * group of L1 entries making up one logical pointer to + * an L2 table (2MB), where as PMDs refer to the individual + * L1 entries (1MB). Hence increment to get the correct + * offset for odd 1MB sections. 
+ * (See arch/arm/include/asm/pgtable-2level.h) */ - if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { - pmd_t *p = pmd; - -#ifndef CONFIG_ARM_LPAE - if (addr & SECTION_SIZE) - pmd++; + if (addr & SECTION_SIZE) + pmd++; #endif + do { + *pmd = __pmd(phys | type->prot_sect); + phys += SECTION_SIZE; + } while (pmd++, addr += SECTION_SIZE, addr != end); - do { - *pmd = __pmd(phys | type->prot_sect); - phys += SECTION_SIZE; - } while (pmd++, addr += SECTION_SIZE, addr != end); + flush_pmd_entry(pmd); +} - flush_pmd_entry(p); - } else { +static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) +{ + pmd_t *pmd = pmd_offset(pud, addr); + unsigned long next; + + do { /* - * No need to loop; pte's aren't interested in the - * individual L1 entries. + * With LPAE, we must loop over to map + * all the pmds for the given range. */ - alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); - } + next = pmd_addr_end(addr, end); + + /* + * Try a section mapping - addr, next and phys must all be + * aligned to a section boundary. + */ + if (type->prot_sect && + ((addr | next | phys) & ~SECTION_MASK) == 0) { + map_init_section(pmd, addr, next, phys, type); + } else { + alloc_init_pte(pmd, addr, next, + __phys_to_pfn(phys), type); + } + + phys += next - addr; + + } while (pmd++, addr = next, addr != end); } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, @@ -641,7 +663,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, do { next = pud_addr_end(addr, end); - alloc_init_section(pud, addr, next, phys, type); + alloc_init_pmd(pud, addr, next, phys, type); phys += next - addr; } while (pud++, addr = next, addr != end); } @@ -1256,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc) dma_contiguous_remap(); devicemaps_init(mdesc); kmap_init(); + tcm_init(); top_pmd = pmd_off_k(0xffff0000); diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index dc5de5d..fde2d2a 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -77,24 +77,27 @@ __arm740_setup: mcr p15, 0, r0, c6, c0 @ set area 0, default ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM - ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 + ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) + mov r4, #10 @ 11 is the minimum (4KB) +1: add r4, r4, #1 @ area size *= 2 + movs r3, r3, lsr #1 bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value + orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit mcr p15, 0, r0, c6, c1 @ set area 1, RAM ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH - ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 + ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) + cmp r3, #0 + moveq r0, #0 + beq 2f + mov r4, #10 @ 11 is the minimum (4KB) +1: add r4, r4, #1 @ area size *= 2 + movs r3, r3, lsr #1 bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value + orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH +2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH mov r0, #0x06 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable @@ 
-137,13 +140,14 @@ __arm740_proc_info: .long 0x41807400 .long 0xfffffff0 .long 0 + .long 0 b __arm740_setup .long cpu_arch_name .long cpu_elf_name - .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT + .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT .long cpu_arm740_name .long arm740_processor_functions .long 0 .long 0 - .long v3_cache_fns @ cache model + .long v4_cache_fns @ cache model .size __arm740_proc_info, . - __arm740_proc_info diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2c3b942..2556cf1 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm920_suspend_size .equ cpu_arm920_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm920_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index f1803f7..344c8a5 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 82f9cdc..0b60dd3 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext) .globl cpu_mohawk_suspend_size .equ cpu_mohawk_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_mohawk_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 3aa0da1..d92dfd0 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext) .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 3e6210b..054b491 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -17,7 +17,9 @@ #ifndef MULTI_CPU EXPORT_SYMBOL(cpu_dcache_clean_area); +#ifdef CONFIG_MMU EXPORT_SYMBOL(cpu_set_pte_ext); +#endif #else EXPORT_SYMBOL(processor); #endif diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index bcaaa8d..5c07ee4 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext) /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 50bf1da..6ffd78c 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -48,7 +48,7 @@ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU mmid r1, r1 @ get mm->context.id - and r3, r1, #0xff + asid r3, r1 mov r3, r3, lsl #(48 - 32) @ ASID mcrr p15, 0, r0, r3, c2 @ set TTB 0 isb diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a3c015..f584d3f 100644 --- 
a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -420,7 +420,7 @@ __v7_pj4b_proc_info: __v7_ca7mp_proc_info: .long 0x410fc070 .long 0xff0ffff0 - __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca7mp_setup .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info /* @@ -430,10 +430,25 @@ __v7_ca7mp_proc_info: __v7_ca15mp_proc_info: .long 0x410fc0f0 .long 0xff0ffff0 - __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca15mp_setup .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info /* + * Qualcomm Inc. Krait processors. + */ + .type __krait_proc_info, #object +__krait_proc_info: + .long 0x510f0400 @ Required ID value + .long 0xff0ffc00 @ Mask for ID + /* + * Some Krait processors don't indicate support for SDIV and UDIV + * instructions in the ARM instruction set, even though they actually + * do support them. + */ + __v7_proc __v7_setup, hwcaps = HWCAP_IDIV + .size __krait_proc_info, . - __krait_proc_info + + /* * Match any ARMv7 processor core. */ .type __v7_proc_info, #object diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index eb93d64..e8efd83 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext) .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 2551036..e766f88 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext) .globl cpu_xscale_suspend_size .equ cpu_xscale_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xscale_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/kernel/tcm.h b/arch/arm/mm/tcm.h index 8015ad4..8015ad4 100644 --- a/arch/arm/kernel/tcm.h +++ b/arch/arm/mm/tcm.h diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 6828ef6..a0bd8a7 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -576,7 +576,7 @@ load_ind: /* x = ((*(frame + k)) & 0xf) << 2; */ ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; /* the interpreter should deal with the negative K */ - if (k < 0) + if ((int)k < 0) return -1; /* offset in r1: we might have to take the slow path */ emit_mov_i(r_off, k, ctx); diff --git a/arch/arm/plat-orion/addr-map.c b/arch/arm/plat-orion/addr-map.c index febe386..807ac8e 100644 --- a/arch/arm/plat-orion/addr-map.c +++ b/arch/arm/plat-orion/addr-map.c @@ -157,9 +157,12 @@ void __init orion_setup_cpu_mbus_target(const struct orion_addr_map_cfg *cfg, u32 size = readl(ddr_window_cpu_base + DDR_SIZE_CS_OFF(i)); /* - * Chip select enabled? + * We only take care of entries for which the chip + * select is enabled, and that don't have high base + * address bits set (devices can only access the first + * 32 bits of the memory). */ - if (size & 1) { + if ((size & 1) && !(base & 0xF)) { struct mbus_dram_window *w; w = &orion_mbus_dram_info.cs[cs++]; diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c index 51afedd..03db14d 100644 --- a/arch/arm/plat-samsung/devs.c +++ b/arch/arm/plat-samsung/devs.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. 
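/*
 * [Editor's aside, not part of the patch] The bpf_jit_32.c hunk above is a
 * signedness fix: k is an unsigned 32-bit value, so the old "k < 0" could
 * never be true and negative (ancillary-data) offsets were never punted to
 * the interpreter.  Minimal illustration (u32/s32/bool from <linux/types.h>):
 */
static bool neg_check_old(u32 k) { return k < 0; }        /* always false: k is unsigned */
static bool neg_check_new(u32 k) { return (s32)k < 0; }   /* true for k >= 0x80000000 */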
*/ +#include <linux/amba/pl330.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> @@ -1552,6 +1553,9 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, pd.num_cs = num_cs; pd.src_clk_nr = src_clk_nr; pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; +#ifdef CONFIG_PL330_DMA + pd.filter = pl330_filter; +#endif s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); } @@ -1590,6 +1594,9 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, pd.num_cs = num_cs; pd.src_clk_nr = src_clk_nr; pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio; +#ifdef CONFIG_PL330_DMA + pd.filter = pl330_filter; +#endif s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1); } @@ -1628,6 +1635,9 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, pd.num_cs = num_cs; pd.src_clk_nr = src_clk_nr; pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio; +#ifdef CONFIG_PL330_DMA + pd.filter = pl330_filter; +#endif s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2); } diff --git a/arch/arm/plat-samsung/include/plat/fb.h b/arch/arm/plat-samsung/include/plat/fb.h index b885322..9ae5072 100644 --- a/arch/arm/plat-samsung/include/plat/fb.h +++ b/arch/arm/plat-samsung/include/plat/fb.h @@ -15,55 +15,7 @@ #ifndef __PLAT_S3C_FB_H #define __PLAT_S3C_FB_H __FILE__ -/* S3C_FB_MAX_WIN - * Set to the maximum number of windows that any of the supported hardware - * can use. Since the platform data uses this for an array size, having it - * set to the maximum of any version of the hardware can do is safe. - */ -#define S3C_FB_MAX_WIN (5) - -/** - * struct s3c_fb_pd_win - per window setup data - * @xres : The window X size. - * @yres : The window Y size. - * @virtual_x: The virtual X size. - * @virtual_y: The virtual Y size. - */ -struct s3c_fb_pd_win { - unsigned short default_bpp; - unsigned short max_bpp; - unsigned short xres; - unsigned short yres; - unsigned short virtual_x; - unsigned short virtual_y; -}; - -/** - * struct s3c_fb_platdata - S3C driver platform specific information - * @setup_gpio: Setup the external GPIO pins to the right state to transfer - * the data from the display system to the connected display - * device. - * @vidcon0: The base vidcon0 values to control the panel data format. - * @vidcon1: The base vidcon1 values to control the panel data output. - * @vtiming: Video timing when connected to a RGB type panel. - * @win: The setup data for each hardware window, or NULL for unused. - * @display_mode: The LCD output display mode. - * - * The platform data supplies the video driver with all the information - * it requires to work with the display(s) attached to the machine. It - * controls the initial mode, the number of display windows (0 is always - * the base framebuffer) that are initialised etc. - * - */ -struct s3c_fb_platdata { - void (*setup_gpio)(void); - - struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN]; - struct fb_videomode *vtiming; - - u32 vidcon0; - u32 vidcon1; -}; +#include <linux/platform_data/video_s3c.h> /** * s3c_fb_set_platdata() - Setup the FB device with platform data. 
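/*
 * [Editor's aside, not part of the patch] The s3c64xx_spi*_set_platdata()
 * hunks above hand pl330_filter() to the SPI driver so it can pick its DMA
 * channel through the dmaengine filter mechanism.  Hedged usage sketch; the
 * DMA_SLAVE capability and the opaque filter parameter are assumptions
 * about the consumer, not taken from this patch:
 */
static struct dma_chan *request_pl330_chan_sketch(void *peri_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, pl330_filter, peri_id);
}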
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h index 29c26a8..f05f2af 100644 --- a/arch/arm/plat-samsung/include/plat/regs-serial.h +++ b/arch/arm/plat-samsung/include/plat/regs-serial.h @@ -1,281 +1 @@ -/* arch/arm/plat-samsung/include/plat/regs-serial.h - * - * From linux/include/asm-arm/hardware/serial_s3c2410.h - * - * Internal header file for Samsung S3C2410 serial ports (UART0-2) - * - * Copyright (C) 2002 Shane Nay (shane@minirl.com) - * - * Additional defines, Copyright 2003 Simtec Electronics (linux@simtec.co.uk) - * - * Adapted from: - * - * Internal header file for MX1ADS serial ports (UART1 & 2) - * - * Copyright (C) 2002 Shane Nay (shane@minirl.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -#ifndef __ASM_ARM_REGS_SERIAL_H -#define __ASM_ARM_REGS_SERIAL_H - -#define S3C24XX_VA_UART0 (S3C_VA_UART) -#define S3C24XX_VA_UART1 (S3C_VA_UART + 0x4000 ) -#define S3C24XX_VA_UART2 (S3C_VA_UART + 0x8000 ) -#define S3C24XX_VA_UART3 (S3C_VA_UART + 0xC000 ) - -#define S3C2410_PA_UART0 (S3C24XX_PA_UART) -#define S3C2410_PA_UART1 (S3C24XX_PA_UART + 0x4000 ) -#define S3C2410_PA_UART2 (S3C24XX_PA_UART + 0x8000 ) -#define S3C2443_PA_UART3 (S3C24XX_PA_UART + 0xC000 ) - -#define S3C2410_URXH (0x24) -#define S3C2410_UTXH (0x20) -#define S3C2410_ULCON (0x00) -#define S3C2410_UCON (0x04) -#define S3C2410_UFCON (0x08) -#define S3C2410_UMCON (0x0C) -#define S3C2410_UBRDIV (0x28) -#define S3C2410_UTRSTAT (0x10) -#define S3C2410_UERSTAT (0x14) -#define S3C2410_UFSTAT (0x18) -#define S3C2410_UMSTAT (0x1C) - -#define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3)) - -#define S3C2410_LCON_CS5 (0x0) -#define S3C2410_LCON_CS6 (0x1) -#define S3C2410_LCON_CS7 (0x2) -#define S3C2410_LCON_CS8 (0x3) -#define S3C2410_LCON_CSMASK (0x3) - -#define S3C2410_LCON_PNONE (0x0) -#define S3C2410_LCON_PEVEN (0x5 << 3) -#define S3C2410_LCON_PODD (0x4 << 3) -#define S3C2410_LCON_PMASK (0x7 << 3) - -#define S3C2410_LCON_STOPB (1<<2) -#define S3C2410_LCON_IRM (1<<6) - -#define S3C2440_UCON_CLKMASK (3<<10) -#define S3C2440_UCON_CLKSHIFT (10) -#define S3C2440_UCON_PCLK (0<<10) -#define S3C2440_UCON_UCLK (1<<10) -#define S3C2440_UCON_PCLK2 (2<<10) -#define S3C2440_UCON_FCLK (3<<10) -#define S3C2443_UCON_EPLL (3<<10) - -#define S3C6400_UCON_CLKMASK (3<<10) -#define S3C6400_UCON_CLKSHIFT (10) -#define S3C6400_UCON_PCLK (0<<10) -#define S3C6400_UCON_PCLK2 (2<<10) -#define S3C6400_UCON_UCLK0 (1<<10) -#define S3C6400_UCON_UCLK1 (3<<10) - -#define S3C2440_UCON2_FCLK_EN (1<<15) -#define S3C2440_UCON0_DIVMASK (15 << 12) -#define S3C2440_UCON1_DIVMASK (15 << 12) -#define S3C2440_UCON2_DIVMASK (7 << 12) -#define S3C2440_UCON_DIVSHIFT (12) - -#define S3C2412_UCON_CLKMASK (3<<10) -#define S3C2412_UCON_CLKSHIFT (10) -#define S3C2412_UCON_UCLK (1<<10) -#define S3C2412_UCON_USYSCLK (3<<10) -#define 
S3C2412_UCON_PCLK (0<<10) -#define S3C2412_UCON_PCLK2 (2<<10) - -#define S3C2410_UCON_CLKMASK (1 << 10) -#define S3C2410_UCON_CLKSHIFT (10) -#define S3C2410_UCON_UCLK (1<<10) -#define S3C2410_UCON_SBREAK (1<<4) - -#define S3C2410_UCON_TXILEVEL (1<<9) -#define S3C2410_UCON_RXILEVEL (1<<8) -#define S3C2410_UCON_TXIRQMODE (1<<2) -#define S3C2410_UCON_RXIRQMODE (1<<0) -#define S3C2410_UCON_RXFIFO_TOI (1<<7) -#define S3C2443_UCON_RXERR_IRQEN (1<<6) -#define S3C2443_UCON_LOOPBACK (1<<5) - -#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ - S3C2410_UCON_RXILEVEL | \ - S3C2410_UCON_TXIRQMODE | \ - S3C2410_UCON_RXIRQMODE | \ - S3C2410_UCON_RXFIFO_TOI) - -#define S3C2410_UFCON_FIFOMODE (1<<0) -#define S3C2410_UFCON_TXTRIG0 (0<<6) -#define S3C2410_UFCON_RXTRIG8 (1<<4) -#define S3C2410_UFCON_RXTRIG12 (2<<4) - -/* S3C2440 FIFO trigger levels */ -#define S3C2440_UFCON_RXTRIG1 (0<<4) -#define S3C2440_UFCON_RXTRIG8 (1<<4) -#define S3C2440_UFCON_RXTRIG16 (2<<4) -#define S3C2440_UFCON_RXTRIG32 (3<<4) - -#define S3C2440_UFCON_TXTRIG0 (0<<6) -#define S3C2440_UFCON_TXTRIG16 (1<<6) -#define S3C2440_UFCON_TXTRIG32 (2<<6) -#define S3C2440_UFCON_TXTRIG48 (3<<6) - -#define S3C2410_UFCON_RESETBOTH (3<<1) -#define S3C2410_UFCON_RESETTX (1<<2) -#define S3C2410_UFCON_RESETRX (1<<1) - -#define S3C2410_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ - S3C2410_UFCON_TXTRIG0 | \ - S3C2410_UFCON_RXTRIG8 ) - -#define S3C2410_UMCOM_AFC (1<<4) -#define S3C2410_UMCOM_RTS_LOW (1<<0) - -#define S3C2412_UMCON_AFC_63 (0<<5) /* same as s3c2443 */ -#define S3C2412_UMCON_AFC_56 (1<<5) -#define S3C2412_UMCON_AFC_48 (2<<5) -#define S3C2412_UMCON_AFC_40 (3<<5) -#define S3C2412_UMCON_AFC_32 (4<<5) -#define S3C2412_UMCON_AFC_24 (5<<5) -#define S3C2412_UMCON_AFC_16 (6<<5) -#define S3C2412_UMCON_AFC_8 (7<<5) - -#define S3C2410_UFSTAT_TXFULL (1<<9) -#define S3C2410_UFSTAT_RXFULL (1<<8) -#define S3C2410_UFSTAT_TXMASK (15<<4) -#define S3C2410_UFSTAT_TXSHIFT (4) -#define S3C2410_UFSTAT_RXMASK (15<<0) -#define S3C2410_UFSTAT_RXSHIFT (0) - -/* UFSTAT S3C2443 same as S3C2440 */ -#define S3C2440_UFSTAT_TXFULL (1<<14) -#define S3C2440_UFSTAT_RXFULL (1<<6) -#define S3C2440_UFSTAT_TXSHIFT (8) -#define S3C2440_UFSTAT_RXSHIFT (0) -#define S3C2440_UFSTAT_TXMASK (63<<8) -#define S3C2440_UFSTAT_RXMASK (63) - -#define S3C2410_UTRSTAT_TXE (1<<2) -#define S3C2410_UTRSTAT_TXFE (1<<1) -#define S3C2410_UTRSTAT_RXDR (1<<0) - -#define S3C2410_UERSTAT_OVERRUN (1<<0) -#define S3C2410_UERSTAT_FRAME (1<<2) -#define S3C2410_UERSTAT_BREAK (1<<3) -#define S3C2443_UERSTAT_PARITY (1<<1) - -#define S3C2410_UERSTAT_ANY (S3C2410_UERSTAT_OVERRUN | \ - S3C2410_UERSTAT_FRAME | \ - S3C2410_UERSTAT_BREAK) - -#define S3C2410_UMSTAT_CTS (1<<0) -#define S3C2410_UMSTAT_DeltaCTS (1<<2) - -#define S3C2443_DIVSLOT (0x2C) - -/* S3C64XX interrupt registers. 
*/ -#define S3C64XX_UINTP 0x30 -#define S3C64XX_UINTSP 0x34 -#define S3C64XX_UINTM 0x38 - -#define S3C64XX_UINTM_RXD (0) -#define S3C64XX_UINTM_TXD (2) -#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD) -#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD) - -/* Following are specific to S5PV210 */ -#define S5PV210_UCON_CLKMASK (1<<10) -#define S5PV210_UCON_CLKSHIFT (10) -#define S5PV210_UCON_PCLK (0<<10) -#define S5PV210_UCON_UCLK (1<<10) - -#define S5PV210_UFCON_TXTRIG0 (0<<8) -#define S5PV210_UFCON_TXTRIG4 (1<<8) -#define S5PV210_UFCON_TXTRIG8 (2<<8) -#define S5PV210_UFCON_TXTRIG16 (3<<8) -#define S5PV210_UFCON_TXTRIG32 (4<<8) -#define S5PV210_UFCON_TXTRIG64 (5<<8) -#define S5PV210_UFCON_TXTRIG128 (6<<8) -#define S5PV210_UFCON_TXTRIG256 (7<<8) - -#define S5PV210_UFCON_RXTRIG1 (0<<4) -#define S5PV210_UFCON_RXTRIG4 (1<<4) -#define S5PV210_UFCON_RXTRIG8 (2<<4) -#define S5PV210_UFCON_RXTRIG16 (3<<4) -#define S5PV210_UFCON_RXTRIG32 (4<<4) -#define S5PV210_UFCON_RXTRIG64 (5<<4) -#define S5PV210_UFCON_RXTRIG128 (6<<4) -#define S5PV210_UFCON_RXTRIG256 (7<<4) - -#define S5PV210_UFSTAT_TXFULL (1<<24) -#define S5PV210_UFSTAT_RXFULL (1<<8) -#define S5PV210_UFSTAT_TXMASK (255<<16) -#define S5PV210_UFSTAT_TXSHIFT (16) -#define S5PV210_UFSTAT_RXMASK (255<<0) -#define S5PV210_UFSTAT_RXSHIFT (0) - -#define S3C2410_UCON_CLKSEL0 (1 << 0) -#define S3C2410_UCON_CLKSEL1 (1 << 1) -#define S3C2410_UCON_CLKSEL2 (1 << 2) -#define S3C2410_UCON_CLKSEL3 (1 << 3) - -/* Default values for s5pv210 UCON and UFCON uart registers */ -#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ - S3C2410_UCON_RXILEVEL | \ - S3C2410_UCON_TXIRQMODE | \ - S3C2410_UCON_RXIRQMODE | \ - S3C2410_UCON_RXFIFO_TOI | \ - S3C2443_UCON_RXERR_IRQEN) - -#define S5PV210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ - S5PV210_UFCON_TXTRIG4 | \ - S5PV210_UFCON_RXTRIG4) - -#ifndef __ASSEMBLY__ - -/* configuration structure for per-machine configurations for the - * serial port - * - * the pointer is setup by the machine specific initialisation from the - * arch/arm/mach-s3c2410/ directory. 
-*/ - -struct s3c2410_uartcfg { - unsigned char hwport; /* hardware port number */ - unsigned char unused; - unsigned short flags; - upf_t uart_flags; /* default uart flags */ - unsigned int clk_sel; - - unsigned int has_fracval; - - unsigned long ucon; /* value of ucon for port */ - unsigned long ulcon; /* value of ulcon for port */ - unsigned long ufcon; /* value of ufcon for port */ -}; - -/* s3c24xx_uart_devs - * - * this is exported from the core as we cannot use driver_register(), - * or platform_add_device() before the console_initcall() -*/ - -extern struct platform_device *s3c24xx_uart_devs[4]; - -#endif /* __ASSEMBLY__ */ - -#endif /* __ASM_ARM_REGS_SERIAL_H */ - +#include <linux/serial_s3c.h> diff --git a/arch/arm/plat-samsung/include/plat/usb-phy.h b/arch/arm/plat-samsung/include/plat/usb-phy.h index 959bcdb..ab34dfa 100644 --- a/arch/arm/plat-samsung/include/plat/usb-phy.h +++ b/arch/arm/plat-samsung/include/plat/usb-phy.h @@ -11,10 +11,7 @@ #ifndef __PLAT_SAMSUNG_USB_PHY_H #define __PLAT_SAMSUNG_USB_PHY_H __FILE__ -enum s5p_usb_phy_type { - S5P_USB_PHY_DEVICE, - S5P_USB_PHY_HOST, -}; +#include <linux/usb/samsung_usb_phy.h> extern int s5p_usb_phy_init(struct platform_device *pdev, int type); extern int s5p_usb_phy_exit(struct platform_device *pdev, int type); diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig index 739d016..8a08c31 100644 --- a/arch/arm/plat-spear/Kconfig +++ b/arch/arm/plat-spear/Kconfig @@ -10,7 +10,7 @@ choice config ARCH_SPEAR13XX bool "ST SPEAr13xx with Device Tree" - select ARCH_HAVE_CPUFREQ + select ARCH_HAS_CPUFREQ select ARM_GIC select CPU_V7 select GPIO_SPEAR_SPICS diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fd70a68..73b6e76 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -9,7 +9,6 @@ config ARM64 select CLONE_BACKWARDS select COMMON_CLK select GENERIC_CLOCKEVENTS - select GENERIC_HARDIRQS_NO_DEPRECATED select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW @@ -100,7 +99,16 @@ source "init/Kconfig" source "kernel/Kconfig.freezer" -menu "System Type" +menu "Platform selection" + +config ARCH_VEXPRESS + bool "ARMv8 software model (Versatile Express)" + select ARCH_REQUIRE_GPIOLIB + select COMMON_CLK_VERSATILE + select VEXPRESS_CONFIG + help + This enables support for the ARMv8 software model (Versatile + Express). endmenu diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 5149343..1a6bfe9 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -6,17 +6,6 @@ config FRAME_POINTER bool default y -config DEBUG_ERRORS - bool "Verbose kernel error messages" - depends on DEBUG_KERNEL - help - This option controls verbose debugging information which can be - printed when the kernel detects an internal error. This debugging - information is useful to kernel hackers when tracking down problems, - but mostly meaningless to other people. It's safe to say Y unless - you are concerned with the code size or don't want to see these - messages. 
- config DEBUG_STACK_USAGE bool "Enable stack utilization instrumentation" depends on DEBUG_KERNEL diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index 32ac0ae..68457e9 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -1,3 +1,5 @@ +dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb + targets += dtbs targets += $(dtb-y) diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts new file mode 100644 index 0000000..198682b --- /dev/null +++ b/arch/arm64/boot/dts/foundation-v8.dts @@ -0,0 +1,230 @@ +/* + * ARM Ltd. + * + * ARMv8 Foundation model DTS + */ + +/dts-v1/; + +/ { + model = "Foundation-v8A"; + compatible = "arm,foundation-aarch64", "arm,vexpress"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + chosen { }; + + aliases { + serial0 = &v2m_serial0; + serial1 = &v2m_serial1; + serial2 = &v2m_serial2; + serial3 = &v2m_serial3; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8000fff8>; + }; + cpu@1 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8000fff8>; + }; + cpu@2 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x2>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8000fff8>; + }; + cpu@3 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x3>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8000fff8>; + }; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x00000000 0x80000000 0 0x80000000>, + <0x00000008 0x80000000 0 0x80000000>; + }; + + gic: interrupt-controller@2c001000 { + compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic"; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + reg = <0x0 0x2c001000 0 0x1000>, + <0x0 0x2c002000 0 0x1000>, + <0x0 0x2c004000 0 0x2000>, + <0x0 0x2c006000 0 0x2000>; + interrupts = <1 9 0xf04>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <1 13 0xff01>, + <1 14 0xff01>, + <1 11 0xff01>, + <1 10 0xff01>; + clock-frequency = <100000000>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = <0 60 4>, + <0 61 4>, + <0 62 4>, + <0 63 4>; + }; + + smb { + compatible = "arm,vexpress,v2m-p1", "simple-bus"; + arm,v2m-memory-map = "rs1"; + #address-cells = <2>; /* SMB chipselect number and offset */ + #size-cells = <1>; + + ranges = <0 0 0 0x08000000 0x04000000>, + <1 0 0 0x14000000 0x04000000>, + <2 0 0 0x18000000 0x04000000>, + <3 0 0 0x1c000000 0x04000000>, + <4 0 0 0x0c000000 0x04000000>, + <5 0 0 0x10000000 0x04000000>; + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 63>; + interrupt-map = <0 0 0 &gic 0 0 4>, + <0 0 1 &gic 0 1 4>, + <0 0 2 &gic 0 2 4>, + <0 0 3 &gic 0 3 4>, + <0 0 4 &gic 0 4 4>, + <0 0 5 &gic 0 5 4>, + <0 0 6 &gic 0 6 4>, + <0 0 7 &gic 0 7 4>, + <0 0 8 &gic 0 8 4>, + <0 0 9 &gic 0 9 4>, + <0 0 10 &gic 0 10 4>, + <0 0 11 &gic 0 11 4>, + <0 0 12 &gic 0 12 4>, + <0 0 13 &gic 0 13 4>, + <0 0 14 &gic 0 14 4>, + <0 0 15 &gic 0 15 4>, + <0 0 16 &gic 0 16 4>, + <0 0 17 &gic 0 17 4>, + <0 0 18 &gic 0 18 4>, + <0 0 19 &gic 0 19 4>, + <0 0 20 &gic 0 20 4>, + <0 0 21 &gic 0 21 4>, + <0 0 22 &gic 0 22 4>, + <0 0 23 &gic 0 23 4>, + <0 0 24 &gic 0 24 4>, + <0 0 25 &gic 0 25 4>, + <0 0 26 &gic 0 26 4>, + <0 0 27 &gic 0 27 4>, + <0 0 28 &gic 0 28 4>, + <0 0 
29 &gic 0 29 4>, + <0 0 30 &gic 0 30 4>, + <0 0 31 &gic 0 31 4>, + <0 0 32 &gic 0 32 4>, + <0 0 33 &gic 0 33 4>, + <0 0 34 &gic 0 34 4>, + <0 0 35 &gic 0 35 4>, + <0 0 36 &gic 0 36 4>, + <0 0 37 &gic 0 37 4>, + <0 0 38 &gic 0 38 4>, + <0 0 39 &gic 0 39 4>, + <0 0 40 &gic 0 40 4>, + <0 0 41 &gic 0 41 4>, + <0 0 42 &gic 0 42 4>; + + ethernet@2,02000000 { + compatible = "smsc,lan91c111"; + reg = <2 0x02000000 0x10000>; + interrupts = <15>; + }; + + v2m_clk24mhz: clk24mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <24000000>; + clock-output-names = "v2m:clk24mhz"; + }; + + v2m_refclk1mhz: refclk1mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <1000000>; + clock-output-names = "v2m:refclk1mhz"; + }; + + v2m_refclk32khz: refclk32khz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + clock-output-names = "v2m:refclk32khz"; + }; + + iofpga@3,00000000 { + compatible = "arm,amba-bus", "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 3 0 0x200000>; + + v2m_sysreg: sysreg@010000 { + compatible = "arm,vexpress-sysreg"; + reg = <0x010000 0x1000>; + }; + + v2m_serial0: uart@090000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x090000 0x1000>; + interrupts = <5>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + v2m_serial1: uart@0a0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0a0000 0x1000>; + interrupts = <6>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + v2m_serial2: uart@0b0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0b0000 0x1000>; + interrupts = <7>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + v2m_serial3: uart@0c0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0c0000 0x1000>; + interrupts = <8>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + virtio_block@0130000 { + compatible = "virtio,mmio"; + reg = <0x130000 0x1000>; + interrupts = <42>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts new file mode 100644 index 0000000..572005e --- /dev/null +++ b/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts @@ -0,0 +1,159 @@ +/* + * ARM Ltd. 
Fast Models + * + * Architecture Envelope Model (AEM) ARMv8-A + * ARMAEMv8AMPCT + * + * RTSM_VE_AEMv8A.lisa + */ + +/dts-v1/; + +/memreserve/ 0x80000000 0x00010000; + +/ { + model = "RTSM_VE_AEMv8A"; + compatible = "arm,rtsm_ve,aemv8a", "arm,vexpress"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + chosen { }; + + aliases { + serial0 = &v2m_serial0; + serial1 = &v2m_serial1; + serial2 = &v2m_serial2; + serial3 = &v2m_serial3; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu@0 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8000fff8>; + }; + cpu@1 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8000fff8>; + }; + cpu@2 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x2>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8000fff8>; + }; + cpu@3 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x3>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8000fff8>; + }; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x00000000 0x80000000 0 0x80000000>, + <0x00000008 0x80000000 0 0x80000000>; + }; + + gic: interrupt-controller@2c001000 { + compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic"; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + reg = <0x0 0x2c001000 0 0x1000>, + <0x0 0x2c002000 0 0x1000>, + <0x0 0x2c004000 0 0x2000>, + <0x0 0x2c006000 0 0x2000>; + interrupts = <1 9 0xf04>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <1 13 0xff01>, + <1 14 0xff01>, + <1 11 0xff01>, + <1 10 0xff01>; + clock-frequency = <100000000>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = <0 60 4>, + <0 61 4>, + <0 62 4>, + <0 63 4>; + }; + + smb { + compatible = "simple-bus"; + + #address-cells = <2>; + #size-cells = <1>; + ranges = <0 0 0 0x08000000 0x04000000>, + <1 0 0 0x14000000 0x04000000>, + <2 0 0 0x18000000 0x04000000>, + <3 0 0 0x1c000000 0x04000000>, + <4 0 0 0x0c000000 0x04000000>, + <5 0 0 0x10000000 0x04000000>; + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 63>; + interrupt-map = <0 0 0 &gic 0 0 4>, + <0 0 1 &gic 0 1 4>, + <0 0 2 &gic 0 2 4>, + <0 0 3 &gic 0 3 4>, + <0 0 4 &gic 0 4 4>, + <0 0 5 &gic 0 5 4>, + <0 0 6 &gic 0 6 4>, + <0 0 7 &gic 0 7 4>, + <0 0 8 &gic 0 8 4>, + <0 0 9 &gic 0 9 4>, + <0 0 10 &gic 0 10 4>, + <0 0 11 &gic 0 11 4>, + <0 0 12 &gic 0 12 4>, + <0 0 13 &gic 0 13 4>, + <0 0 14 &gic 0 14 4>, + <0 0 15 &gic 0 15 4>, + <0 0 16 &gic 0 16 4>, + <0 0 17 &gic 0 17 4>, + <0 0 18 &gic 0 18 4>, + <0 0 19 &gic 0 19 4>, + <0 0 20 &gic 0 20 4>, + <0 0 21 &gic 0 21 4>, + <0 0 22 &gic 0 22 4>, + <0 0 23 &gic 0 23 4>, + <0 0 24 &gic 0 24 4>, + <0 0 25 &gic 0 25 4>, + <0 0 26 &gic 0 26 4>, + <0 0 27 &gic 0 27 4>, + <0 0 28 &gic 0 28 4>, + <0 0 29 &gic 0 29 4>, + <0 0 30 &gic 0 30 4>, + <0 0 31 &gic 0 31 4>, + <0 0 32 &gic 0 32 4>, + <0 0 33 &gic 0 33 4>, + <0 0 34 &gic 0 34 4>, + <0 0 35 &gic 0 35 4>, + <0 0 36 &gic 0 36 4>, + <0 0 37 &gic 0 37 4>, + <0 0 38 &gic 0 38 4>, + <0 0 39 &gic 0 39 4>, + <0 0 40 &gic 0 40 4>, + <0 0 41 &gic 0 41 4>, + <0 0 42 &gic 0 42 4>; + + /include/ "rtsm_ve-motherboard.dtsi" + }; +}; diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi new file mode 100644 index 0000000..b45e5f3 --- /dev/null +++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi @@ -0,0 
+1,234 @@ +/* + * ARM Ltd. Fast Models + * + * Versatile Express (VE) system model + * Motherboard component + * + * VEMotherBoard.lisa + */ + + motherboard { + arm,v2m-memory-map = "rs1"; + compatible = "arm,vexpress,v2m-p1", "simple-bus"; + #address-cells = <2>; /* SMB chipselect number and offset */ + #size-cells = <1>; + #interrupt-cells = <1>; + ranges; + + flash@0,00000000 { + compatible = "arm,vexpress-flash", "cfi-flash"; + reg = <0 0x00000000 0x04000000>, + <4 0x00000000 0x04000000>; + bank-width = <4>; + }; + + vram@2,00000000 { + compatible = "arm,vexpress-vram"; + reg = <2 0x00000000 0x00800000>; + }; + + ethernet@2,02000000 { + compatible = "smsc,lan91c111"; + reg = <2 0x02000000 0x10000>; + interrupts = <15>; + }; + + v2m_clk24mhz: clk24mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <24000000>; + clock-output-names = "v2m:clk24mhz"; + }; + + v2m_refclk1mhz: refclk1mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <1000000>; + clock-output-names = "v2m:refclk1mhz"; + }; + + v2m_refclk32khz: refclk32khz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + clock-output-names = "v2m:refclk32khz"; + }; + + iofpga@3,00000000 { + compatible = "arm,amba-bus", "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 3 0 0x200000>; + + v2m_sysreg: sysreg@010000 { + compatible = "arm,vexpress-sysreg"; + reg = <0x010000 0x1000>; + gpio-controller; + #gpio-cells = <2>; + }; + + v2m_sysctl: sysctl@020000 { + compatible = "arm,sp810", "arm,primecell"; + reg = <0x020000 0x1000>; + clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&v2m_clk24mhz>; + clock-names = "refclk", "timclk", "apb_pclk"; + #clock-cells = <1>; + clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3"; + }; + + aaci@040000 { + compatible = "arm,pl041", "arm,primecell"; + reg = <0x040000 0x1000>; + interrupts = <11>; + clocks = <&v2m_clk24mhz>; + clock-names = "apb_pclk"; + }; + + mmci@050000 { + compatible = "arm,pl180", "arm,primecell"; + reg = <0x050000 0x1000>; + interrupts = <9 10>; + cd-gpios = <&v2m_sysreg 0 0>; + wp-gpios = <&v2m_sysreg 1 0>; + max-frequency = <12000000>; + vmmc-supply = <&v2m_fixed_3v3>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "mclk", "apb_pclk"; + }; + + kmi@060000 { + compatible = "arm,pl050", "arm,primecell"; + reg = <0x060000 0x1000>; + interrupts = <12>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "KMIREFCLK", "apb_pclk"; + }; + + kmi@070000 { + compatible = "arm,pl050", "arm,primecell"; + reg = <0x070000 0x1000>; + interrupts = <13>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "KMIREFCLK", "apb_pclk"; + }; + + v2m_serial0: uart@090000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x090000 0x1000>; + interrupts = <5>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + v2m_serial1: uart@0a0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0a0000 0x1000>; + interrupts = <6>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + v2m_serial2: uart@0b0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0b0000 0x1000>; + interrupts = <7>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + v2m_serial3: uart@0c0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0c0000 0x1000>; + interrupts = <8>; + clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; + clock-names = 
"uartclk", "apb_pclk"; + }; + + wdt@0f0000 { + compatible = "arm,sp805", "arm,primecell"; + reg = <0x0f0000 0x1000>; + interrupts = <0>; + clocks = <&v2m_refclk32khz>, <&v2m_clk24mhz>; + clock-names = "wdogclk", "apb_pclk"; + }; + + v2m_timer01: timer@110000 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x110000 0x1000>; + interrupts = <2>; + clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_clk24mhz>; + clock-names = "timclken1", "timclken2", "apb_pclk"; + }; + + v2m_timer23: timer@120000 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x120000 0x1000>; + interrupts = <3>; + clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&v2m_clk24mhz>; + clock-names = "timclken1", "timclken2", "apb_pclk"; + }; + + rtc@170000 { + compatible = "arm,pl031", "arm,primecell"; + reg = <0x170000 0x1000>; + interrupts = <4>; + clocks = <&v2m_clk24mhz>; + clock-names = "apb_pclk"; + }; + + clcd@1f0000 { + compatible = "arm,pl111", "arm,primecell"; + reg = <0x1f0000 0x1000>; + interrupts = <14>; + clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>; + clock-names = "clcdclk", "apb_pclk"; + }; + }; + + v2m_fixed_3v3: fixedregulator@0 { + compatible = "regulator-fixed"; + regulator-name = "3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + mcc { + compatible = "arm,vexpress,config-bus", "simple-bus"; + arm,vexpress,config-bridge = <&v2m_sysreg>; + + v2m_oscclk1: osc@1 { + /* CLCD clock */ + compatible = "arm,vexpress-osc"; + arm,vexpress-sysreg,func = <1 1>; + freq-range = <23750000 63500000>; + #clock-cells = <0>; + clock-output-names = "v2m:oscclk1"; + }; + + reset@0 { + compatible = "arm,vexpress-reset"; + arm,vexpress-sysreg,func = <5 0>; + }; + + muxfpga@0 { + compatible = "arm,vexpress-muxfpga"; + arm,vexpress-sysreg,func = <7 0>; + }; + + shutdown@0 { + compatible = "arm,vexpress-shutdown"; + arm,vexpress-sysreg,func = <8 0>; + }; + + reboot@0 { + compatible = "arm,vexpress-reboot"; + arm,vexpress-sysreg,func = <9 0>; + }; + + dvimode@0 { + compatible = "arm,vexpress-dvimode"; + arm,vexpress-sysreg,func = <11 0>; + }; + }; + }; diff --git a/arch/arm64/boot/dts/skeleton.dtsi b/arch/arm64/boot/dts/skeleton.dtsi new file mode 100644 index 0000000..38ead82 --- /dev/null +++ b/arch/arm64/boot/dts/skeleton.dtsi @@ -0,0 +1,13 @@ +/* + * Skeleton device tree; the bare minimum needed to boot; just include and + * add a compatible value. The bootloader will typically populate the memory + * node. 
+ */ + +/ { + #address-cells = <2>; + #size-cells = <1>; + chosen { }; + aliases { }; + memory { device_type = "memory"; reg = <0 0 0>; }; +}; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 9212c78..8d9696ad 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -23,6 +23,7 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set +CONFIG_ARCH_VEXPRESS=y CONFIG_SMP=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_CMDLINE="console=ttyAMA0" @@ -47,11 +48,14 @@ CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y CONFIG_MII=y +CONFIG_SMC91X=y # CONFIG_WLAN is not set CONFIG_INPUT_EVDEV=y # CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set CONFIG_FB=y @@ -82,4 +86,3 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_INFO=y # CONFIG_FTRACE is not set CONFIG_ATOMIC64_SELFTEST=y -CONFIG_DEBUG_ERRORS=y diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index e5fe4f9..79a642d 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -39,7 +39,6 @@ generic-y += shmbuf.h generic-y += sizes.h generic-y += socket.h generic-y += sockios.h -generic-y += string.h generic-y += switch_to.h generic-y += swab.h generic-y += termbits.h @@ -49,4 +48,5 @@ generic-y += trace_clock.h generic-y += types.h generic-y += unaligned.h generic-y += user.h +generic-y += vga.h generic-y += xor.h diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h index 5e69307..aa5b59d 100644 --- a/arch/arm64/include/asm/bitops.h +++ b/arch/arm64/include/asm/bitops.h @@ -32,6 +32,16 @@ #error only <linux/bitops.h> can be included directly #endif +/* + * Little endian assembly atomic bitops. + */ +extern void set_bit(int nr, volatile unsigned long *p); +extern void clear_bit(int nr, volatile unsigned long *p); +extern void change_bit(int nr, volatile unsigned long *p); +extern int test_and_set_bit(int nr, volatile unsigned long *p); +extern int test_and_clear_bit(int nr, volatile unsigned long *p); +extern int test_and_change_bit(int nr, volatile unsigned long *p); + #include <asm-generic/bitops/builtin-__ffs.h> #include <asm-generic/bitops/builtin-ffs.h> #include <asm-generic/bitops/builtin-__fls.h> @@ -45,9 +55,13 @@ #include <asm-generic/bitops/hweight.h> #include <asm-generic/bitops/lock.h> -#include <asm-generic/bitops/atomic.h> #include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/le.h> -#include <asm-generic/bitops/ext2-atomic.h> + +/* + * Ext2 is defined to use little-endian byte ordering. 
+ */ +#define ext2_set_bit_atomic(lock, nr, p) test_and_set_bit_le(nr, p) +#define ext2_clear_bit_atomic(lock, nr, p) test_and_clear_bit_le(nr, p) #endif /* __ASM_BITOPS_H */ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 968b5cb..8a8ce0e 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -170,4 +170,7 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, (unsigned long)(n), \ sizeof(*(ptr)))) +#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) +#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) + #endif /* __ASM_CMPXCHG_H */ diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 618b450..899af80 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -35,14 +35,16 @@ typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u32 __compat_uid_t; typedef u32 __compat_gid_t; +typedef u16 __compat_uid16_t; +typedef u16 __compat_gid16_t; typedef u32 __compat_uid32_t; typedef u32 __compat_gid32_t; -typedef u32 compat_mode_t; +typedef u16 compat_mode_t; typedef u32 compat_ino_t; typedef u32 compat_dev_t; typedef s32 compat_off_t; typedef s64 compat_loff_t; -typedef s16 compat_nlink_t; +typedef s32 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; @@ -50,9 +52,11 @@ typedef __kernel_fsid_t compat_fsid_t; typedef s32 compat_key_t; typedef s32 compat_timer_t; +typedef s16 compat_short_t; typedef s32 compat_int_t; typedef s32 compat_long_t; typedef s64 compat_s64; +typedef u16 compat_ushort_t; typedef u32 compat_uint_t; typedef u32 compat_ulong_t; typedef u64 compat_u64; @@ -72,20 +76,20 @@ struct compat_stat { compat_dev_t st_dev; compat_ino_t st_ino; compat_mode_t st_mode; - compat_nlink_t st_nlink; - __compat_uid32_t st_uid; - __compat_gid32_t st_gid; + compat_ushort_t st_nlink; + __compat_uid16_t st_uid; + __compat_gid16_t st_gid; compat_dev_t st_rdev; compat_off_t st_size; compat_off_t st_blksize; compat_off_t st_blocks; compat_time_t st_atime; - u32 st_atime_nsec; + compat_ulong_t st_atime_nsec; compat_time_t st_mtime; - u32 st_mtime_nsec; + compat_ulong_t st_mtime_nsec; compat_time_t st_ctime; - u32 st_ctime_nsec; - u32 __unused4[2]; + compat_ulong_t st_ctime_nsec; + compat_ulong_t __unused4[2]; }; struct compat_flock { diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index ef54125..cf27494 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -17,6 +17,7 @@ #define __ASM_CPUTYPE_H #define ID_MIDR_EL1 "midr_el1" +#define ID_MPIDR_EL1 "mpidr_el1" #define ID_CTR_EL0 "ctr_el0" #define ID_AA64PFR0_EL1 "id_aa64pfr0_el1" @@ -25,12 +26,24 @@ #define ID_AA64ISAR0_EL1 "id_aa64isar0_el1" #define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1" +#define INVALID_HWID ULONG_MAX + +#define MPIDR_HWID_BITMASK 0xff00ffffff + #define read_cpuid(reg) ({ \ u64 __val; \ asm("mrs %0, " reg : "=r" (__val)); \ __val; \ }) +#define ARM_CPU_IMP_ARM 0x41 + +#define ARM_CPU_PART_AEM_V8 0xD0F0 +#define ARM_CPU_PART_FOUNDATION 0xD000 +#define ARM_CPU_PART_CORTEX_A57 0xD070 + +#ifndef __ASSEMBLY__ + /* * The CPU ID never changes at run time, so we might as well tell the * compiler that it's constant. 
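/*
 * [Editor's aside, not part of the patch] The ext2_*_bit_atomic() wrappers
 * above can ignore their "lock" argument because the underlying *_le bitops
 * are already atomic, and on a little-endian architecture such as arm64 the
 * _le variants reduce to the plain bitops.  Roughly (per
 * asm-generic/bitops/le.h, quoted from memory):
 */
static inline int test_and_set_bit_le_sketch(int nr, void *addr)
{
	return test_and_set_bit(nr, addr);	/* no bit swizzling needed on LE */
}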
Use this function to read the CPU ID @@ -41,9 +54,26 @@ static inline u32 __attribute_const__ read_cpuid_id(void) return read_cpuid(ID_MIDR_EL1); } +static inline u64 __attribute_const__ read_cpuid_mpidr(void) +{ + return read_cpuid(ID_MPIDR_EL1); +} + +static inline unsigned int __attribute_const__ read_cpuid_implementor(void) +{ + return (read_cpuid_id() & 0xFF000000) >> 24; +} + +static inline unsigned int __attribute_const__ read_cpuid_part_number(void) +{ + return (read_cpuid_id() & 0xFFF0); +} + static inline u32 __attribute_const__ read_cpuid_cachetype(void) { return read_cpuid(ID_CTR_EL0); } +#endif /* __ASSEMBLY__ */ + #endif diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h new file mode 100644 index 0000000..7883412 --- /dev/null +++ b/arch/arm64/include/asm/esr.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_ESR_H +#define __ASM_ESR_H + +#define ESR_EL1_EC_SHIFT (26) +#define ESR_EL1_IL (1U << 25) + +#define ESR_EL1_EC_UNKNOWN (0x00) +#define ESR_EL1_EC_WFI (0x01) +#define ESR_EL1_EC_CP15_32 (0x03) +#define ESR_EL1_EC_CP15_64 (0x04) +#define ESR_EL1_EC_CP14_MR (0x05) +#define ESR_EL1_EC_CP14_LS (0x06) +#define ESR_EL1_EC_FP_ASIMD (0x07) +#define ESR_EL1_EC_CP10_ID (0x08) +#define ESR_EL1_EC_CP14_64 (0x0C) +#define ESR_EL1_EC_ILL_ISS (0x0E) +#define ESR_EL1_EC_SVC32 (0x11) +#define ESR_EL1_EC_SVC64 (0x15) +#define ESR_EL1_EC_SYS64 (0x18) +#define ESR_EL1_EC_IABT_EL0 (0x20) +#define ESR_EL1_EC_IABT_EL1 (0x21) +#define ESR_EL1_EC_PC_ALIGN (0x22) +#define ESR_EL1_EC_DABT_EL0 (0x24) +#define ESR_EL1_EC_DABT_EL1 (0x25) +#define ESR_EL1_EC_SP_ALIGN (0x26) +#define ESR_EL1_EC_FP_EXC32 (0x28) +#define ESR_EL1_EC_FP_EXC64 (0x2C) +#define ESR_EL1_EC_SERRROR (0x2F) +#define ESR_EL1_EC_BREAKPT_EL0 (0x30) +#define ESR_EL1_EC_BREAKPT_EL1 (0x31) +#define ESR_EL1_EC_SOFTSTP_EL0 (0x32) +#define ESR_EL1_EC_SOFTSTP_EL1 (0x33) +#define ESR_EL1_EC_WATCHPT_EL0 (0x34) +#define ESR_EL1_EC_WATCHPT_EL1 (0x35) +#define ESR_EL1_EC_BKPT32 (0x38) +#define ESR_EL1_EC_BRK64 (0x3C) + +#endif /* __ASM_ESR_H */ diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index ac63519..0303705 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -19,5 +19,6 @@ #define __ASM_EXCEPTION_H #define __exception __attribute__((section(".exception.text"))) +#define __exception_irq_entry __exception #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 5075463..990c051 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -49,4 +49,9 @@ static inline void ack_bad_irq(unsigned int irq) extern void handle_IRQ(unsigned int, struct pt_regs *); +/* + * No arch-specific IRQ flags. 
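/*
 * [Editor's aside, not part of the patch] Hedged usage sketch for the new
 * read_cpuid_implementor()/read_cpuid_part_number() accessors above,
 * combining them with the ARM_CPU_* constants added in the same header:
 */
static inline bool cpu_is_cortex_a57_sketch(void)
{
	return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
	       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A57;
}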
+ */ +#define set_irq_flags(irq, flags) + #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 57f12c9..2e12258 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -92,10 +92,12 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) #define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) #define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) #define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) +#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; }) #define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) #define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) #define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) +#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c))) /* * I/O memory access primitives. Reads are ordered relative to any @@ -105,10 +107,12 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) +#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); }) /* * I/O port access primitives. diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index a4e1cad..0332fc0 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -4,5 +4,6 @@ #include <asm-generic/irq.h> extern void (*handle_arch_irq)(struct pt_regs *); +extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); #endif diff --git a/arch/arm64/lib/bitops.c b/arch/arm64/include/asm/smp_plat.h index aa4965e..ed43a0d 100644 --- a/arch/arm64/lib/bitops.c +++ b/arch/arm64/include/asm/smp_plat.h @@ -1,7 +1,9 @@ /* - * Copyright (C) 2012 ARM Limited + * Definitions specific to SMP platforms. * - * This program is free software; you can redistribute it and/or modify + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * @@ -14,12 +16,15 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include <linux/kernel.h> -#include <linux/spinlock.h> -#include <linux/atomic.h> +#ifndef __ASM_SMP_PLAT_H +#define __ASM_SMP_PLAT_H + +#include <asm/types.h> + +/* + * Logical CPU mapping. + */ +extern u64 __cpu_logical_map[NR_CPUS]; +#define cpu_logical_map(cpu) __cpu_logical_map[cpu] -#ifdef CONFIG_SMP -arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { - [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED -}; -#endif +#endif /* __ASM_SMP_PLAT_H */ diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h new file mode 100644 index 0000000..3ee8b30 --- /dev/null +++ b/arch/arm64/include/asm/string.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2013 ARM Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_STRING_H +#define __ASM_STRING_H + +#define __HAVE_ARCH_STRRCHR +extern char *strrchr(const char *, int c); + +#define __HAVE_ARCH_STRCHR +extern char *strchr(const char *, int c); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMCHR +extern void *memchr(const void *, int, __kernel_size_t); + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, __kernel_size_t); + +#endif diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h index bde9607..42e04c8 100644 --- a/arch/arm64/include/asm/ucontext.h +++ b/arch/arm64/include/asm/ucontext.h @@ -22,7 +22,7 @@ struct ucontext { stack_t uc_stack; sigset_t uc_sigmask; /* glibc uses a 1024-bit sigset_t */ - __u8 __unused[(1024 - sizeof(sigset_t)) / 8]; + __u8 __unused[1024 / 8 - sizeof(sigset_t)]; /* last for future expansion */ struct sigcontext uc_mcontext; }; diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index cef3925..7df1aad 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -39,8 +39,21 @@ EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(__clear_user); - /* bitops */ -EXPORT_SYMBOL(__atomic_hash); - /* physical memory */ EXPORT_SYMBOL(memstart_addr); + + /* string / mem functions */ +EXPORT_SYMBOL(strchr); +EXPORT_SYMBOL(strrchr); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(memchr); + + /* atomic bitops */ +EXPORT_SYMBOL(set_bit); +EXPORT_SYMBOL(test_and_set_bit); +EXPORT_SYMBOL(clear_bit); +EXPORT_SYMBOL(test_and_clear_bit); +EXPORT_SYMBOL(change_bit); +EXPORT_SYMBOL(test_and_change_bit); diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c index 7e320a2..ac974f4 100644 --- a/arch/arm64/kernel/early_printk.c +++ b/arch/arm64/kernel/early_printk.c @@ -24,6 +24,7 @@ #include <linux/io.h> #include <linux/amba/serial.h> +#include <linux/serial_reg.h> static void __iomem *early_base; static void (*printch)(char ch); @@ -40,6 +41,37 @@ static void pl011_printch(char ch) ; } +/* + * Semihosting-based debug console + */ +static void smh_printch(char ch) +{ + asm volatile("mov x1, %0\n" + "mov x0, #3\n" + "hlt 0xf000\n" + : : "r" (&ch) : "x0", "x1", "memory"); +} + +/* + * 8250/16550 (8-bit aligned registers) single character TX. + */ +static void uart8250_8bit_printch(char ch) +{ + while (!(readb_relaxed(early_base + UART_LSR) & UART_LSR_THRE)) + ; + writeb_relaxed(ch, early_base + UART_TX); +} + +/* + * 8250/16550 (32-bit aligned registers) single character TX. 
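A quick check of the ucontext.h arithmetic a little further up (not part of the patch): the old padding expression mixed bit and byte units, while the new one makes sigset_t plus __unused add up to the 1024 bits (128 bytes) that glibc reserves. Assuming an 8-byte kernel sigset_t:

#include <stdio.h>

int main(void)
{
        unsigned long sz = 8;   /* assumed sizeof(sigset_t): one 64-bit word */

        printf("old: (1024 - sizeof) / 8 = %lu padding bytes (135 total)\n",
               (1024 - sz) / 8);                /* 127 */
        printf("new: 1024 / 8 - sizeof   = %lu padding bytes (128 total)\n",
               1024 / 8 - sz);                  /* 120 */
        return 0;
}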
+ */ +static void uart8250_32bit_printch(char ch) +{ + while (!(readl_relaxed(early_base + (UART_LSR << 2)) & UART_LSR_THRE)) + ; + writel_relaxed(ch, early_base + (UART_TX << 2)); +} + struct earlycon_match { const char *name; void (*printch)(char ch); @@ -47,6 +79,9 @@ struct earlycon_match { static const struct earlycon_match earlycon_match[] __initconst = { { .name = "pl011", .printch = pl011_printch, }, + { .name = "smh", .printch = smh_printch, }, + { .name = "uart8250-8bit", .printch = uart8250_8bit_printch, }, + { .name = "uart8250-32bit", .printch = uart8250_32bit_printch, }, {} }; diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 514d609..c7e0470 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -24,6 +24,7 @@ #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/errno.h> +#include <asm/esr.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/unistd32.h> @@ -239,18 +240,18 @@ ENDPROC(el1_error_invalid) el1_sync: kernel_entry 1 mrs x1, esr_el1 // read the syndrome register - lsr x24, x1, #26 // exception class - cmp x24, #0x25 // data abort in EL1 + lsr x24, x1, #ESR_EL1_EC_SHIFT // exception class + cmp x24, #ESR_EL1_EC_DABT_EL1 // data abort in EL1 b.eq el1_da - cmp x24, #0x18 // configurable trap + cmp x24, #ESR_EL1_EC_SYS64 // configurable trap b.eq el1_undef - cmp x24, #0x26 // stack alignment exception + cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception b.eq el1_sp_pc - cmp x24, #0x22 // pc alignment exception + cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception b.eq el1_sp_pc - cmp x24, #0x00 // unknown exception in EL1 + cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL1 b.eq el1_undef - cmp x24, #0x30 // debug exception in EL1 + cmp x24, #ESR_EL1_EC_BREAKPT_EL1 // debug exception in EL1 b.ge el1_dbg b el1_inv el1_da: @@ -346,27 +347,27 @@ el1_preempt: el0_sync: kernel_entry 0 mrs x25, esr_el1 // read the syndrome register - lsr x24, x25, #26 // exception class - cmp x24, #0x15 // SVC in 64-bit state + lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class + cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state b.eq el0_svc adr lr, ret_from_exception - cmp x24, #0x24 // data abort in EL0 + cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 b.eq el0_da - cmp x24, #0x20 // instruction abort in EL0 + cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 b.eq el0_ia - cmp x24, #0x07 // FP/ASIMD access + cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access b.eq el0_fpsimd_acc - cmp x24, #0x2c // FP/ASIMD exception + cmp x24, #ESR_EL1_EC_FP_EXC64 // FP/ASIMD exception b.eq el0_fpsimd_exc - cmp x24, #0x18 // configurable trap + cmp x24, #ESR_EL1_EC_SYS64 // configurable trap b.eq el0_undef - cmp x24, #0x26 // stack alignment exception + cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception b.eq el0_sp_pc - cmp x24, #0x22 // pc alignment exception + cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception b.eq el0_sp_pc - cmp x24, #0x00 // unknown exception in EL0 + cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef - cmp x24, #0x30 // debug exception in EL0 + cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0 b.ge el0_dbg b el0_inv @@ -375,21 +376,21 @@ el0_sync: el0_sync_compat: kernel_entry 0, 32 mrs x25, esr_el1 // read the syndrome register - lsr x24, x25, #26 // exception class - cmp x24, #0x11 // SVC in 32-bit state + lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class + cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state b.eq el0_svc_compat adr lr, 
ret_from_exception - cmp x24, #0x24 // data abort in EL0 + cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 b.eq el0_da - cmp x24, #0x20 // instruction abort in EL0 + cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 b.eq el0_ia - cmp x24, #0x07 // FP/ASIMD access + cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access b.eq el0_fpsimd_acc - cmp x24, #0x28 // FP/ASIMD exception + cmp x24, #ESR_EL1_EC_FP_EXC32 // FP/ASIMD exception b.eq el0_fpsimd_exc - cmp x24, #0x00 // unknown exception in EL0 + cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef - cmp x24, #0x30 // debug exception in EL0 + cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0 b.ge el0_dbg b el0_inv el0_svc_compat: diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 0a0a497..53dcae4 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -26,6 +26,7 @@ #include <asm/assembler.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> +#include <asm/cputype.h> #include <asm/memory.h> #include <asm/thread_info.h> #include <asm/pgtable-hwdef.h> @@ -229,7 +230,8 @@ ENTRY(secondary_holding_pen) bl __calc_phys_offset // x24=phys offset bl el2_setup // Drop to EL1 mrs x0, mpidr_el1 - and x0, x0, #15 // CPU number + ldr x1, =MPIDR_HWID_BITMASK + and x0, x0, x1 adr x1, 1b ldp x2, x3, [x1] sub x1, x1, x2 diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 0373c66..ecb3354 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -25,7 +25,7 @@ #include <linux/irq.h> #include <linux/smp.h> #include <linux/init.h> -#include <linux/of_irq.h> +#include <linux/irqchip.h> #include <linux/seq_file.h> #include <linux/ratelimit.h> @@ -67,18 +67,17 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs) set_irq_regs(old_regs); } -/* - * Interrupt controllers supported by the kernel. - */ -static const struct of_device_id intctrl_of_match[] __initconst = { - /* IRQ controllers { .compatible, .data } info to go here */ - {} -}; +void __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) +{ + if (handle_arch_irq) + return; + + handle_arch_irq = handle_irq; +} void __init init_IRQ(void) { - of_irq_init(intctrl_of_match); - + irqchip_init(); if (!handle_arch_irq) panic("No interrupt controller found."); } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 0337cdb..f491972 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -84,11 +84,15 @@ EXPORT_SYMBOL_GPL(pm_power_off); void (*pm_restart)(const char *cmd); EXPORT_SYMBOL_GPL(pm_restart); +void arch_cpu_idle_prepare(void) +{ + local_fiq_enable(); +} /* * This is our default idle handler. */ -static void default_idle(void) +void arch_cpu_idle(void) { /* * This should do all the clock switching and wait for interrupt @@ -98,43 +102,6 @@ static void default_idle(void) local_irq_enable(); } -/* - * The idle thread. - * We always respect 'hlt_counter' to prevent low power idle. - */ -void cpu_idle(void) -{ - local_fiq_enable(); - - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (!need_resched()) { - /* - * We need to disable interrupts here to ensure - * we don't miss a wakeup call. - */ - local_irq_disable(); - if (!need_resched()) { - stop_critical_timings(); - default_idle(); - start_critical_timings(); - /* - * default_idle functions should always return - * with IRQs enabled. 
- */ - WARN_ON(irqs_disabled()); - } else { - local_irq_enable(); - } - } - rcu_idle_exit(); - tick_nohz_idle_exit(); - schedule_preempt_disabled(); - } -} - void machine_shutdown(void) { #ifdef CONFIG_SMP @@ -178,11 +145,7 @@ void __show_regs(struct pt_regs *regs) { int i; - printk("CPU: %d %s (%s %.*s)\n", - raw_smp_processor_id(), print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); + show_regs_print_info(KERN_DEFAULT); print_symbol("PC is at %s\n", instruction_pointer(regs)); print_symbol("LR is at %s\n", regs->regs[30]); printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", @@ -199,7 +162,6 @@ void __show_regs(struct pt_regs *regs) void show_regs(struct pt_regs * regs) { printk("\n"); - printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm); __show_regs(regs); } @@ -311,11 +273,17 @@ struct task_struct *__switch_to(struct task_struct *prev, fpsimd_thread_switch(next); tls_thread_switch(next); hw_breakpoint_thread_switch(next); + contextidr_thread_switch(next); + + /* + * Complete any pending TLB or cache maintenance on this CPU in case + * the thread migrates to a different CPU. + */ + dsb(); /* the actual thread switch */ last = cpu_switch_to(prev, next); - contextidr_thread_switch(next); return last; } diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 113db86..6a9a532 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -32,6 +32,7 @@ #include <linux/kexec.h> #include <linux/crash_dump.h> #include <linux/root_dev.h> +#include <linux/clk-provider.h> #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/smp.h> @@ -46,6 +47,7 @@ #include <asm/cputable.h> #include <asm/sections.h> #include <asm/setup.h> +#include <asm/smp_plat.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/traps.h> @@ -240,6 +242,8 @@ static void __init request_standard_resources(void) } } +u64 __cpu_logical_map[NR_CPUS] = { [0 ... 
NR_CPUS-1] = INVALID_HWID }; + void __init setup_arch(char **cmdline_p) { setup_processor(); @@ -264,6 +268,7 @@ void __init setup_arch(char **cmdline_p) psci_init(); + cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; #ifdef CONFIG_SMP smp_init_cpus(); #endif @@ -277,6 +282,13 @@ void __init setup_arch(char **cmdline_p) #endif } +static int __init arm64_of_clk_init(void) +{ + of_clk_init(NULL); + return 0; +} +arch_initcall(arm64_of_clk_init); + static DEFINE_PER_CPU(struct cpu, cpu_data); static int __init topology_init(void) diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 7f4f367..e393174 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct compat_rt_sigframe __user *frame; - compat_stack_t stack; int err = 0; frame = compat_get_sigframe(ka, regs, sizeof(*frame)); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index bdd3459..5d54e37 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -43,6 +43,7 @@ #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/processor.h> +#include <asm/smp_plat.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/ptrace.h> @@ -53,7 +54,7 @@ * where to place its SVC stack */ struct secondary_data secondary_data; -volatile unsigned long secondary_holding_pen_release = -1; +volatile unsigned long secondary_holding_pen_release = INVALID_HWID; enum ipi_msg_type { IPI_RESCHEDULE, @@ -70,7 +71,7 @@ static DEFINE_RAW_SPINLOCK(boot_lock); * in coherency or not. This is necessary for the hotplug code to work * reliably. */ -static void __cpuinit write_pen_release(int val) +static void __cpuinit write_pen_release(u64 val) { void *start = (void *)&secondary_holding_pen_release; unsigned long size = sizeof(secondary_holding_pen_release); @@ -96,7 +97,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) /* * Update the pen release flag. */ - write_pen_release(cpu); + write_pen_release(cpu_logical_map(cpu)); /* * Send an event, causing the secondaries to read pen_release. @@ -105,7 +106,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) timeout = jiffies + (1 * HZ); while (time_before(jiffies, timeout)) { - if (secondary_holding_pen_release == -1UL) + if (secondary_holding_pen_release == INVALID_HWID) break; udelay(10); } @@ -116,7 +117,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) */ raw_spin_unlock(&boot_lock); - return secondary_holding_pen_release != -1 ? -ENOSYS : 0; + return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0; } static DECLARE_COMPLETION(cpu_running); @@ -190,7 +191,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void) * Let the primary processor know we're out of the * pen, then head off into the C entry point */ - write_pen_release(-1); + write_pen_release(INVALID_HWID); /* * Synchronise with the boot thread. 
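For orientation: the __cpu_logical_map[] added in setup.c above maps logical CPU numbers to MPIDR-derived hardware IDs, with unused slots left at INVALID_HWID so no real MPIDR can ever match them; the duplicate and boot-CPU checks in smp_init_cpus() below rely on that. A small userspace model (NR_CPUS and the hardware IDs are made up for the example):

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS         4               /* assumed for the example */
#define INVALID_HWID    UINT64_MAX

static uint64_t cpu_logical_map[NR_CPUS] = {
        [0 ... NR_CPUS - 1] = INVALID_HWID,     /* GNU designated-range init */
};

int main(void)
{
        cpu_logical_map[0] = 0x000;     /* boot CPU, e.g. Aff0 = 0 */
        cpu_logical_map[1] = 0x001;

        for (int i = 0; i < NR_CPUS; i++)
                printf("cpu%d -> hwid %#llx%s\n", i,
                       (unsigned long long)cpu_logical_map[i],
                       cpu_logical_map[i] == INVALID_HWID ? " (unset)" : "");
        return 0;
}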
@@ -216,7 +217,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void) /* * OK, it's off to the idle thread for us */ - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } void __init smp_cpus_done(unsigned int max_cpus) @@ -244,11 +245,11 @@ static const struct smp_enable_ops *smp_enable_ops[NR_CPUS]; static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name) { - const struct smp_enable_ops *ops = enable_ops[0]; + const struct smp_enable_ops **ops = enable_ops; - while (ops) { - if (!strcmp(name, ops->name)) - return ops; + while (*ops) { + if (!strcmp(name, (*ops)->name)) + return *ops; ops++; } @@ -257,15 +258,80 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name) } /* - * Enumerate the possible CPU set from the device tree. + * Enumerate the possible CPU set from the device tree and build the + * cpu logical map array containing MPIDR values related to logical + * cpus. Assumes that cpu_logical_map(0) has already been initialized. */ void __init smp_init_cpus(void) { const char *enable_method; struct device_node *dn = NULL; - int cpu = 0; + int i, cpu = 1; + bool bootcpu_valid = false; while ((dn = of_find_node_by_type(dn, "cpu"))) { + const u32 *cell; + u64 hwid; + + /* + * A cpu node with missing "reg" property is + * considered invalid to build a cpu_logical_map + * entry. + */ + cell = of_get_property(dn, "reg", NULL); + if (!cell) { + pr_err("%s: missing reg property\n", dn->full_name); + goto next; + } + hwid = of_read_number(cell, of_n_addr_cells(dn)); + + /* + * Non affinity bits must be set to 0 in the DT + */ + if (hwid & ~MPIDR_HWID_BITMASK) { + pr_err("%s: invalid reg property\n", dn->full_name); + goto next; + } + + /* + * Duplicate MPIDRs are a recipe for disaster. Scan + * all initialized entries and check for + * duplicates. If any is found just ignore the cpu. + * cpu_logical_map was initialized to INVALID_HWID to + * avoid matching valid MPIDR values. + */ + for (i = 1; (i < cpu) && (i < NR_CPUS); i++) { + if (cpu_logical_map(i) == hwid) { + pr_err("%s: duplicate cpu reg properties in the DT\n", + dn->full_name); + goto next; + } + } + + /* + * The numbering scheme requires that the boot CPU + * must be assigned logical id 0. Record it so that + * the logical map built from DT is validated and can + * be used. + */ + if (hwid == cpu_logical_map(0)) { + if (bootcpu_valid) { + pr_err("%s: duplicate boot cpu reg property in DT\n", + dn->full_name); + goto next; + } + + bootcpu_valid = true; + + /* + * cpu_logical_map has already been + * initialized and the boot cpu doesn't need + * the enable-method so continue without + * incrementing cpu. + */ + continue; + } + if (cpu >= NR_CPUS) goto next; @@ -274,22 +340,24 @@ void __init smp_init_cpus(void) */ enable_method = of_get_property(dn, "enable-method", NULL); if (!enable_method) { - pr_err("CPU %d: missing enable-method property\n", cpu); + pr_err("%s: missing enable-method property\n", + dn->full_name); goto next; } smp_enable_ops[cpu] = smp_get_enable_ops(enable_method); if (!smp_enable_ops[cpu]) { - pr_err("CPU %d: invalid enable-method property: %s\n", - cpu, enable_method); + pr_err("%s: invalid enable-method property: %s\n", + dn->full_name, enable_method); goto next; } if (smp_enable_ops[cpu]->init_cpu(dn, cpu)) goto next; - set_cpu_possible(cpu, true); + pr_debug("cpu logical map 0x%llx\n", hwid); + cpu_logical_map(cpu) = hwid; next: cpu++; } @@ -298,6 +366,19 @@ next: if (cpu > NR_CPUS) pr_warning("no. 
of cores (%d) greater than configured maximum of %d - clipping\n", cpu, NR_CPUS); + + if (!bootcpu_valid) { + pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n"); + return; + } + + /* + * All the cpus that made it to the cpu_logical_map have been + * validated so set them as possible cpus. + */ + for (i = 0; i < NR_CPUS; i++) + if (cpu_logical_map(i) != INVALID_HWID) + set_cpu_possible(i, true); } void __init smp_prepare_cpus(unsigned int max_cpus) diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c index 1120916..0c53330 100644 --- a/arch/arm64/kernel/smp_psci.c +++ b/arch/arm64/kernel/smp_psci.c @@ -21,6 +21,7 @@ #include <linux/smp.h> #include <asm/psci.h> +#include <asm/smp_plat.h> static int __init smp_psci_init_cpu(struct device_node *dn, int cpu) { @@ -36,7 +37,7 @@ static int __init smp_psci_prepare_cpu(int cpu) return -ENODEV; } - err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen)); + err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen)); if (err) { pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err); return err; @@ -47,6 +48,6 @@ static int __init smp_psci_prepare_cpu(int cpu) const struct smp_enable_ops smp_psci_ops __initconst = { .name = "psci", - .init_cpu = smp_psci_init_cpu, + .init_cpu = smp_psci_init_cpu, .prepare_cpu = smp_psci_prepare_cpu, }; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index b3c5f62..61d7dd2 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -167,13 +167,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) } } -void dump_stack(void) -{ - dump_backtrace(NULL, NULL); -} - -EXPORT_SYMBOL(dump_stack); - void show_stack(struct task_struct *tsk, unsigned long *sp) { dump_backtrace(NULL, tsk); diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 2fb7f60..59acc0e 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,4 +1,6 @@ lib-y := bitops.o delay.o \ strncpy_from_user.o strnlen_user.o clear_user.o \ copy_from_user.o copy_to_user.o copy_in_user.o \ - copy_page.o clear_page.o + copy_page.o clear_page.o \ + memchr.o memcpy.o memmove.o memset.o \ + strchr.o strrchr.o diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S new file mode 100644 index 0000000..36216d3 --- /dev/null +++ b/arch/arm64/lib/bitops.S @@ -0,0 +1,68 @@ +/* + * Based on arch/arm/lib/bitops.h + * + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * x0: bits 5:0 bit offset + * bits 63:6 word offset + * x1: address + */ + .macro bitop, name, instr +ENTRY( \name ) + and x3, x0, #63 // Get bit offset + eor x0, x0, x3 // Clear low bits + mov x2, #1 + add x1, x1, x0, lsr #3 // Get word offset + lsl x3, x2, x3 // Create mask +1: ldxr x2, [x1] + \instr x2, x2, x3 + stxr w0, x2, [x1] + cbnz w0, 1b + ret +ENDPROC(\name ) + .endm + + .macro testop, name, instr +ENTRY( \name ) + and x3, x0, #63 // Get bit offset + eor x0, x0, x3 // Clear low bits + mov x2, #1 + add x1, x1, x0, lsr #3 // Get word offset + lsl x4, x2, x3 // Create mask +1: ldaxr x2, [x1] + lsr x0, x2, x3 // Save old value of bit + \instr x2, x2, x4 // toggle bit + stlxr w5, x2, [x1] + cbnz w5, 1b + and x0, x0, #1 +3: ret +ENDPROC(\name ) + .endm + +/* + * Atomic bit operations. + */ + bitop change_bit, eor + bitop clear_bit, bic + bitop set_bit, orr + + testop test_and_change_bit, eor + testop test_and_clear_bit, bic + testop test_and_set_bit, orr diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S new file mode 100644 index 0000000..8636b75 --- /dev/null +++ b/arch/arm64/lib/memchr.S @@ -0,0 +1,44 @@ +/* + * Based on arch/arm/lib/memchr.S + * + * Copyright (C) 1995-2000 Russell King + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Find a character in an area of memory. + * + * Parameters: + * x0 - buf + * x1 - c + * x2 - n + * Returns: + * x0 - address of first occurrence of 'c' or 0 + */ +ENTRY(memchr) + and w1, w1, #0xff +1: subs x2, x2, #1 + b.mi 2f + ldrb w3, [x0], #1 + cmp w3, w1 + b.ne 1b + sub x0, x0, #1 + ret +2: mov x0, #0 + ret +ENDPROC(memchr) diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S new file mode 100644 index 0000000..27b5003 --- /dev/null +++ b/arch/arm64/lib/memcpy.S @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
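As a rough C model of the bitops.S address arithmetic above (illustrative only): the bitop/testop macros split the bit number into a bit index within a 64-bit word plus the byte offset of that word, build a mask, and then retry the exclusive load/store until it succeeds.

#include <stdio.h>
#include <stdint.h>

/* For bit number nr: bits 5:0 select the bit inside a 64-bit word,
 * the remaining bits shifted right by 3 give that word's byte offset. */
static void locate_bit(unsigned long nr)
{
        unsigned long bit  = nr & 63;                   /* and x3, x0, #63 */
        unsigned long byte = (nr & ~63UL) >> 3;         /* eor ...; lsr #3 */
        uint64_t mask = (uint64_t)1 << bit;             /* lsl x3, x2, x3  */

        printf("bit %lu -> byte offset %lu, mask %#llx\n",
               nr, byte, (unsigned long long)mask);
}

int main(void)
{
        locate_bit(0);
        locate_bit(70);         /* second 64-bit word, bit 6 */
        return 0;
}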
+ */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Copy a buffer from src to dest (alignment handled by the hardware) + * + * Parameters: + * x0 - dest + * x1 - src + * x2 - n + * Returns: + * x0 - dest + */ +ENTRY(memcpy) + mov x4, x0 + subs x2, x2, #8 + b.mi 2f +1: ldr x3, [x1], #8 + subs x2, x2, #8 + str x3, [x4], #8 + b.pl 1b +2: adds x2, x2, #4 + b.mi 3f + ldr w3, [x1], #4 + sub x2, x2, #4 + str w3, [x4], #4 +3: adds x2, x2, #2 + b.mi 4f + ldrh w3, [x1], #2 + sub x2, x2, #2 + strh w3, [x4], #2 +4: adds x2, x2, #1 + b.mi 5f + ldrb w3, [x1] + strb w3, [x4] +5: ret +ENDPROC(memcpy) diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S new file mode 100644 index 0000000..b79fdfa --- /dev/null +++ b/arch/arm64/lib/memmove.S @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Move a buffer from src to test (alignment handled by the hardware). + * If dest <= src, call memcpy, otherwise copy in reverse order. + * + * Parameters: + * x0 - dest + * x1 - src + * x2 - n + * Returns: + * x0 - dest + */ +ENTRY(memmove) + cmp x0, x1 + b.ls memcpy + add x4, x0, x2 + add x1, x1, x2 + subs x2, x2, #8 + b.mi 2f +1: ldr x3, [x1, #-8]! + subs x2, x2, #8 + str x3, [x4, #-8]! + b.pl 1b +2: adds x2, x2, #4 + b.mi 3f + ldr w3, [x1, #-4]! + sub x2, x2, #4 + str w3, [x4, #-4]! +3: adds x2, x2, #2 + b.mi 4f + ldrh w3, [x1, #-2]! + sub x2, x2, #2 + strh w3, [x4, #-2]! +4: adds x2, x2, #1 + b.mi 5f + ldrb w3, [x1, #-1] + strb w3, [x4, #-1] +5: ret +ENDPROC(memmove) diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S new file mode 100644 index 0000000..87e4a68 --- /dev/null +++ b/arch/arm64/lib/memset.S @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
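Not part of the patch, but the overlap rule memmove.S implements above is easier to see in portable C: it branches to memcpy when the destination does not start above the source, and otherwise copies backwards so overlapping bytes are read before they are overwritten.

#include <stdio.h>
#include <stddef.h>

static void move_bytes(char *dst, const char *src, size_t n)
{
        if (dst <= src) {
                for (size_t i = 0; i < n; i++)          /* forward copy */
                        dst[i] = src[i];
        } else {
                for (size_t i = n; i-- > 0; )           /* backward copy */
                        dst[i] = src[i];
        }
}

int main(void)
{
        char buf[] = "abcdef";

        move_bytes(buf + 2, buf, 4);    /* overlapping regions */
        printf("%s\n", buf);            /* prints "ababcd" */
        return 0;
}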
+ */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Fill in the buffer with character c (alignment handled by the hardware) + * + * Parameters: + * x0 - buf + * x1 - c + * x2 - n + * Returns: + * x0 - buf + */ +ENTRY(memset) + mov x4, x0 + and w1, w1, #0xff + orr w1, w1, w1, lsl #8 + orr w1, w1, w1, lsl #16 + orr x1, x1, x1, lsl #32 + subs x2, x2, #8 + b.mi 2f +1: str x1, [x4], #8 + subs x2, x2, #8 + b.pl 1b +2: adds x2, x2, #4 + b.mi 3f + sub x2, x2, #4 + str w1, [x4], #4 +3: adds x2, x2, #2 + b.mi 4f + sub x2, x2, #2 + strh w1, [x4], #2 +4: adds x2, x2, #1 + b.mi 5f + strb w1, [x4] +5: ret +ENDPROC(memset) diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S new file mode 100644 index 0000000..dae0cf5 --- /dev/null +++ b/arch/arm64/lib/strchr.S @@ -0,0 +1,42 @@ +/* + * Based on arch/arm/lib/strchr.S + * + * Copyright (C) 1995-2000 Russell King + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Find the first occurrence of a character in a string. + * + * Parameters: + * x0 - str + * x1 - c + * Returns: + * x0 - address of first occurrence of 'c' or 0 + */ +ENTRY(strchr) + and w1, w1, #0xff +1: ldrb w2, [x0], #1 + cmp w2, w1 + ccmp w2, wzr, #4, ne + b.ne 1b + sub x0, x0, #1 + cmp w2, w1 + csel x0, x0, xzr, eq + ret +ENDPROC(strchr) diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S new file mode 100644 index 0000000..61eabd9 --- /dev/null +++ b/arch/arm64/lib/strrchr.S @@ -0,0 +1,43 @@ +/* + * Based on arch/arm/lib/strrchr.S + * + * Copyright (C) 1995-2000 Russell King + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Find the last occurrence of a character in a string. 
+ * + * Parameters: + * x0 - str + * x1 - c + * Returns: + * x0 - address of last occurrence of 'c' or 0 + */ +ENTRY(strrchr) + mov x3, #0 + and w1, w1, #0xff +1: ldrb w2, [x0], #1 + cbz w2, 2f + cmp w2, w1 + b.ne 1b + sub x3, x0, #1 + b 1b +2: mov x0, x3 + ret +ENDPROC(strrchr) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index afadae6..5263817 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -57,16 +57,16 @@ void show_pte(struct mm_struct *mm, unsigned long addr) pmd_t *pmd; pte_t *pte; - if (pgd_none_or_clear_bad(pgd)) + if (pgd_none(*pgd) || pgd_bad(*pgd)) break; pud = pud_offset(pgd, addr); - if (pud_none_or_clear_bad(pud)) + if (pud_none(*pud) || pud_bad(*pud)) break; pmd = pmd_offset(pud, addr); printk(", *pmd=%016llx", pmd_val(*pmd)); - if (pmd_none_or_clear_bad(pmd)) + if (pmd_none(*pmd) || pmd_bad(*pmd)) break; pte = pte_offset_map(pmd, addr); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 800aac3..f497ca7 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -197,24 +197,6 @@ void __init bootmem_init(void) max_pfn = max_low_pfn = max; } -static inline int free_area(unsigned long pfn, unsigned long end, char *s) -{ - unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); - - for (; pfn < end; pfn++) { - struct page *page = pfn_to_page(pfn); - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - pages++; - } - - if (size && s) - pr_info("Freeing %s memory: %dK\n", s, size); - - return pages; -} - /* * Poison init memory with an undefined instruction (0x0). */ @@ -405,9 +387,7 @@ void __init mem_init(void) void free_initmem(void) { poison_init_mem(__init_begin, __init_end - __init_begin); - totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), - __phys_to_pfn(__pa(__init_end)), - "init"); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD @@ -418,9 +398,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { poison_init_mem((void *)start, PAGE_ALIGN(end) - start); - totalram_pages += free_area(__phys_to_pfn(__pa(start)), - __phys_to_pfn(__pa(end)), - "initrd"); + free_reserved_area(start, end, 0, "initrd"); } } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 224b44a..eeecc9c 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) { unsigned long size, mask; - bool page64k = IS_ENABLED(ARM64_64K_PAGES); + bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES); pgd_t *pgd; pud_t *pud; pmd_t *pmd; @@ -391,17 +391,14 @@ int kern_addr_valid(unsigned long addr) } #ifdef CONFIG_SPARSEMEM_VMEMMAP #ifdef CONFIG_ARM64_64K_PAGES -int __meminit vmemmap_populate(struct page *start_page, - unsigned long size, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - return vmemmap_populate_basepages(start_page, size, node); + return vmemmap_populate_basepages(start, end, node); } #else /* !CONFIG_ARM64_64K_PAGES */ -int __meminit vmemmap_populate(struct page *start_page, - unsigned long size, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - unsigned long addr = (unsigned long)start_page; - unsigned long end = (unsigned long)(start_page + size); + unsigned long addr = start; unsigned long next; pgd_t *pgd; pud_t *pud; @@ -434,7 +431,7 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } 
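The strrchr.S loop above records the address of the most recent match in x3 while scanning forward to the terminator, then returns it (or 0 when there was no match). A C sketch of the same logic, for illustration only:

#include <stdio.h>
#include <stddef.h>

static char *last_occurrence(const char *s, int c)
{
        const char *last = NULL;
        unsigned char ch = (unsigned char)c;

        for (; *s != '\0'; s++)
                if ((unsigned char)*s == ch)
                        last = s;       /* remember the latest hit */

        return (char *)last;
}

int main(void)
{
        printf("%s\n", last_occurrence("arch/arm64/lib/strrchr.S", '/'));
        return 0;
}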
#endif /* CONFIG_ARM64_64K_PAGES */ -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index 9b89257..22c4030 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -7,7 +7,7 @@ config AVR32 select HAVE_OPROFILE select HAVE_KPROBES select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_ATOMIC64 select HARDIRQS_SW_RESEND @@ -250,20 +250,7 @@ config ARCH_SUSPEND_POSSIBLE def_bool y menu "CPU Frequency scaling" - source "drivers/cpufreq/Kconfig" - -config CPU_FREQ_AT32AP - bool "CPU frequency driver for AT32AP" - depends on CPU_FREQ && PLATFORM_AT32AP - default n - help - This enables the CPU frequency driver for AT32AP processors. - - For details, take a look in <file:Documentation/cpu-freq>. - - If in doubt, say N. - endmenu endmenu diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig index f4025db..d5aff36 100644 --- a/arch/avr32/configs/atngw100_defconfig +++ b/arch/avr32/configs/atngw100_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig index c76a49b..4abcf43 100644 --- a/arch/avr32/configs/atngw100_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100_evklcd100_defconfig @@ -28,7 +28,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig index 2d8ab08..18f3fa0 100644 --- a/arch/avr32/configs/atngw100_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100_evklcd101_defconfig @@ -27,7 +27,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig index b189e0c..06e389c 100644 --- a/arch/avr32/configs/atngw100_mrmt_defconfig +++ b/arch/avr32/configs/atngw100_mrmt_defconfig @@ -23,7 +23,7 @@ CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig index 2e4de42..2518a13 100644 --- a/arch/avr32/configs/atngw100mkii_defconfig +++ b/arch/avr32/configs/atngw100mkii_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig 
index fad3cd2..245ef6b 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig @@ -29,7 +29,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig index 2998623..fa6cbac 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig @@ -28,7 +28,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig index a582465..bbd5131 100644 --- a/arch/avr32/configs/atstk1002_defconfig +++ b/arch/avr32/configs/atstk1002_defconfig @@ -25,7 +25,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig index 57a79df..c1cd726 100644 --- a/arch/avr32/configs/atstk1003_defconfig +++ b/arch/avr32/configs/atstk1003_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig index 1a49bd8..754ae56 100644 --- a/arch/avr32/configs/atstk1004_defconfig +++ b/arch/avr32/configs/atstk1004_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig index 206a1b6..58589d8 100644 --- a/arch/avr32/configs/atstk1006_defconfig +++ b/arch/avr32/configs/atstk1006_defconfig @@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig index 0421498..57788a4 100644 --- a/arch/avr32/configs/favr-32_defconfig +++ b/arch/avr32/configs/favr-32_defconfig @@ -27,7 +27,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig index 82f24eb..ba7c31e 100644 --- a/arch/avr32/configs/hammerhead_defconfig +++ b/arch/avr32/configs/hammerhead_defconfig @@ -31,7 +31,7 @@ 
CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig index 1bee51f..0a8bfdc 100644 --- a/arch/avr32/configs/mimc200_defconfig +++ b/arch/avr32/configs/mimc200_defconfig @@ -24,7 +24,7 @@ CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_AT32AP=y +CONFIG_AVR32_AT32AP_CPUFREQ=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index cf60d0a..fc6483f 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h @@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32) #define readw_be __raw_readw #define readl_be __raw_readl +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel + #define writeb_be __raw_writeb #define writew_be __raw_writew #define writel_be __raw_writel diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index fd78f58..e7b6149 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -30,18 +30,9 @@ EXPORT_SYMBOL(pm_power_off); * This file handles the architecture-dependent parts of process handling.. */ -void cpu_idle(void) +void arch_cpu_idle(void) { - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (!need_resched()) - cpu_idle_sleep(); - rcu_idle_exit(); - tick_nohz_idle_exit(); - schedule_preempt_disabled(); - } + cpu_enter_idle(); } void machine_halt(void) @@ -213,14 +204,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) show_stack_log_lvl(tsk, (unsigned long)stack, NULL, ""); } -void dump_stack(void) -{ - unsigned long stack; - - show_trace_log_lvl(current, &stack, NULL, ""); -} -EXPORT_SYMBOL(dump_stack); - static const char *cpu_modes[] = { "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1", "Interrupt level 2", "Interrupt level 3", "Exception", "NMI" @@ -232,6 +215,8 @@ void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl) unsigned long lr = regs->lr; unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT; + show_regs_print_info(log_lvl); + if (!user_mode(regs)) { sp = (unsigned long)regs + FRAME_SIZE_FULL; @@ -269,9 +254,6 @@ void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl) regs->sr & SR_I0M ? '0' : '.', regs->sr & SR_GM ? 'G' : 'g'); printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]); - printk("%sProcess: %s [%d] (task: %p thread: %p)\n", - log_lvl, current->comm, current->pid, current, - task_thread_info(current)); } void show_regs(struct pt_regs *regs) diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index 05ad291..869a1c6 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -12,6 +12,7 @@ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/time.h> +#include <linux/cpu.h> #include <asm/sysreg.h> @@ -87,13 +88,17 @@ static void comparator_mode(enum clock_event_mode mode, pr_debug("%s: start\n", evdev->name); /* FALLTHROUGH */ case CLOCK_EVT_MODE_RESUME: - cpu_disable_idle_sleep(); + /* + * If we're using the COUNT and COMPARE registers we + * need to force idle poll. 
+ */ + cpu_idle_poll_ctrl(true); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: sysreg_write(COMPARE, 0); pr_debug("%s: stop\n", evdev->name); - cpu_enable_idle_sleep(); + cpu_idle_poll_ctrl(false); break; default: BUG(); diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile index 514c9a9..fc09ec4 100644 --- a/arch/avr32/mach-at32ap/Makefile +++ b/arch/avr32/mach-at32ap/Makefile @@ -1,7 +1,6 @@ obj-y += pdc.o clock.o intc.o extint.o pio.o hsmc.o obj-y += hmatrix.o obj-$(CONFIG_CPU_AT32AP700X) += at32ap700x.o pm-at32ap700x.o -obj-$(CONFIG_CPU_FREQ_AT32AP) += cpufreq.o obj-$(CONFIG_PM) += pm.o ifeq ($(CONFIG_PM_DEBUG),y) diff --git a/arch/avr32/mach-at32ap/cpufreq.c b/arch/avr32/mach-at32ap/cpufreq.c deleted file mode 100644 index 18b7656..0000000 --- a/arch/avr32/mach-at32ap/cpufreq.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (C) 2004-2007 Atmel Corporation - * - * Based on MIPS implementation arch/mips/kernel/time.c - * Copyright 2001 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/*#define DEBUG*/ - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/io.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/export.h> - -static struct clk *cpuclk; - -static int at32_verify_speed(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - return 0; -} - -static unsigned int at32_get_speed(unsigned int cpu) -{ - /* No SMP support */ - if (cpu) - return 0; - return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000); -} - -static unsigned int ref_freq; -static unsigned long loops_per_jiffy_ref; - -static int at32_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_freqs freqs; - long freq; - - /* Convert target_freq from kHz to Hz */ - freq = clk_round_rate(cpuclk, target_freq * 1000); - - /* Check if policy->min <= new_freq <= policy->max */ - if(freq < (policy->min * 1000) || freq > (policy->max * 1000)) - return -EINVAL; - - pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); - - freqs.old = at32_get_speed(0); - freqs.new = (freq + 500) / 1000; - freqs.cpu = 0; - freqs.flags = 0; - - if (!ref_freq) { - ref_freq = freqs.old; - loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; - } - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - if (freqs.old < freqs.new) - boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, freqs.new); - clk_set_rate(cpuclk, freq); - if (freqs.new < freqs.old) - boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, freqs.new); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: set frequency %lu Hz\n", freq); - - return 0; -} - -static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - - cpuclk = clk_get(NULL, "cpu"); - if (IS_ERR(cpuclk)) { - pr_debug("cpufreq: could not get CPU clk\n"); - return PTR_ERR(cpuclk); - } - - policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; - policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; - policy->cpuinfo.transition_latency = 0; - policy->cur 
= at32_get_speed(0); - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - - printk("cpufreq: AT32AP CPU frequency driver\n"); - - return 0; -} - -static struct cpufreq_driver at32_driver = { - .name = "at32ap", - .owner = THIS_MODULE, - .init = at32_cpufreq_driver_init, - .verify = at32_verify_speed, - .target = at32_set_target, - .get = at32_get_speed, - .flags = CPUFREQ_STICKY, -}; - -static int __init at32_cpufreq_init(void) -{ - return cpufreq_register_driver(&at32_driver); -} -late_initcall(at32_cpufreq_init); diff --git a/arch/avr32/mach-at32ap/include/mach/pm.h b/arch/avr32/mach-at32ap/include/mach/pm.h index 979b355..f29ff2c 100644 --- a/arch/avr32/mach-at32ap/include/mach/pm.h +++ b/arch/avr32/mach-at32ap/include/mach/pm.h @@ -21,30 +21,6 @@ extern void cpu_enter_idle(void); extern void cpu_enter_standby(unsigned long sdramc_base); -extern bool disable_idle_sleep; - -static inline void cpu_disable_idle_sleep(void) -{ - disable_idle_sleep = true; -} - -static inline void cpu_enable_idle_sleep(void) -{ - disable_idle_sleep = false; -} - -static inline void cpu_idle_sleep(void) -{ - /* - * If we're using the COUNT and COMPARE registers for - * timekeeping, we can't use the IDLE state. - */ - if (disable_idle_sleep) - cpu_relax(); - else - cpu_enter_idle(); -} - void intc_set_suspend_handler(unsigned long offset); #endif diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S index f868f4c..1c8e4e6 100644 --- a/arch/avr32/mach-at32ap/pm-at32ap700x.S +++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S @@ -18,13 +18,6 @@ /* Same as 0xfff00000 but fits in a 21 bit signed immediate */ #define PM_BASE -0x100000 - .section .bss, "wa", @nobits - .global disable_idle_sleep - .type disable_idle_sleep, @object -disable_idle_sleep: - .int 4 - .size disable_idle_sleep, . 
- disable_idle_sleep - /* Keep this close to the irq handlers */ .section .irq.text, "ax", @progbits diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c index 2798c2d..e66e840 100644 --- a/arch/avr32/mm/init.c +++ b/arch/avr32/mm/init.c @@ -146,34 +146,14 @@ void __init mem_init(void) initsize >> 10); } -static inline void free_area(unsigned long addr, unsigned long end, char *s) -{ - unsigned int size = (end - addr) >> 10; - - for (; addr < end; addr += PAGE_SIZE) { - struct page *page = virt_to_page(addr); - ClearPageReserved(page); - init_page_count(page); - free_page(addr); - totalram_pages++; - } - - if (size && s) - printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n", - s, size, end - (size << 10), end); -} - void free_initmem(void) { - free_area((unsigned long)__init_begin, (unsigned long)__init_end, - "init"); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD - void free_initrd_mem(unsigned long start, unsigned long end) { - free_area(start, end, "initrd"); + free_reserved_area(start, end, 0, "initrd"); } - #endif diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 600494c..c3f2e0b 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -33,7 +33,7 @@ config BLACKFIN select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_UID16 - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_WANT_IPC_PARSE_VERSION select HAVE_GENERIC_HARDIRQS select GENERIC_ATOMIC64 diff --git a/arch/blackfin/include/asm/bfin_sport3.h b/arch/blackfin/include/asm/bfin_sport3.h index 03c0022..d82f5fa 100644 --- a/arch/blackfin/include/asm/bfin_sport3.h +++ b/arch/blackfin/include/asm/bfin_sport3.h @@ -41,7 +41,7 @@ #define SPORT_CTL_LAFS 0x00020000 /* Late Transmit frame select */ #define SPORT_CTL_RJUST 0x00040000 /* Right Justified mode select */ #define SPORT_CTL_FSED 0x00080000 /* External frame sync edge select */ -#define SPORT_CTL_TFIEN 0x00100000 /* Transmit finish interrrupt enable select */ +#define SPORT_CTL_TFIEN 0x00100000 /* Transmit finish interrupt enable select */ #define SPORT_CTL_GCLKEN 0x00200000 /* Gated clock mode select */ #define SPORT_CTL_SPENSEC 0x01000000 /* Enable secondary channel */ #define SPORT_CTL_SPTRAN 0x02000000 /* Data direction control */ diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c index 5cfbaa2..95ba6d9 100644 --- a/arch/blackfin/kernel/dumpstack.c +++ b/arch/blackfin/kernel/dumpstack.c @@ -168,6 +168,7 @@ void dump_stack(void) #endif trace_buffer_save(tflags); dump_bfin_trace_buffer(); + dump_stack_print_info(KERN_DEFAULT); show_stack(current, &stack); trace_buffer_restore(tflags); } diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c index 84ed837..61fbd2d 100644 --- a/arch/blackfin/kernel/early_printk.c +++ b/arch/blackfin/kernel/early_printk.c @@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_init(unsigned int port, extern struct console *bfin_jc_early_init(void); #endif -static struct console *early_console; - /* Default console */ #define DEFAULT_PORT 0 #define DEFAULT_CFLAG CS8|B57600 diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 9782c03..4aa5545 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -46,15 +46,14 @@ EXPORT_SYMBOL(pm_power_off); * The idle loop on BFIN */ #ifdef CONFIG_IDLE_L1 -static void default_idle(void)__attribute__((l1_text)); -void cpu_idle(void)__attribute__((l1_text)); +void arch_cpu_idle(void)__attribute__((l1_text)); #endif /* * 
This is our default idle handler. We need to disable * interrupts here to ensure we don't miss a wakeup call. */ -static void default_idle(void) +void arch_cpu_idle(void) { #ifdef CONFIG_IPIPE ipipe_suspend_domain(); @@ -66,31 +65,12 @@ static void default_idle(void) hard_local_irq_enable(); } -/* - * The idle thread. We try to conserve power, while trying to keep - * overall latency low. The architecture specific idle is passed - * a value to indicate the level of "idleness" of the system. - */ -void cpu_idle(void) -{ - /* endless idle loop with no priority at all */ - while (1) { - #ifdef CONFIG_HOTPLUG_CPU - if (cpu_is_offline(smp_processor_id())) - cpu_die(); -#endif - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (!need_resched()) - default_idle(); - rcu_idle_exit(); - tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - } +void arch_cpu_idle_dead(void) +{ + cpu_die(); } +#endif /* * Do necessary setup to start up a newly executed thread. diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c index f7f7a18..c36efa0 100644 --- a/arch/blackfin/kernel/trace.c +++ b/arch/blackfin/kernel/trace.c @@ -853,6 +853,8 @@ void show_regs(struct pt_regs *fp) unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); pr_notice("\n"); + show_regs_print_info(KERN_NOTICE); + if (CPUID != bfin_cpuid()) pr_notice("Compiled for cpu family 0x%04x (Rev %d), " "but running on:0x%04x (Rev %d)\n", diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index 61c1f47..97d7016 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c @@ -936,19 +936,19 @@ static struct v4l2_input adv7842_inputs[] = { .index = 2, .name = "Component", .type = V4L2_INPUT_TYPE_CAMERA, - .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS, + .capabilities = V4L2_IN_CAP_DV_TIMINGS, }, { .index = 3, .name = "VGA", .type = V4L2_INPUT_TYPE_CAMERA, - .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS, + .capabilities = V4L2_IN_CAP_DV_TIMINGS, }, { .index = 4, .name = "HDMI", .type = V4L2_INPUT_TYPE_CAMERA, - .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS, + .capabilities = V4L2_IN_CAP_DV_TIMINGS, }, }; @@ -1074,7 +1074,7 @@ static struct v4l2_output adv7511_outputs[] = { .index = 0, .name = "HDMI", .type = V4L2_INPUT_TYPE_CAMERA, - .capabilities = V4L2_OUT_CAP_CUSTOM_TIMINGS, + .capabilities = V4L2_OUT_CAP_DV_TIMINGS, }, }; diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile index 75f0ba2..675466d 100644 --- a/arch/blackfin/mach-common/Makefile +++ b/arch/blackfin/mach-common/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_PM) += pm.o ifneq ($(CONFIG_BF60x),y) obj-$(CONFIG_PM) += dpmc_modes.o endif -obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c deleted file mode 100644 index d88bd31..0000000 --- a/arch/blackfin/mach-common/cpufreq.c +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Blackfin core clock scaling - * - * Copyright 2008-2011 Analog Devices Inc. - * - * Licensed under the GPL-2 or later. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/init.h> -#include <linux/clk.h> -#include <linux/cpufreq.h> -#include <linux/fs.h> -#include <linux/delay.h> -#include <asm/blackfin.h> -#include <asm/time.h> -#include <asm/dpmc.h> - - -/* this is the table of CCLK frequencies, in Hz */ -/* .index is the entry in the auxiliary dpm_state_table[] */ -static struct cpufreq_frequency_table bfin_freq_table[] = { - { - .frequency = CPUFREQ_TABLE_END, - .index = 0, - }, - { - .frequency = CPUFREQ_TABLE_END, - .index = 1, - }, - { - .frequency = CPUFREQ_TABLE_END, - .index = 2, - }, - { - .frequency = CPUFREQ_TABLE_END, - .index = 0, - }, -}; - -static struct bfin_dpm_state { - unsigned int csel; /* system clock divider */ - unsigned int tscale; /* change the divider on the core timer interrupt */ -} dpm_state_table[3]; - -#if defined(CONFIG_CYCLES_CLOCKSOURCE) -/* - * normalized to maximum frequency offset for CYCLES, - * used in time-ts cycles clock source, but could be used - * somewhere also. - */ -unsigned long long __bfin_cycles_off; -unsigned int __bfin_cycles_mod; -#endif - -/**************************************************************************/ -static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk) -{ - - unsigned long csel, min_cclk; - int index; - - /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */ -#if ANOMALY_05000273 || ANOMALY_05000274 || \ - (!(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) \ - && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE)) - min_cclk = sclk * 2; -#else - min_cclk = sclk; -#endif - -#ifndef CONFIG_BF60x - csel = ((bfin_read_PLL_DIV() & CSEL) >> 4); -#else - csel = bfin_read32(CGU0_DIV) & 0x1F; -#endif - - for (index = 0; (cclk >> index) >= min_cclk && csel <= 3 && index < 3; index++, csel++) { - bfin_freq_table[index].frequency = cclk >> index; -#ifndef CONFIG_BF60x - dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */ -#else - dpm_state_table[index].csel = csel; -#endif - dpm_state_table[index].tscale = (TIME_SCALE >> index) - 1; - - pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n", - bfin_freq_table[index].frequency, - dpm_state_table[index].csel, - dpm_state_table[index].tscale); - } - return; -} - -static void bfin_adjust_core_timer(void *info) -{ - unsigned int tscale; - unsigned int index = *(unsigned int *)info; - - /* we have to adjust the core timer, because it is using cclk */ - tscale = dpm_state_table[index].tscale; - bfin_write_TSCALE(tscale); - return; -} - -static unsigned int bfin_getfreq_khz(unsigned int cpu) -{ - /* Both CoreA/B have the same core clock */ - return get_cclk() / 1000; -} - -#ifdef CONFIG_BF60x -unsigned long cpu_set_cclk(int cpu, unsigned long new) -{ - struct clk *clk; - int ret; - - clk = clk_get(NULL, "CCLK"); - if (IS_ERR(clk)) - return -ENODEV; - - ret = clk_set_rate(clk, new); - clk_put(clk); - return ret; -} -#endif - -static int bfin_target(struct cpufreq_policy *poli, - unsigned int target_freq, unsigned int relation) -{ -#ifndef CONFIG_BF60x - unsigned int plldiv; -#endif - unsigned int index, cpu; - unsigned long cclk_hz; - struct cpufreq_freqs freqs; - static unsigned long lpj_ref; - static unsigned int lpj_ref_freq; - int ret = 0; - -#if defined(CONFIG_CYCLES_CLOCKSOURCE) - cycles_t cycles; -#endif - - for_each_online_cpu(cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - - if (!policy) - continue; - - if (cpufreq_frequency_table_target(policy, bfin_freq_table, - target_freq, 
relation, &index)) - return -EINVAL; - - cclk_hz = bfin_freq_table[index].frequency; - - freqs.old = bfin_getfreq_khz(0); - freqs.new = cclk_hz; - freqs.cpu = cpu; - - pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", - cclk_hz, target_freq, freqs.old); - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - if (cpu == CPUFREQ_CPU) { -#ifndef CONFIG_BF60x - plldiv = (bfin_read_PLL_DIV() & SSEL) | - dpm_state_table[index].csel; - bfin_write_PLL_DIV(plldiv); -#else - ret = cpu_set_cclk(cpu, freqs.new * 1000); - if (ret != 0) { - WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); - break; - } -#endif - on_each_cpu(bfin_adjust_core_timer, &index, 1); -#if defined(CONFIG_CYCLES_CLOCKSOURCE) - cycles = get_cycles(); - SSYNC(); - cycles += 10; /* ~10 cycles we lose after get_cycles() */ - __bfin_cycles_off += - (cycles << __bfin_cycles_mod) - (cycles << index); - __bfin_cycles_mod = index; -#endif - if (!lpj_ref_freq) { - lpj_ref = loops_per_jiffy; - lpj_ref_freq = freqs.old; - } - if (freqs.new != freqs.old) { - loops_per_jiffy = cpufreq_scale(lpj_ref, - lpj_ref_freq, freqs.new); - } - } - /* TODO: just test case for cycles clock source, remove later */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - } - - pr_debug("cpufreq: done\n"); - return ret; -} - -static int bfin_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, bfin_freq_table); -} - -static int __bfin_cpu_init(struct cpufreq_policy *policy) -{ - - unsigned long cclk, sclk; - - cclk = get_cclk() / 1000; - sclk = get_sclk() / 1000; - - if (policy->cpu == CPUFREQ_CPU) - bfin_init_tables(cclk, sclk); - - policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ - - policy->cur = cclk; - cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); - return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); -} - -static struct freq_attr *bfin_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver bfin_driver = { - .verify = bfin_verify_speed, - .target = bfin_target, - .get = bfin_getfreq_khz, - .init = __bfin_cpu_init, - .name = "bfin cpufreq", - .owner = THIS_MODULE, - .attr = bfin_freq_attr, -}; - -static int __init bfin_cpu_init(void) -{ - return cpufreq_register_driver(&bfin_driver); -} - -static void __exit bfin_cpu_exit(void) -{ - cpufreq_unregister_driver(&bfin_driver); -} - -MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); -MODULE_DESCRIPTION("cpufreq driver for Blackfin"); -MODULE_LICENSE("GPL"); - -module_init(bfin_cpu_init); -module_exit(bfin_cpu_exit); diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index bb61ae4..1bc2ce6 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -335,7 +335,7 @@ void __cpuinit secondary_start_kernel(void) */ calibrate_delay(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } void __init smp_prepare_boot_cpu(void) diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c index 9cb8553..82d01a7 100644 --- a/arch/blackfin/mm/init.c +++ b/arch/blackfin/mm/init.c @@ -103,7 +103,7 @@ void __init mem_init(void) max_mapnr = num_physpages = MAP_NR(high_memory); printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages); - /* This will put all memory onto the freelists. */ + /* This will put all low memory onto the freelists. 
*/ totalram_pages = free_all_bootmem(); reservedpages = 0; @@ -129,24 +129,11 @@ void __init mem_init(void) initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); } -static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - /* next to check that the page we free is not a partial page */ - for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); -} - #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { #ifndef CONFIG_MPU - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, 0, "initrd"); #endif } #endif @@ -154,10 +141,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) void __init_refok free_initmem(void) { #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); - + free_initmem_default(0); if (memory_start == (unsigned long)(&__init_end)) memory_start = (unsigned long)(&__init_begin); #endif diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h index cf78e09..2c71d56 100644 --- a/arch/c6x/include/asm/irqflags.h +++ b/arch/c6x/include/asm/irqflags.h @@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void) /* set interrupt enabled status */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); + asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory"); } /* unconditionally enable interrupts */ diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c index 6434df4..57d2ea8 100644 --- a/arch/c6x/kernel/process.c +++ b/arch/c6x/kernel/process.c @@ -33,7 +33,7 @@ extern asmlinkage void ret_from_kernel_thread(void); void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); -static void c6x_idle(void) +void arch_cpu_idle(void) { unsigned long tmp; @@ -49,32 +49,6 @@ static void c6x_idle(void) : "=b"(tmp)); } -/* - * The idle loop for C64x - */ -void cpu_idle(void) -{ - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (1) { - local_irq_disable(); - if (need_resched()) { - local_irq_enable(); - break; - } - c6x_idle(); /* enables local irqs */ - } - rcu_idle_exit(); - tick_nohz_idle_exit(); - - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - } -} - static void halt_loop(void) { printk(KERN_EMERG "System Halted, OK to turn off power\n"); diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c index 1be74e5..dcc2c2f 100644 --- a/arch/c6x/kernel/traps.c +++ b/arch/c6x/kernel/traps.c @@ -31,6 +31,7 @@ void __init trap_init(void) void show_regs(struct pt_regs *regs) { pr_err("\n"); + show_regs_print_info(KERN_ERR); pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp); pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4); pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0); @@ -67,15 +68,6 @@ void show_regs(struct pt_regs *regs) pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31); } -void dump_stack(void) -{ - unsigned long stack; - - show_stack(current, &stack); -} -EXPORT_SYMBOL(dump_stack); - - void die(char *str, struct pt_regs *fp, int nr) { console_verbose(); diff --git 
a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c index 89395f0..a9fcd89 100644 --- a/arch/c6x/mm/init.c +++ b/arch/c6x/mm/init.c @@ -77,37 +77,11 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - printk(KERN_INFO "Freeing initrd memory: %luk freed\n", - (pages * PAGE_SIZE) >> 10); + free_reserved_area(start, end, 0, "initrd"); } #endif void __init free_initmem(void) { - unsigned long addr; - - /* - * The following code should be cool even if these sections - * are not page aligned. - */ - addr = PAGE_ALIGN((unsigned long)(__init_begin)); - - /* next to check that the page we free is not a partial page */ - for (; addr + PAGE_SIZE < (unsigned long)(__init_end); - addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing unused kernel memory: %dK freed\n", - (int) ((addr - PAGE_ALIGN((long) &__init_begin)) >> 10)); + free_initmem_default(0); } diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index bb0ac66..06dd026 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -43,7 +43,7 @@ config CRIS select GENERIC_ATOMIC64 select HAVE_GENERIC_HARDIRQS select HAVE_UID16 - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_IRQ_SHOW select GENERIC_IOMAP diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c index b101875..753e9a0 100644 --- a/arch/cris/arch-v10/kernel/process.c +++ b/arch/cris/arch-v10/kernel/process.c @@ -30,8 +30,9 @@ void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */ void default_idle(void) { #ifdef CONFIG_ETRAX_GPIO - etrax_gpio_wake_up_check(); + etrax_gpio_wake_up_check(); #endif + local_irq_enable(); } /* @@ -175,6 +176,9 @@ unsigned long get_wchan(struct task_struct *p) void show_regs(struct pt_regs * regs) { unsigned long usp = rdusp(); + + show_regs_print_info(KERN_DEFAULT); + printk("IRP: %08lx SRP: %08lx DCCR: %08lx USP: %08lx MOF: %08lx\n", regs->irp, regs->srp, regs->dccr, usp, regs->mof ); printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n", diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c index 2b23ef0..cebd32e 100644 --- a/arch/cris/arch-v32/kernel/process.c +++ b/arch/cris/arch-v32/kernel/process.c @@ -20,18 +20,12 @@ extern void stop_watchdog(void); -extern int cris_hlt_counter; - /* We use this if we don't have any better idle routine. */ void default_idle(void) { - local_irq_disable(); - if (!need_resched() && !cris_hlt_counter) { - /* Halt until exception. */ - __asm__ volatile("ei \n\t" - "halt "); - } - local_irq_enable(); + /* Halt until exception. 
*/ + __asm__ volatile("ei \n\t" + "halt "); } /* @@ -170,6 +164,9 @@ get_wchan(struct task_struct *p) void show_regs(struct pt_regs * regs) { unsigned long usp = rdusp(); + + show_regs_print_info(KERN_DEFAULT); + printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n", regs->erp, regs->srp, regs->ccs, usp, regs->mof); diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index 04a16ed..cdd1202 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c @@ -145,8 +145,6 @@ smp_boot_one_cpu(int cpuid, struct task_struct idle) * specific stuff such as the local timer and the MMU. */ void __init smp_callin(void) { - extern void cpu_idle(void); - int cpu = cpu_now_booting; reg_intr_vect_rw_mask vect_mask = {0}; @@ -170,7 +168,7 @@ void __init smp_callin(void) local_irq_enable(); set_cpu_online(cpu, true); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } /* Stop execution on this CPU.*/ diff --git a/arch/cris/arch-v32/mach-a3/Makefile b/arch/cris/arch-v32/mach-a3/Makefile index d366e08..18a2271 100644 --- a/arch/cris/arch-v32/mach-a3/Makefile +++ b/arch/cris/arch-v32/mach-a3/Makefile @@ -3,7 +3,6 @@ # obj-y := dma.o pinmux.o io.o arbiter.o -obj-$(CONFIG_CPU_FREQ) += cpufreq.o clean: diff --git a/arch/cris/arch-v32/mach-a3/cpufreq.c b/arch/cris/arch-v32/mach-a3/cpufreq.c deleted file mode 100644 index ee391ec..0000000 --- a/arch/cris/arch-v32/mach-a3/cpufreq.c +++ /dev/null @@ -1,152 +0,0 @@ -#include <linux/init.h> -#include <linux/module.h> -#include <linux/cpufreq.h> -#include <hwregs/reg_map.h> -#include <hwregs/reg_rdwr.h> -#include <hwregs/clkgen_defs.h> -#include <hwregs/ddr2_defs.h> - -static int -cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data); - -static struct notifier_block cris_sdram_freq_notifier_block = { - .notifier_call = cris_sdram_freq_notifier -}; - -static struct cpufreq_frequency_table cris_freq_table[] = { - {0x01, 6000}, - {0x02, 200000}, - {0, CPUFREQ_TABLE_END}, -}; - -static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) -{ - reg_clkgen_rw_clk_ctrl clk_ctrl; - clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); - return clk_ctrl.pll ? 200000 : 6000; -} - -static void cris_freq_set_cpu_state(unsigned int state) -{ - int i = 0; - struct cpufreq_freqs freqs; - reg_clkgen_rw_clk_ctrl clk_ctrl; - clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); - -#ifdef CONFIG_SMP - for_each_present_cpu(i) -#endif - { - freqs.old = cris_freq_get_cpu_frequency(i); - freqs.new = cris_freq_table[state].frequency; - freqs.cpu = i; - } - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - local_irq_disable(); - - /* Even though we may be SMP they will share the same clock - * so all settings are made on CPU0. 
*/ - if (cris_freq_table[state].frequency == 200000) - clk_ctrl.pll = 1; - else - clk_ctrl.pll = 0; - REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl); - - local_irq_enable(); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); -}; - -static int cris_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); -} - -static int cris_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target(policy, cris_freq_table, - target_freq, relation, &newstate)) - return -EINVAL; - - cris_freq_set_cpu_state(newstate); - - return 0; -} - -static int cris_freq_cpu_init(struct cpufreq_policy *policy) -{ - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* 1ms */ - policy->cur = cris_freq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); - if (result) - return (result); - - cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); - - return 0; -} - - -static int cris_freq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - - -static struct freq_attr *cris_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver cris_freq_driver = { - .get = cris_freq_get_cpu_frequency, - .verify = cris_freq_verify, - .target = cris_freq_target, - .init = cris_freq_cpu_init, - .exit = cris_freq_cpu_exit, - .name = "cris_freq", - .owner = THIS_MODULE, - .attr = cris_freq_attr, -}; - -static int __init cris_freq_init(void) -{ - int ret; - ret = cpufreq_register_driver(&cris_freq_driver); - cpufreq_register_notifier(&cris_sdram_freq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - return ret; -} - -static int -cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - int i; - struct cpufreq_freqs *freqs = data; - if (val == CPUFREQ_PRECHANGE) { - reg_ddr2_rw_cfg cfg = - REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg); - cfg.ref_interval = (freqs->new == 200000 ? 
1560 : 46); - - if (freqs->new == 200000) - for (i = 0; i < 50000; i++); - REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing); - } - return 0; -} - - -module_init(cris_freq_init); diff --git a/arch/cris/arch-v32/mach-fs/Makefile b/arch/cris/arch-v32/mach-fs/Makefile index d366e08..18a2271 100644 --- a/arch/cris/arch-v32/mach-fs/Makefile +++ b/arch/cris/arch-v32/mach-fs/Makefile @@ -3,7 +3,6 @@ # obj-y := dma.o pinmux.o io.o arbiter.o -obj-$(CONFIG_CPU_FREQ) += cpufreq.o clean: diff --git a/arch/cris/arch-v32/mach-fs/cpufreq.c b/arch/cris/arch-v32/mach-fs/cpufreq.c deleted file mode 100644 index d92cf70..0000000 --- a/arch/cris/arch-v32/mach-fs/cpufreq.c +++ /dev/null @@ -1,145 +0,0 @@ -#include <linux/init.h> -#include <linux/module.h> -#include <linux/cpufreq.h> -#include <hwregs/reg_map.h> -#include <arch/hwregs/reg_rdwr.h> -#include <arch/hwregs/config_defs.h> -#include <arch/hwregs/bif_core_defs.h> - -static int -cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data); - -static struct notifier_block cris_sdram_freq_notifier_block = { - .notifier_call = cris_sdram_freq_notifier -}; - -static struct cpufreq_frequency_table cris_freq_table[] = { - {0x01, 6000}, - {0x02, 200000}, - {0, CPUFREQ_TABLE_END}, -}; - -static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) -{ - reg_config_rw_clk_ctrl clk_ctrl; - clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); - return clk_ctrl.pll ? 200000 : 6000; -} - -static void cris_freq_set_cpu_state(unsigned int state) -{ - int i; - struct cpufreq_freqs freqs; - reg_config_rw_clk_ctrl clk_ctrl; - clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); - - for_each_possible_cpu(i) { - freqs.old = cris_freq_get_cpu_frequency(i); - freqs.new = cris_freq_table[state].frequency; - freqs.cpu = i; - } - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - local_irq_disable(); - - /* Even though we may be SMP they will share the same clock - * so all settings are made on CPU0. 
*/ - if (cris_freq_table[state].frequency == 200000) - clk_ctrl.pll = 1; - else - clk_ctrl.pll = 0; - REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl); - - local_irq_enable(); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); -}; - -static int cris_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); -} - -static int cris_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) -{ - unsigned int newstate = 0; - - if (cpufreq_frequency_table_target - (policy, cris_freq_table, target_freq, relation, &newstate)) - return -EINVAL; - - cris_freq_set_cpu_state(newstate); - - return 0; -} - -static int cris_freq_cpu_init(struct cpufreq_policy *policy) -{ - int result; - - /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* 1ms */ - policy->cur = cris_freq_get_cpu_frequency(0); - - result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); - if (result) - return (result); - - cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); - - return 0; -} - -static int cris_freq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *cris_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver cris_freq_driver = { - .get = cris_freq_get_cpu_frequency, - .verify = cris_freq_verify, - .target = cris_freq_target, - .init = cris_freq_cpu_init, - .exit = cris_freq_cpu_exit, - .name = "cris_freq", - .owner = THIS_MODULE, - .attr = cris_freq_attr, -}; - -static int __init cris_freq_init(void) -{ - int ret; - ret = cpufreq_register_driver(&cris_freq_driver); - cpufreq_register_notifier(&cris_sdram_freq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - return ret; -} - -static int -cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - int i; - struct cpufreq_freqs *freqs = data; - if (val == CPUFREQ_PRECHANGE) { - reg_bif_core_rw_sdram_timing timing = - REG_RD(bif_core, regi_bif_core, rw_sdram_timing); - timing.cpd = (freqs->new == 200000 ? 0 : 1); - - if (freqs->new == 200000) - for (i = 0; i < 50000; i++) ; - REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing); - } - return 0; -} - -module_init(cris_freq_init); diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h index 675823f..c0a29b9 100644 --- a/arch/cris/include/asm/processor.h +++ b/arch/cris/include/asm/processor.h @@ -65,13 +65,6 @@ static inline void release_thread(struct task_struct *dead_task) #define cpu_relax() barrier() -/* - * disable hlt during certain critical i/o operations - */ -#define HAVE_DISABLE_HLT -void disable_hlt(void); -void enable_hlt(void); - void default_idle(void); #endif /* __ASM_CRIS_PROCESSOR_H */ diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index 104ff4d..b78498e 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c @@ -29,59 +29,14 @@ //#define DEBUG -/* - * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if - * there would ever be a halt sequence (for power save when idle) with - * some largish delay when halting or resuming *and* a driver that can't - * afford that delay. The hlt_counter would then be checked before - * executing the halt sequence, and the driver marks the unhaltable - * region by enable_hlt/disable_hlt. 
- */ - -int cris_hlt_counter=0; - -void disable_hlt(void) -{ - cris_hlt_counter++; -} - -EXPORT_SYMBOL(disable_hlt); - -void enable_hlt(void) -{ - cris_hlt_counter--; -} - -EXPORT_SYMBOL(enable_hlt); - extern void default_idle(void); void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); -/* - * The idle thread. There's no useful work to be - * done, so just try to conserve power and have a - * low exit latency (ie sit in a loop waiting for - * somebody to say that they'd like to reschedule) - */ - -void cpu_idle (void) +void arch_cpu_idle(void) { - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - while (!need_resched()) { - /* - * Mark this as an RCU critical section so that - * synchronize_kernel() in the unload path waits - * for our completion. - */ - default_idle(); - } - rcu_idle_exit(); - schedule_preempt_disabled(); - } + default_idle(); } void hard_reset_now (void); diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c index a11ad32..0ffda73 100644 --- a/arch/cris/kernel/traps.c +++ b/arch/cris/kernel/traps.c @@ -147,13 +147,6 @@ show_stack(void) #endif void -dump_stack(void) -{ - show_stack(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); - -void set_nmi_handler(void (*handler)(struct pt_regs *)) { nmi_handler = handler; diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c index d72ab58..9ac8094 100644 --- a/arch/cris/mm/init.c +++ b/arch/cris/mm/init.c @@ -12,12 +12,10 @@ #include <linux/init.h> #include <linux/bootmem.h> #include <asm/tlb.h> +#include <asm/sections.h> unsigned long empty_zero_page; -extern char _stext, _edata, _etext; /* From linkerscript */ -extern char __init_begin, __init_end; - void __init mem_init(void) { @@ -67,15 +65,5 @@ mem_init(void) void free_initmem(void) { - unsigned long addr; - - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n", - (unsigned long)((&__init_end - &__init_begin) >> 10)); + free_initmem_default(0); } diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 12369b1..2ce731f 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -6,7 +6,7 @@ config FRV select HAVE_PERF_EVENTS select HAVE_UID16 select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_SHOW select HAVE_DEBUG_BUGVERBOSE select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index 23916b2..5d40aeb77 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c @@ -59,29 +59,12 @@ static void core_sleep_idle(void) mb(); } -void (*idle)(void) = core_sleep_idle; - -/* - * The idle thread. 
There's no useful work to be - * done, so just try to conserve power and have a - * low exit latency (ie sit in a loop waiting for - * somebody to say that they'd like to reschedule) - */ -void cpu_idle(void) +void arch_cpu_idle(void) { - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - while (!need_resched()) { - check_pgt_cache(); - - if (!frv_dma_inprogress && idle) - idle(); - } - rcu_idle_exit(); - - schedule_preempt_disabled(); - } + if (!frv_dma_inprogress) + core_sleep_idle(); + else + local_irq_enable(); } void machine_restart(char * __unused) diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c index 5cfd142..4bff48c 100644 --- a/arch/frv/kernel/traps.c +++ b/arch/frv/kernel/traps.c @@ -466,17 +466,6 @@ asmlinkage void compound_exception(unsigned long esfr1, BUG(); } /* end compound_exception() */ -/*****************************************************************************/ -/* - * The architecture-independent backtrace generator - */ -void dump_stack(void) -{ - show_stack(NULL, NULL); -} - -EXPORT_SYMBOL(dump_stack); - void show_stack(struct task_struct *task, unsigned long *sp) { } @@ -508,6 +497,7 @@ void show_regs(struct pt_regs *regs) int loop; printk("\n"); + show_regs_print_info(KERN_DEFAULT); printk("Frame: @%08lx [%s]\n", (unsigned long) regs, @@ -522,8 +512,6 @@ void show_regs(struct pt_regs *regs) else printk(" | "); } - - printk("Process %s (pid: %d)\n", current->comm, current->pid); } void die_if_kernel(const char *str, ...) diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c index 92e97b0..dee354f 100644 --- a/arch/frv/mm/init.c +++ b/arch/frv/mm/init.c @@ -122,7 +122,7 @@ void __init mem_init(void) #endif int codek = 0, datak = 0; - /* this will put all memory onto the freelists */ + /* this will put all low memory onto the freelists */ totalram_pages = free_all_bootmem(); #ifdef CONFIG_MMU @@ -131,14 +131,8 @@ void __init mem_init(void) datapages++; #ifdef CONFIG_HIGHMEM - for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) { - struct page *page = &mem_map[pfn]; - - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalram_pages++; - } + for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) + free_highmem_page(&mem_map[pfn]); #endif codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10; @@ -168,21 +162,7 @@ void __init mem_init(void) void free_initmem(void) { #if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL) - unsigned long start, end, addr; - - start = PAGE_ALIGN((unsigned long) &__init_begin); /* round up */ - end = ((unsigned long) &__init_end) & PAGE_MASK; /* round down */ - - /* next to check that the page we free is not a partial page */ - for (addr = start; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - - printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n", - (end - start) >> 10, start, end); + free_initmem_default(0); #endif } /* end free_initmem() */ @@ -193,14 +173,6 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10); + free_reserved_area(start, end, 0, "initrd"); } /* 
end free_initrd_mem() */ #endif diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index ae8551e..79250de 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -5,7 +5,7 @@ config H8300 select HAVE_GENERIC_HARDIRQS select GENERIC_ATOMIC64 select HAVE_UID16 - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index b609f63..1a744ab 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -53,40 +53,13 @@ asmlinkage void ret_from_kernel_thread(void); * The idle loop on an H8/300.. */ #if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM) -static void default_idle(void) +void arch_cpu_idle(void) { - local_irq_disable(); - if (!need_resched()) { - local_irq_enable(); - /* XXX: race here! What if need_resched() gets set now? */ - __asm__("sleep"); - } else - local_irq_enable(); -} -#else -static void default_idle(void) -{ - cpu_relax(); + local_irq_enable(); + /* XXX: race here! What if need_resched() gets set now? */ + __asm__("sleep"); } #endif -void (*idle)(void) = default_idle; - -/* - * The idle thread. There's no useful work to be - * done, so just try to conserve power and have a - * low exit latency (ie sit in a loop waiting for - * somebody to say that they'd like to reschedule) - */ -void cpu_idle(void) -{ - while (1) { - rcu_idle_enter(); - while (!need_resched()) - idle(); - rcu_idle_exit(); - schedule_preempt_disabled(); - } -} void machine_restart(char * __unused) { @@ -110,6 +83,8 @@ void machine_power_off(void) void show_regs(struct pt_regs * regs) { + show_regs_print_info(KERN_DEFAULT); + printk("\nPC: %08lx Status: %02x", regs->pc, regs->ccr); printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx", diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index 7833aa3..cfe494d 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -164,10 +164,3 @@ void show_trace_task(struct task_struct *tsk) { show_stack(tsk,(unsigned long *)tsk->thread.esp0); } - -void dump_stack(void) -{ - show_stack(NULL,NULL); -} - -EXPORT_SYMBOL(dump_stack); diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index 981e250..ff349d7 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -139,7 +139,7 @@ void __init mem_init(void) start_mem = PAGE_ALIGN(start_mem); max_mapnr = num_physpages = MAP_NR(high_memory); - /* this will put all memory onto the freelists */ + /* this will put all low memory onto the freelists */ totalram_pages = free_all_bootmem(); codek = (_etext - _stext) >> 10; @@ -161,15 +161,7 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - printk ("Freeing initrd memory: %dk freed\n", pages); + free_reserved_area(start, end, 0, "initrd"); } #endif @@ -177,23 +169,7 @@ void free_initmem(void) { #ifdef CONFIG_RAMKERNEL - unsigned long addr; -/* - * the following code should be cool even if these sections - * are not page aligned. 
- */ - addr = PAGE_ALIGN((unsigned long)(__init_begin)); - /* next to check that the page we free is not a partial page */ - for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n", - (addr - PAGE_ALIGN((long) __init_begin)) >> 10, - (int)(PAGE_ALIGN((unsigned long)__init_begin)), - (int)(addr - PAGE_SIZE)); + free_initmem_default(0); #endif } diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index 06ae9ff..9b948c6 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -51,28 +51,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) * If hardware or VM offer wait termination even though interrupts * are disabled. */ -static void default_idle(void) +void arch_cpu_idle(void) { __vmwait(); -} - -void (*idle_sleep)(void) = default_idle; - -void cpu_idle(void) -{ - while (1) { - tick_nohz_idle_enter(); - local_irq_disable(); - while (!need_resched()) { - idle_sleep(); - /* interrupts wake us up, but aren't serviced */ - local_irq_enable(); /* service interrupt */ - local_irq_disable(); - } - local_irq_enable(); - tick_nohz_idle_exit(); - schedule(); - } + /* interrupts wake us up, but irqs are still disabled */ + local_irq_enable(); } /* diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c index 8e095df..0e364ca 100644 --- a/arch/hexagon/kernel/smp.c +++ b/arch/hexagon/kernel/smp.c @@ -184,7 +184,7 @@ void __cpuinit start_secondary(void) local_irq_enable(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index be5e2dd..cc2171b 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -191,14 +191,6 @@ void show_stack(struct task_struct *task, unsigned long *fp) do_show_stack(task, fp, 0); } -void dump_stack(void) -{ - unsigned long *fp; - asm("%0 = r30" : "=r" (fp)); - show_stack(current, fp); -} -EXPORT_SYMBOL(dump_stack); - int die(const char *str, struct pt_regs *regs, long err) { static struct { diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c index 9b5a4a2..f337281 100644 --- a/arch/hexagon/kernel/vm_events.c +++ b/arch/hexagon/kernel/vm_events.c @@ -33,6 +33,8 @@ */ void show_regs(struct pt_regs *regs) { + show_regs_print_info(KERN_EMERG); + printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n", regs->restart_r0, regs->syscall_nr); printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds); diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 33f3fdc..e725ea0 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -26,7 +26,7 @@ config IA64 select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_VIRT_CPU_ACCOUNTING - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_DISCARD_MEMBLOCK select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP @@ -187,7 +187,7 @@ config IA64_DIG config IA64_DIG_VTD bool "DIG+Intel+IOMMU" - select DMAR + select INTEL_IOMMU select PCI_MSI config IA64_HP_ZX1 @@ -591,9 +591,9 @@ source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" if PM - -source "arch/ia64/kernel/cpufreq/Kconfig" - +menu "CPU Frequency scaling" +source "drivers/cpufreq/Kconfig" +endmenu endif endmenu diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index da2f319..e70cade 100644 --- a/arch/ia64/hp/sim/simserial.c 
+++ b/arch/ia64/hp/sim/simserial.c @@ -142,8 +142,7 @@ static void transmit_chars(struct tty_struct *tty, struct serial_state *info, goto out; } - if (info->xmit.head == info->xmit.tail || tty->stopped || - tty->hw_stopped) { + if (info->xmit.head == info->xmit.tail || tty->stopped) { #ifdef SIMSERIAL_DEBUG printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", info->xmit.head, info->xmit.tail, tty->stopped); @@ -181,7 +180,7 @@ static void rs_flush_chars(struct tty_struct *tty) struct serial_state *info = tty->driver_data; if (info->xmit.head == info->xmit.tail || tty->stopped || - tty->hw_stopped || !info->xmit.buf) + !info->xmit.buf) return; transmit_chars(tty, info, NULL); @@ -217,7 +216,7 @@ static int rs_write(struct tty_struct * tty, * Hey, we transmit directly from here in our case */ if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) && - !tty->stopped && !tty->hw_stopped) + !tty->stopped) transmit_chars(tty, info, NULL); return ret; @@ -325,14 +324,6 @@ static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) -static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) -{ - /* Handle turning off CRTSCTS */ - if ((old_termios->c_cflag & CRTSCTS) && - !(tty->termios.c_cflag & CRTSCTS)) { - tty->hw_stopped = 0; - } -} /* * This routine will shutdown a serial port; interrupts are disabled, and * DTR is dropped if the hangup on close termio flag is on. @@ -481,7 +472,6 @@ static const struct tty_operations hp_ops = { .throttle = rs_throttle, .unthrottle = rs_unthrottle, .send_xchar = rs_send_xchar, - .set_termios = rs_set_termios, .hangup = rs_hangup, .proc_fops = &rs_proc_fops, }; diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index d2bf1fd..76acbcd 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -106,16 +106,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; { - register unsigned long r8 __asm ("r8"); + register unsigned long r8 __asm ("r8") = 0; unsigned long prev; __asm__ __volatile__( " mf;; \n" - " mov %0=r0 \n" " mov ar.ccv=%4;; \n" "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" " .xdata4 \"__ex_table\", 1b-., 2f-. 
\n" "[2:]" - : "=r" (r8), "=r" (prev) + : "+r" (r8), "=&r" (prev) : "r" (uaddr), "r" (newval), "rO" ((long) (unsigned) oldval) : "memory"); diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h index 94eaa5b..aa91005 100644 --- a/arch/ia64/include/asm/hugetlb.h +++ b/arch/ia64/include/asm/hugetlb.h @@ -2,6 +2,7 @@ #define _ASM_IA64_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h index 2b68d85..1bf2cf2 100644 --- a/arch/ia64/include/asm/irqflags.h +++ b/arch/ia64/include/asm/irqflags.h @@ -89,6 +89,7 @@ static inline bool arch_irqs_disabled(void) static inline void arch_safe_halt(void) { + arch_local_irq_enable(); ia64_pal_halt_light(); /* PAL_HALT_LIGHT */ } diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index 43f96ab..8c70961 100644 --- a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS]; extern int cpe_vector; extern int ia64_cpe_irq; extern void ia64_mca_init(void); +extern void ia64_mca_irq_init(void); extern void ia64_mca_cpu_init(void *); extern void ia64_os_mca_dispatch(void); extern void ia64_os_mca_dispatch_end(void); diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h index 2e27ef1..2db0a6c 100644 --- a/arch/ia64/include/asm/numa.h +++ b/arch/ia64/include/asm/numa.h @@ -67,14 +67,13 @@ extern int paddr_to_nid(unsigned long paddr); extern void map_cpu_to_node(int cpu, int nid); extern void unmap_cpu_from_node(int cpu, int nid); - +extern void numa_clear_node(int cpu); #else /* !CONFIG_NUMA */ #define map_cpu_to_node(cpu, nid) do{}while(0) #define unmap_cpu_from_node(cpu, nid) do{}while(0) - #define paddr_to_nid(addr) 0 - +#define numa_clear_node(cpu) do { } while (0) #endif /* CONFIG_NUMA */ #endif /* _ASM_IA64_NUMA_H */ diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 020d655..cade13d 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -131,8 +131,6 @@ struct thread_info { #define TS_POLLING 1 /* true if in idle loop and not sleeping */ #define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */ -#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) - #ifndef __ASSEMBLY__ #define HAVE_SET_RESTORE_SIGMASK 1 static inline void set_restore_sigmask(void) diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index d959c84..20678a9 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o obj-$(CONFIG_IA64_CYCLONE) += cyclone.o -obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o diff --git a/arch/ia64/kernel/cpufreq/Kconfig b/arch/ia64/kernel/cpufreq/Kconfig deleted file mode 100644 index 2d9d527..0000000 --- a/arch/ia64/kernel/cpufreq/Kconfig +++ /dev/null @@ -1,29 +0,0 @@ - -# -# CPU Frequency scaling -# - -menu "CPU Frequency scaling" - -source "drivers/cpufreq/Kconfig" - -if CPU_FREQ - -comment "CPUFreq processor drivers" - -config IA64_ACPI_CPUFREQ - tristate "ACPI Processor P-States driver" - select CPU_FREQ_TABLE - depends on ACPI_PROCESSOR - help - This driver adds a CPUFreq driver 
which utilizes the ACPI - Processor Performance States. - - For details, take a look at <file:Documentation/cpu-freq/>. - - If in doubt, say N. - -endif # CPU_FREQ - -endmenu - diff --git a/arch/ia64/kernel/cpufreq/Makefile b/arch/ia64/kernel/cpufreq/Makefile deleted file mode 100644 index 4838f2a..0000000 --- a/arch/ia64/kernel/cpufreq/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o - diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c deleted file mode 100644 index f09b174..0000000 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ /dev/null @@ -1,437 +0,0 @@ -/* - * arch/ia64/kernel/cpufreq/acpi-cpufreq.c - * This file provides the ACPI based P-state support. This - * module works with generic cpufreq infrastructure. Most of - * the code is based on i386 version - * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c) - * - * Copyright (C) 2005 Intel Corp - * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> - */ - -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <asm/io.h> -#include <asm/uaccess.h> -#include <asm/pal.h> - -#include <linux/acpi.h> -#include <acpi/processor.h> - -MODULE_AUTHOR("Venkatesh Pallipadi"); -MODULE_DESCRIPTION("ACPI Processor P-States Driver"); -MODULE_LICENSE("GPL"); - - -struct cpufreq_acpi_io { - struct acpi_processor_performance acpi_data; - struct cpufreq_frequency_table *freq_table; - unsigned int resume; -}; - -static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; - -static struct cpufreq_driver acpi_cpufreq_driver; - - -static int -processor_set_pstate ( - u32 value) -{ - s64 retval; - - pr_debug("processor_set_pstate\n"); - - retval = ia64_pal_set_pstate((u64)value); - - if (retval) { - pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", - value, retval); - return -ENODEV; - } - return (int)retval; -} - - -static int -processor_get_pstate ( - u32 *value) -{ - u64 pstate_index = 0; - s64 retval; - - pr_debug("processor_get_pstate\n"); - - retval = ia64_pal_get_pstate(&pstate_index, - PAL_GET_PSTATE_TYPE_INSTANT); - *value = (u32) pstate_index; - - if (retval) - pr_debug("Failed to get current freq with " - "error 0x%lx, idx 0x%x\n", retval, *value); - - return (int)retval; -} - - -/* To be used only after data->acpi_data is initialized */ -static unsigned -extract_clock ( - struct cpufreq_acpi_io *data, - unsigned value, - unsigned int cpu) -{ - unsigned long i; - - pr_debug("extract_clock\n"); - - for (i = 0; i < data->acpi_data.state_count; i++) { - if (value == data->acpi_data.states[i].status) - return data->acpi_data.states[i].core_frequency; - } - return data->acpi_data.states[i-1].core_frequency; -} - - -static unsigned int -processor_get_freq ( - struct cpufreq_acpi_io *data, - unsigned int cpu) -{ - int ret = 0; - u32 value = 0; - cpumask_t saved_mask; - unsigned long clock_freq; - - pr_debug("processor_get_freq\n"); - - saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - if (smp_processor_id() != cpu) - goto migrate_end; - - /* processor_get_pstate gets the instantaneous frequency */ - ret = processor_get_pstate(&value); - - if (ret) { - set_cpus_allowed_ptr(current, &saved_mask); - printk(KERN_WARNING "get performance failed with error %d\n", - ret); - ret = 0; - goto migrate_end; - } - clock_freq = extract_clock(data, value, cpu); - ret = (clock_freq*1000); - -migrate_end: - 
set_cpus_allowed_ptr(current, &saved_mask); - return ret; -} - - -static int -processor_set_freq ( - struct cpufreq_acpi_io *data, - unsigned int cpu, - int state) -{ - int ret = 0; - u32 value = 0; - struct cpufreq_freqs cpufreq_freqs; - cpumask_t saved_mask; - int retval; - - pr_debug("processor_set_freq\n"); - - saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - if (smp_processor_id() != cpu) { - retval = -EAGAIN; - goto migrate_end; - } - - if (state == data->acpi_data.state) { - if (unlikely(data->resume)) { - pr_debug("Called after resume, resetting to P%d\n", state); - data->resume = 0; - } else { - pr_debug("Already at target state (P%d)\n", state); - retval = 0; - goto migrate_end; - } - } - - pr_debug("Transitioning from P%d to P%d\n", - data->acpi_data.state, state); - - /* cpufreq frequency struct */ - cpufreq_freqs.cpu = cpu; - cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; - cpufreq_freqs.new = data->freq_table[state].frequency; - - /* notify cpufreq */ - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); - - /* - * First we write the target state's 'control' value to the - * control_register. - */ - - value = (u32) data->acpi_data.states[state].control; - - pr_debug("Transitioning to state: 0x%08x\n", value); - - ret = processor_set_pstate(value); - if (ret) { - unsigned int tmp = cpufreq_freqs.new; - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); - cpufreq_freqs.new = cpufreq_freqs.old; - cpufreq_freqs.old = tmp; - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); - printk(KERN_WARNING "Transition failed with error %d\n", ret); - retval = -ENODEV; - goto migrate_end; - } - - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); - - data->acpi_data.state = state; - - retval = 0; - -migrate_end: - set_cpus_allowed_ptr(current, &saved_mask); - return (retval); -} - - -static unsigned int -acpi_cpufreq_get ( - unsigned int cpu) -{ - struct cpufreq_acpi_io *data = acpi_io_data[cpu]; - - pr_debug("acpi_cpufreq_get\n"); - - return processor_get_freq(data, cpu); -} - - -static int -acpi_cpufreq_target ( - struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - unsigned int next_state = 0; - unsigned int result = 0; - - pr_debug("acpi_cpufreq_setpolicy\n"); - - result = cpufreq_frequency_table_target(policy, - data->freq_table, target_freq, relation, &next_state); - if (result) - return (result); - - result = processor_set_freq(data, policy->cpu, next_state); - - return (result); -} - - -static int -acpi_cpufreq_verify ( - struct cpufreq_policy *policy) -{ - unsigned int result = 0; - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - - pr_debug("acpi_cpufreq_verify\n"); - - result = cpufreq_frequency_table_verify(policy, - data->freq_table); - - return (result); -} - - -static int -acpi_cpufreq_cpu_init ( - struct cpufreq_policy *policy) -{ - unsigned int i; - unsigned int cpu = policy->cpu; - struct cpufreq_acpi_io *data; - unsigned int result = 0; - - pr_debug("acpi_cpufreq_cpu_init\n"); - - data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); - if (!data) - return (-ENOMEM); - - acpi_io_data[cpu] = data; - - result = acpi_processor_register_performance(&data->acpi_data, cpu); - - if (result) - goto err_free; - - /* capability check */ - if (data->acpi_data.state_count <= 1) { - pr_debug("No 
P-States\n"); - result = -ENODEV; - goto err_unreg; - } - - if ((data->acpi_data.control_register.space_id != - ACPI_ADR_SPACE_FIXED_HARDWARE) || - (data->acpi_data.status_register.space_id != - ACPI_ADR_SPACE_FIXED_HARDWARE)) { - pr_debug("Unsupported address space [%d, %d]\n", - (u32) (data->acpi_data.control_register.space_id), - (u32) (data->acpi_data.status_register.space_id)); - result = -ENODEV; - goto err_unreg; - } - - /* alloc freq_table */ - data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * - (data->acpi_data.state_count + 1), - GFP_KERNEL); - if (!data->freq_table) { - result = -ENOMEM; - goto err_unreg; - } - - /* detect transition latency */ - policy->cpuinfo.transition_latency = 0; - for (i=0; i<data->acpi_data.state_count; i++) { - if ((data->acpi_data.states[i].transition_latency * 1000) > - policy->cpuinfo.transition_latency) { - policy->cpuinfo.transition_latency = - data->acpi_data.states[i].transition_latency * 1000; - } - } - policy->cur = processor_get_freq(data, policy->cpu); - - /* table init */ - for (i = 0; i <= data->acpi_data.state_count; i++) - { - data->freq_table[i].index = i; - if (i < data->acpi_data.state_count) { - data->freq_table[i].frequency = - data->acpi_data.states[i].core_frequency * 1000; - } else { - data->freq_table[i].frequency = CPUFREQ_TABLE_END; - } - } - - result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); - if (result) { - goto err_freqfree; - } - - /* notify BIOS that we exist */ - acpi_processor_notify_smm(THIS_MODULE); - - printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management " - "activated.\n", cpu); - - for (i = 0; i < data->acpi_data.state_count; i++) - pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", - (i == data->acpi_data.state?'*':' '), i, - (u32) data->acpi_data.states[i].core_frequency, - (u32) data->acpi_data.states[i].power, - (u32) data->acpi_data.states[i].transition_latency, - (u32) data->acpi_data.states[i].bus_master_latency, - (u32) data->acpi_data.states[i].status, - (u32) data->acpi_data.states[i].control); - - cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); - - /* the first call to ->target() should result in us actually - * writing something to the appropriate registers. 
- */ - data->resume = 1; - - return (result); - - err_freqfree: - kfree(data->freq_table); - err_unreg: - acpi_processor_unregister_performance(&data->acpi_data, cpu); - err_free: - kfree(data); - acpi_io_data[cpu] = NULL; - - return (result); -} - - -static int -acpi_cpufreq_cpu_exit ( - struct cpufreq_policy *policy) -{ - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - - pr_debug("acpi_cpufreq_cpu_exit\n"); - - if (data) { - cpufreq_frequency_table_put_attr(policy->cpu); - acpi_io_data[policy->cpu] = NULL; - acpi_processor_unregister_performance(&data->acpi_data, - policy->cpu); - kfree(data); - } - - return (0); -} - - -static struct freq_attr* acpi_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - - -static struct cpufreq_driver acpi_cpufreq_driver = { - .verify = acpi_cpufreq_verify, - .target = acpi_cpufreq_target, - .get = acpi_cpufreq_get, - .init = acpi_cpufreq_cpu_init, - .exit = acpi_cpufreq_cpu_exit, - .name = "acpi-cpufreq", - .owner = THIS_MODULE, - .attr = acpi_cpufreq_attr, -}; - - -static int __init -acpi_cpufreq_init (void) -{ - pr_debug("acpi_cpufreq_init\n"); - - return cpufreq_register_driver(&acpi_cpufreq_driver); -} - - -static void __exit -acpi_cpufreq_exit (void) -{ - pr_debug("acpi_cpufreq_exit\n"); - - cpufreq_unregister_driver(&acpi_cpufreq_driver); - return; -} - - -late_initcall(acpi_cpufreq_init); -module_exit(acpi_cpufreq_exit); - diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index c4cd45d..abc6dee 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -90,53 +90,6 @@ ENTRY(fsys_getpid) FSYS_RETURN END(fsys_getpid) -ENTRY(fsys_getppid) - .prologue - .altrp b6 - .body - add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16 - ;; - ld8 r17=[r17] // r17 = current->group_leader - add r9=TI_FLAGS+IA64_TASK_SIZE,r16 - ;; - - ld4 r9=[r9] - add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent - ;; - and r9=TIF_ALLWORK_MASK,r9 - -1: ld8 r18=[r17] // r18 = current->group_leader->real_parent - ;; - cmp.ne p8,p0=0,r9 - add r8=IA64_TASK_TGID_OFFSET,r18 // r8 = &current->group_leader->real_parent->tgid - ;; - - /* - * The .acq is needed to ensure that the read of tgid has returned its data before - * we re-check "real_parent". - */ - ld4.acq r8=[r8] // r8 = current->group_leader->real_parent->tgid -#ifdef CONFIG_SMP - /* - * Re-read current->group_leader->real_parent. - */ - ld8 r19=[r17] // r19 = current->group_leader->real_parent -(p8) br.spnt.many fsys_fallback_syscall - ;; - cmp.ne p6,p0=r18,r19 // did real_parent change? - mov r19=0 // i must not leak kernel bits... -(p6) br.cond.spnt.few 1b // yes -> redo the read of tgid and the check - ;; - mov r17=0 // i must not leak kernel bits... - mov r18=0 // i must not leak kernel bits... -#else - mov r17=0 // i must not leak kernel bits... - mov r18=0 // i must not leak kernel bits... - mov r19=0 // i must not leak kernel bits...
-#endif - FSYS_RETURN -END(fsys_getppid) - ENTRY(fsys_set_tid_address) .prologue .altrp b6 @@ -614,7 +567,7 @@ paravirt_fsyscall_table: data8 0 // chown data8 0 // lseek // 1040 data8 fsys_getpid // getpid - data8 fsys_getppid // getppid + data8 0 // getppid data8 0 // mount data8 0 // umount data8 0 // setuid // 1045 diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index ee33c3a..19f107b 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -76,7 +76,7 @@ * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ * * Note: The term "IRQ" is loosely used everywhere in Linux kernel to - * describeinterrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ + * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ * (isa_irq) is the only exception in this source code. */ @@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver) return 0; } +static int +iosapic_delete_rte(unsigned int irq, unsigned int gsi) +{ + struct iosapic_rte_info *rte, *temp; + + list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes, + rte_list) { + if (rte->iosapic->gsi_base + rte->rte_index == gsi) { + if (rte->refcnt) + return -EBUSY; + + list_del(&rte->rte_list); + kfree(rte); + return 0; + } + } + + return -EINVAL; +} + int iosapic_init(unsigned long phys_addr, unsigned int gsi_base) { int num_rte, err, index; @@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base) int iosapic_remove(unsigned int gsi_base) { - int index, err = 0; + int i, irq, index, err = 0; unsigned long flags; spin_lock_irqsave(&iosapic_lock, flags); @@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base) goto out; } + for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) { + irq = __gsi_to_irq(i); + if (irq < 0) + continue; + + err = iosapic_delete_rte(irq, i); + if (err) + goto out; + } + iounmap(iosapic_lists[index].addr); iosapic_free(index); out: diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index ad69606..f2c41828 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -23,6 +23,8 @@ #include <linux/interrupt.h> #include <linux/kernel_stat.h> +#include <asm/mca.h> + /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. @@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask) #endif /* CONFIG_SMP */ +int __init arch_early_irq_init(void) +{ + ia64_mca_irq_init(); + return 0; +} + #ifdef CONFIG_HOTPLUG_CPU unsigned int vectors_in_migration[NR_IRQS]; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 65bf9cd..d7396db 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -2074,22 +2074,16 @@ ia64_mca_init(void) printk(KERN_INFO "MCA related initialization done\n"); } + /* - * ia64_mca_late_init - * - * Opportunity to setup things that require initialization later - * than ia64_mca_init. Setup a timer to poll for CPEs if the - * platform doesn't support an interrupt driven mechanism. - * - * Inputs : None - * Outputs : Status + * These pieces cannot be done in ia64_mca_init() because it is called before + * early_irq_init() which would wipe out our percpu irq registrations. But we + * cannot leave them until ia64_mca_late_init() because by then all the other + * processors have been brought online and have set their own CMC vectors to + * point at a non-existant action. Called from arch_early_irq_init(). 
*/ -static int __init -ia64_mca_late_init(void) +void __init ia64_mca_irq_init(void) { - if (!mca_init) - return 0; - /* * Configure the CMCI/P vector and handler. Interrupts for CMC are * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). @@ -2108,6 +2102,23 @@ ia64_mca_late_init(void) /* Setup the CPEI/P handler */ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); #endif +} + +/* + * ia64_mca_late_init + * + * Opportunity to setup things that require initialization later + * than ia64_mca_init. Setup a timer to poll for CPEs if the + * platform doesn't support an interrupt driven mechanism. + * + * Inputs : None + * Outputs : Status + */ +static int __init +ia64_mca_late_init(void) +{ + if (!mca_init) + return 0; register_hotcpu_notifier(&mca_cpu_notifier); diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 9392e02..94f8bf7 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -349,7 +349,7 @@ init_record_index_pools(void) /* - 3 - */ slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1; - slidx_pool.buffer = (slidx_list_t *) + slidx_pool.buffer = kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL); return slidx_pool.buffer ? 0 : -ENOMEM; diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 77597e5..79521d5 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={ #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) -/* - * this array is used to keep track of the proc entries we create. This is - * required in the module mode when we need to remove all entries. The procfs code - * does not do recursion of deletion - * - * Notes: - * - +1 accounts for the cpuN directory entry in /proc/pal - */ -#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1)) - -static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES]; static struct proc_dir_entry *palinfo_dir; /* @@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi static void __cpuinit create_palinfo_proc_entries(unsigned int cpu) { -# define CPUSTR "cpu%d" - pal_func_cpu_u_t f; - struct proc_dir_entry **pdir; struct proc_dir_entry *cpu_dir; int j; - char cpustr[sizeof(CPUSTR)]; - - - /* - * we keep track of created entries in a depth-first order for - * cleanup purposes. Each entry is stored into palinfo_proc_entries - */ - sprintf(cpustr,CPUSTR, cpu); + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", cpu); cpu_dir = proc_mkdir(cpustr, palinfo_dir); + if (!cpu_dir) + return; f.req_cpu = cpu; - /* - * Compute the location to store per cpu entries - * We dont store the top level entry in this list, but - * remove it finally after removing all cpu entries. 
- */ - pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; - *pdir++ = cpu_dir; for (j=0; j < NR_PALINFO_ENTRIES; j++) { f.func_id = j; - *pdir = create_proc_read_entry( - palinfo_entries[j].name, 0, cpu_dir, - palinfo_read_entry, (void *)f.value); - pdir++; + create_proc_read_entry( + palinfo_entries[j].name, 0, cpu_dir, + palinfo_read_entry, (void *)f.value); } } static void remove_palinfo_proc_entries(unsigned int hcpu) { - int j; - struct proc_dir_entry *cpu_dir, **pdir; - - pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; - cpu_dir = *pdir; - *pdir++=NULL; - for (j=0; j < (NR_PALINFO_ENTRIES); j++) { - if ((*pdir)) { - remove_proc_entry ((*pdir)->name, cpu_dir); - *pdir ++= NULL; - } - } - - if (cpu_dir) { - remove_proc_entry(cpu_dir->name, palinfo_dir); - } + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", hcpu); + remove_proc_subtree(cpustr, palinfo_dir); } static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, @@ -1058,6 +1019,8 @@ palinfo_init(void) printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); palinfo_dir = proc_mkdir("pal", NULL); + if (!palinfo_dir) + return -ENOMEM; /* Create palinfo dirs in /proc for all online cpus */ for_each_online_cpu(i) { @@ -1073,22 +1036,8 @@ palinfo_init(void) static void __exit palinfo_exit(void) { - int i = 0; - - /* remove all nodes: depth first pass. Could optimize this */ - for_each_online_cpu(i) { - remove_palinfo_proc_entries(i); - } - - /* - * Remove the top level entry finally - */ - remove_proc_entry(palinfo_dir->name, NULL); - - /* - * Unregister from cpu notifier callbacks - */ unregister_hotcpu_notifier(&palinfo_cpu_notifier); + remove_proc_subtree("pal", NULL); } module_init(palinfo_init); diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 433f5e8..9ea25fc 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -42,6 +42,7 @@ #include <linux/completion.h> #include <linux/tracehook.h> #include <linux/slab.h> +#include <linux/cpu.h> #include <asm/errno.h> #include <asm/intrinsics.h> @@ -619,6 +620,7 @@ static struct file_system_type pfm_fs_type = { .mount = pfmfs_mount, .kill_sb = kill_anon_super, }; +MODULE_ALIAS_FS("pfmfs"); DEFINE_PER_CPU(unsigned long, pfm_syst_info); DEFINE_PER_CPU(struct task_struct *, pmu_owner); @@ -1321,8 +1323,6 @@ out: } EXPORT_SYMBOL(pfm_unregister_buffer_fmt); -extern void update_pal_halt_status(int); - static int pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) { @@ -1370,9 +1370,9 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) cpu)); /* - * disable default_idle() to go to PAL_HALT + * Force idle() into poll mode */ - update_pal_halt_status(0); + cpu_idle_poll_ctrl(true); UNLOCK_PFS(flags); @@ -1429,11 +1429,8 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) is_syswide, cpu)); - /* - * if possible, enable default_idle() to go into PAL_HALT - */ - if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0) - update_pal_halt_status(1); + /* Undo forced polling. 
Last session reenables pal_halt */ + cpu_idle_poll_ctrl(false); UNLOCK_PFS(flags); diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index e34f565..55d4ba4 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -96,21 +96,13 @@ show_stack (struct task_struct *task, unsigned long *sp) } void -dump_stack (void) -{ - show_stack(NULL, NULL); -} - -EXPORT_SYMBOL(dump_stack); - -void show_regs (struct pt_regs *regs) { unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; print_modules(); - printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), - smp_processor_id(), current->comm); + printk("\n"); + show_regs_print_info(KERN_DEFAULT); printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n", regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(), init_utsname()->release); @@ -209,41 +201,13 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) local_irq_disable(); /* force interrupt disable */ } -static int pal_halt = 1; -static int can_do_pal_halt = 1; - static int __init nohalt_setup(char * str) { - pal_halt = can_do_pal_halt = 0; + cpu_idle_poll_ctrl(true); return 1; } __setup("nohalt", nohalt_setup); -void -update_pal_halt_status(int status) -{ - can_do_pal_halt = pal_halt && status; -} - -/* - * We use this if we don't have any better idle routine.. - */ -void -default_idle (void) -{ - local_irq_enable(); - while (!need_resched()) { - if (can_do_pal_halt) { - local_irq_disable(); - if (!need_resched()) { - safe_halt(); - } - local_irq_enable(); - } else - cpu_relax(); - } -} - #ifdef CONFIG_HOTPLUG_CPU /* We don't actually take CPU down, just spin without interrupts. */ static inline void play_dead(void) @@ -270,50 +234,29 @@ static inline void play_dead(void) } #endif /* CONFIG_HOTPLUG_CPU */ -void __attribute__((noreturn)) -cpu_idle (void) +void arch_cpu_idle_dead(void) +{ + play_dead(); +} + +void arch_cpu_idle(void) { void (*mark_idle)(int) = ia64_mark_idle; - int cpu = smp_processor_id(); - - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - if (can_do_pal_halt) { - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we - * test NEED_RESCHED: - */ - smp_mb(); - } else { - current_thread_info()->status |= TS_POLLING; - } - if (!need_resched()) { - void (*idle)(void); #ifdef CONFIG_SMP - min_xtp(); + min_xtp(); #endif - rmb(); - if (mark_idle) - (*mark_idle)(1); - - if (!idle) - idle = default_idle; - (*idle)(); - if (mark_idle) - (*mark_idle)(0); + rmb(); + if (mark_idle) + (*mark_idle)(1); + + safe_halt(); + + if (mark_idle) + (*mark_idle)(0); #ifdef CONFIG_SMP - normal_xtp(); + normal_xtp(); #endif - } - rcu_idle_exit(); - schedule_preempt_disabled(); - check_pgt_cache(); - if (cpu_is_offline(cpu)) - play_dead(); - } } void diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 2029cc0..13bfdd2 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -1063,6 +1063,7 @@ check_bugs (void) static int __init run_dmi_scan(void) { dmi_scan_machine(); + dmi_set_dump_stack_arch_desc(); return 0; } core_initcall(run_dmi_scan); diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 500f1e4..8d87168 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -455,7 +455,7 @@ start_secondary (void *unused) preempt_disable(); smp_callin(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); return 0; } diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 
4332f7e..a7869f8 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c @@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte) "srlz.d;;" "ssm psr.i;;" "srlz.d;;" - : "=r"(ret) : "r"(iha), "r"(pte):"memory"); + : "=&r"(ret) : "r"(iha), "r"(pte) : "memory"); return ret; } diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 80dab50..67c59eb 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -47,6 +47,8 @@ void show_mem(unsigned int filter) printk(KERN_INFO "Mem-info:\n"); show_free_areas(filter); printk(KERN_INFO "Node memory in pages:\n"); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; for_each_online_pgdat(pgdat) { unsigned long present; unsigned long flags; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index c2e955e..ae4db4b 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -623,6 +623,8 @@ void show_mem(unsigned int filter) printk(KERN_INFO "Mem-info:\n"); show_free_areas(filter); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; printk(KERN_INFO "Node memory in pages:\n"); for_each_online_pgdat(pgdat) { unsigned long present; @@ -817,13 +819,12 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP -int __meminit vmemmap_populate(struct page *start_page, - unsigned long size, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - return vmemmap_populate_basepages(start_page, size, node); + return vmemmap_populate_basepages(start, end, node); } -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } #endif diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 20bc967..d1fe4b4 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -154,25 +154,14 @@ ia64_init_addr_space (void) void free_initmem (void) { - unsigned long addr, eaddr; - - addr = (unsigned long) ia64_imva(__init_begin); - eaddr = (unsigned long) ia64_imva(__init_end); - while (addr < eaddr) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - ++totalram_pages; - addr += PAGE_SIZE; - } - printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n", - (__init_end - __init_begin) >> 10); + free_reserved_area((unsigned long)ia64_imva(__init_begin), + (unsigned long)ia64_imva(__init_end), + 0, "unused kernel"); } void __init free_initrd_mem (unsigned long start, unsigned long end) { - struct page *page; /* * EFI uses 4KB pages while the kernel can use 4KB or bigger. * Thus EFI and the kernel may have different page sizes. 
It is @@ -213,11 +202,7 @@ free_initrd_mem (unsigned long start, unsigned long end) for (; start < end; start += PAGE_SIZE) { if (!virt_addr_valid(start)) continue; - page = virt_to_page(start); - ClearPageReserved(page); - init_page_count(page); - free_page(start); - ++totalram_pages; + free_reserved_page(virt_to_page(start)); } } diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c index 3dccdd8..43964cd 100644 --- a/arch/ia64/mm/ioremap.c +++ b/arch/ia64/mm/ioremap.c @@ -16,7 +16,7 @@ #include <asm/meminit.h> static inline void __iomem * -__ioremap (unsigned long phys_addr) +__ioremap_uc(unsigned long phys_addr) { return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); } @@ -24,7 +24,11 @@ __ioremap (unsigned long phys_addr) void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size) { - return __ioremap(phys_addr); + u64 attr; + attr = kern_mem_attribute(phys_addr, size); + if (attr & EFI_MEMORY_WB) + return (void __iomem *) phys_to_virt(phys_addr); + return __ioremap_uc(phys_addr); } void __iomem * @@ -47,7 +51,7 @@ ioremap (unsigned long phys_addr, unsigned long size) if (attr & EFI_MEMORY_WB) return (void __iomem *) phys_to_virt(phys_addr); else if (attr & EFI_MEMORY_UC) - return __ioremap(phys_addr); + return __ioremap_uc(phys_addr); /* * Some chipsets don't support UC access to memory. If @@ -93,7 +97,7 @@ ioremap (unsigned long phys_addr, unsigned long size) return (void __iomem *) (offset + (char __iomem *)addr); } - return __ioremap(phys_addr); + return __ioremap_uc(phys_addr); } EXPORT_SYMBOL(ioremap); @@ -103,7 +107,7 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size) if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) return NULL; - return __ioremap(phys_addr); + return __ioremap_uc(phys_addr); } EXPORT_SYMBOL(ioremap_nocache); diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index 3efea7d..4248492 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -61,18 +61,36 @@ paddr_to_nid(unsigned long paddr) int __meminit __early_pfn_to_nid(unsigned long pfn) { int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec; + /* + * NOTE: The following SMP-unsafe globals are only used early in boot + * when the kernel is running single-threaded. 
+ */ + static int __meminitdata last_ssec, last_esec; + static int __meminitdata last_nid; + + if (section >= last_ssec && section < last_esec) + return last_nid; for (i = 0; i < num_node_memblks; i++) { ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT; esec = (node_memblk[i].start_paddr + node_memblk[i].size + ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT; - if (section >= ssec && section < esec) + if (section >= ssec && section < esec) { + last_ssec = ssec; + last_esec = esec; + last_nid = node_memblk[i].nid; return node_memblk[i].nid; + } } return -1; } +void __cpuinit numa_clear_node(int cpu) +{ + unmap_cpu_from_node(cpu, NUMA_NO_NODE); +} + #ifdef CONFIG_MEMORY_HOTPLUG /* * SRAT information is stored in node_memblk[], then we can use SRAT diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 60532ab..de1474f 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -15,6 +15,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> +#include <linux/pci-acpi.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/slab.h> @@ -458,6 +459,16 @@ void pcibios_fixup_bus(struct pci_bus *b) platform_pci_fixup_bus(b); } +void pcibios_add_bus(struct pci_bus *bus) +{ + acpi_pci_add_bus(bus); +} + +void pcibios_remove_bus(struct pci_bus *bus) +{ + acpi_pci_remove_bus(bus); +} + void pcibios_set_master (struct pci_dev *dev) { /* No special bus mastering setup handling */ diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 14c1711..e35f648 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -490,11 +490,14 @@ static int __init tiocx_init(void) { cnodeid_t cnodeid; int found_tiocx_device = 0; + int err; if (!ia64_platform_is("sn2")) return 0; - bus_register(&tiocx_bus_type); + err = bus_register(&tiocx_bus_type); + if (err) + return err; for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) { nasid_t nasid; diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 9262381..bcd17b2 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -10,7 +10,7 @@ config M32R select ARCH_WANT_IPC_PARSE_VERSION select HAVE_DEBUG_BUGVERBOSE select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 diff --git a/arch/m32r/include/uapi/asm/stat.h b/arch/m32r/include/uapi/asm/stat.h index da4518f..98470fe 100644 --- a/arch/m32r/include/uapi/asm/stat.h +++ b/arch/m32r/include/uapi/asm/stat.h @@ -63,10 +63,10 @@ struct stat64 { long long st_size; unsigned long st_blksize; -#if defined(__BIG_ENDIAN) +#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) unsigned long __pad4; /* future possible st_blocks high bits */ unsigned long st_blocks; /* Number 512-byte blocks allocated. */ -#elif defined(__LITTLE_ENDIAN) +#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) unsigned long st_blocks; /* Number 512-byte blocks allocated. */ unsigned long __pad4; /* future possible st_blocks high bits */ #else diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index bde899e..e69221d 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c @@ -47,24 +47,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk) void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); -/* - * The idle thread. 
There's no useful work to be - * done, so just try to conserve power and have a - * low exit latency (ie sit in a loop waiting for - * somebody to say that they'd like to reschedule) - */ -void cpu_idle (void) -{ - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - while (!need_resched()) - cpu_relax(); - rcu_idle_exit(); - schedule_preempt_disabled(); - } -} - void machine_restart(char *__unused) { #if defined(CONFIG_PLAT_MAPPI3) @@ -91,6 +73,8 @@ void machine_power_off(void) void show_regs(struct pt_regs * regs) { printk("\n"); + show_regs_print_info(KERN_DEFAULT); + printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \ regs->bpc, regs->psw, regs->lr, regs->fp); printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \ diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index 13168a7..0ac558a 100644 --- a/arch/m32r/kernel/smpboot.c +++ b/arch/m32r/kernel/smpboot.c @@ -432,7 +432,7 @@ int __init start_secondary(void *unused) */ local_flush_tlb_all(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); return 0; } diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 3bcb207..a7a424f 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -132,10 +132,8 @@ static void show_trace(struct task_struct *task, unsigned long *stack) printk("Call Trace: "); while (!kstack_end(stack)) { addr = *stack++; - if (__kernel_text_address(addr)) { - printk("[<%08lx>] ", addr); - print_symbol("%s\n", addr); - } + if (__kernel_text_address(addr)) + printk("[<%08lx>] %pSR\n", addr, (void *)addr); } printk("\n"); } @@ -169,15 +167,6 @@ void show_stack(struct task_struct *task, unsigned long *sp) show_trace(task, sp); } -void dump_stack(void) -{ - unsigned long stack; - - show_trace(current, &stack); -} - -EXPORT_SYMBOL(dump_stack); - static void show_registers(struct pt_regs *regs) { int i = 0; diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c index 78b660e..ab4cbce 100644 --- a/arch/m32r/mm/init.c +++ b/arch/m32r/mm/init.c @@ -28,10 +28,7 @@ #include <asm/mmu_context.h> #include <asm/setup.h> #include <asm/tlb.h> - -/* References to section boundaries */ -extern char _text, _etext, _edata; -extern char __init_begin, __init_end; +#include <asm/sections.h> pgd_t swapper_pg_dir[1024]; @@ -184,17 +181,7 @@ void __init mem_init(void) *======================================================================*/ void free_initmem(void) { - unsigned long addr; - - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", \ - (int)(&__init_end - &__init_begin) >> 10); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD @@ -204,13 +191,6 @@ void free_initmem(void) *======================================================================*/ void free_initrd_mem(unsigned long start, unsigned long end) { - unsigned long p; - for (p = start; p < end; p += PAGE_SIZE) { - ClearPageReserved(virt_to_page(p)); - init_page_count(virt_to_page(p)); - free_page(p); - totalram_pages++; - } - printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 0e708c7..6de8133 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -8,7 +8,7 @@ config M68K select GENERIC_IRQ_SHOW select 
GENERIC_ATOMIC64 select HAVE_UID16 - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select GENERIC_CPU_DEVICES select GENERIC_STRNCPY_FROM_USER if MMU diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus index 93ef034..675b087 100644 --- a/arch/m68k/Kconfig.bus +++ b/arch/m68k/Kconfig.bus @@ -45,6 +45,16 @@ config ISA (MCA) or VESA. ISA is an older system, now being displaced by PCI; newer boards don't support it. If you have ISA, say Y, otherwise N. +config ATARI_ROM_ISA + bool "Atari ROM port ISA adapter support" + depends on ATARI + help + This option enables support for the ROM port ISA adapter used to + operate ISA cards on Atari. Only 8 bit cards are supported, and + no interrupt lines are connected. + The only driver currently using this adapter is the EtherNEC + driver for RTL8019AS based NE2000 compatible network cards. + config GENERIC_ISA_DMA def_bool ISA diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices index 4bc945d..d163991 100644 --- a/arch/m68k/Kconfig.devices +++ b/arch/m68k/Kconfig.devices @@ -55,6 +55,30 @@ config NFETH which will emulate a regular ethernet device while presenting an ethertap device to the host system. +config ATARI_ETHERNAT + bool "Atari EtherNAT Ethernet support" + depends on ATARI + ---help--- + Say Y to include support for the EtherNAT network adapter for the + CT/60 extension port. + + To compile the actual ethernet driver, choose Y or M for the SMC91X + option in the network device section; the module will be called smc91x. + +config ATARI_ETHERNEC + bool "Atari EtherNEC Ethernet support" + depends on ATARI_ROM_ISA + ---help--- + Say Y to include support for the EtherNEC network adapter for the + ROM port. The driver works by polling instead of interrupts, so it + is quite slow. + + This driver also suppports the ethernet part of the NetUSBee ROM + port combined Ethernet/USB adapter. + + To compile the actual ethernet driver, choose Y or M in for the NE2000 + option in the network device section; the module will be called ne. + endmenu menu "Character devices" diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index 7cdf6b0..7240584 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -310,7 +310,6 @@ config COBRA5282 config SOM5282EM bool "EMAC.Inc SOM5282EM board support" depends on M528x - select EMAC_INC help Support for the EMAC.Inc SOM5282EM module. diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c index 3f41092..20cde4e 100644 --- a/arch/m68k/atari/ataints.c +++ b/arch/m68k/atari/ataints.c @@ -49,6 +49,7 @@ #include <asm/atari_stdma.h> #include <asm/irq.h> #include <asm/entry.h> +#include <asm/io.h> /* @@ -122,6 +123,136 @@ static struct irq_chip atari_irq_chip = { }; /* + * ST-MFP timer D chained interrupts - each driver gets its own timer + * interrupt instance. 
+ */ + +struct mfptimerbase { + volatile struct MFP *mfp; + unsigned char mfp_mask, mfp_data; + unsigned short int_mask; + int handler_irq, mfptimer_irq, server_irq; + char *name; +} stmfp_base = { + .mfp = &st_mfp, + .int_mask = 0x0, + .handler_irq = IRQ_MFP_TIMD, + .mfptimer_irq = IRQ_MFP_TIMER1, + .name = "MFP Timer D" +}; + +static irqreturn_t mfptimer_handler(int irq, void *dev_id) +{ + struct mfptimerbase *base = dev_id; + int mach_irq; + unsigned char ints; + + mach_irq = base->mfptimer_irq; + ints = base->int_mask; + for (; ints; mach_irq++, ints >>= 1) { + if (ints & 1) + generic_handle_irq(mach_irq); + } + return IRQ_HANDLED; +} + + +static void atari_mfptimer_enable(struct irq_data *data) +{ + int mfp_num = data->irq - IRQ_MFP_TIMER1; + stmfp_base.int_mask |= 1 << mfp_num; + atari_enable_irq(IRQ_MFP_TIMD); +} + +static void atari_mfptimer_disable(struct irq_data *data) +{ + int mfp_num = data->irq - IRQ_MFP_TIMER1; + stmfp_base.int_mask &= ~(1 << mfp_num); + if (!stmfp_base.int_mask) + atari_disable_irq(IRQ_MFP_TIMD); +} + +static struct irq_chip atari_mfptimer_chip = { + .name = "timer_d", + .irq_enable = atari_mfptimer_enable, + .irq_disable = atari_mfptimer_disable, +}; + + +/* + * EtherNAT CPLD interrupt handling + * CPLD interrupt register is at phys. 0x80000023 + * Need this mapped in at interrupt startup time + * Possibly need this mapped on demand anyway - + * EtherNAT USB driver needs to disable IRQ before + * startup! + */ + +static unsigned char *enat_cpld; + +static unsigned int atari_ethernat_startup(struct irq_data *data) +{ + int enat_num = 140 - data->irq + 1; + + m68k_irq_startup(data); + /* + * map CPLD interrupt register + */ + if (!enat_cpld) + enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2); + /* + * do _not_ enable the USB chip interrupt here - causes interrupt storm + * and triggers dead interrupt watchdog + * Need to reset the USB chip to a sane state in early startup before + * removing this hack + */ + if (enat_num == 1) + *enat_cpld |= 1 << enat_num; + + return 0; +} + +static void atari_ethernat_enable(struct irq_data *data) +{ + int enat_num = 140 - data->irq + 1; + /* + * map CPLD interrupt register + */ + if (!enat_cpld) + enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2); + *enat_cpld |= 1 << enat_num; +} + +static void atari_ethernat_disable(struct irq_data *data) +{ + int enat_num = 140 - data->irq + 1; + /* + * map CPLD interrupt register + */ + if (!enat_cpld) + enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2); + *enat_cpld &= ~(1 << enat_num); +} + +static void atari_ethernat_shutdown(struct irq_data *data) +{ + int enat_num = 140 - data->irq + 1; + if (enat_cpld) { + *enat_cpld &= ~(1 << enat_num); + iounmap(enat_cpld); + enat_cpld = NULL; + } +} + +static struct irq_chip atari_ethernat_chip = { + .name = "ethernat", + .irq_startup = atari_ethernat_startup, + .irq_shutdown = atari_ethernat_shutdown, + .irq_enable = atari_ethernat_enable, + .irq_disable = atari_ethernat_disable, +}; + +/* * void atari_init_IRQ (void) * * Parameters: None @@ -198,6 +329,27 @@ void __init atari_init_IRQ(void) /* Initialize the PSG: all sounds off, both ports output */ sound_ym.rd_data_reg_sel = 7; sound_ym.wd_data = 0xff; + + m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq, + IRQ_MFP_TIMER1, 8); + + /* prepare timer D data for use as poll interrupt */ + /* set Timer D data Register - needs to be > 0 */ + st_mfp.tim_dt_d = 254; /* < 100 Hz */ + /* start timer D, div = 
1:100 */ + st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 0xf0) | 0x6; + + /* request timer D dispatch handler */ + if (request_irq(IRQ_MFP_TIMD, mfptimer_handler, IRQF_SHARED, + stmfp_base.name, &stmfp_base)) + pr_err("Couldn't register %s interrupt\n", stmfp_base.name); + + /* + * EtherNAT ethernet / USB interrupt handlers + */ + + m68k_setup_irq_controller(&atari_ethernat_chip, handle_simple_irq, + 139, 2); } diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c index 037c11c..fb2d0bd 100644 --- a/arch/m68k/atari/config.c +++ b/arch/m68k/atari/config.c @@ -31,6 +31,8 @@ #include <linux/init.h> #include <linux/delay.h> #include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/usb/isp116x.h> #include <linux/vt_kern.h> #include <linux/module.h> @@ -655,3 +657,240 @@ static void atari_get_hardware_list(struct seq_file *m) ATARIHW_ANNOUNCE(VME, "VME Bus"); ATARIHW_ANNOUNCE(DSP56K, "DSP56001 processor"); } + +/* + * MSch: initial platform device support for Atari, + * required for EtherNAT/EtherNEC/NetUSBee drivers + */ + +#if defined(CONFIG_ATARI_ETHERNAT) || defined(CONFIG_ATARI_ETHERNEC) +static void isp1160_delay(struct device *dev, int delay) +{ + ndelay(delay); +} +#endif + +#ifdef CONFIG_ATARI_ETHERNAT +/* + * EtherNAT: SMC91C111 Ethernet chipset, handled by smc91x driver + */ + +#define ATARI_ETHERNAT_IRQ 140 + +static struct resource smc91x_resources[] = { + [0] = { + .name = "smc91x-regs", + .start = ATARI_ETHERNAT_PHYS_ADDR, + .end = ATARI_ETHERNAT_PHYS_ADDR + 0xfffff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .name = "smc91x-irq", + .start = ATARI_ETHERNAT_IRQ, + .end = ATARI_ETHERNAT_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = -1, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; + +/* + * ISP 1160 - using the isp116x-hcd module + */ + +#define ATARI_USB_PHYS_ADDR 0x80000012 +#define ATARI_USB_IRQ 139 + +static struct resource isp1160_resources[] = { + [0] = { + .name = "isp1160-data", + .start = ATARI_USB_PHYS_ADDR, + .end = ATARI_USB_PHYS_ADDR + 0x1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .name = "isp1160-regs", + .start = ATARI_USB_PHYS_ADDR + 0x4, + .end = ATARI_USB_PHYS_ADDR + 0x5, + .flags = IORESOURCE_MEM, + }, + [2] = { + .name = "isp1160-irq", + .start = ATARI_USB_IRQ, + .end = ATARI_USB_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +/* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */ +static struct isp116x_platform_data isp1160_platform_data = { + /* Enable internal resistors on downstream ports */ + .sel15Kres = 1, + /* On-chip overcurrent protection */ + .oc_enable = 1, + /* INT output polarity */ + .int_act_high = 1, + /* INT edge or level triggered */ + .int_edge_triggered = 0, + + /* WAKEUP pin connected - NOT SUPPORTED */ + /* .remote_wakeup_connected = 0, */ + /* Wakeup by devices on usb bus enabled */ + .remote_wakeup_enable = 0, + .delay = isp1160_delay, +}; + +static struct platform_device isp1160_device = { + .name = "isp116x-hcd", + .id = 0, + .num_resources = ARRAY_SIZE(isp1160_resources), + .resource = isp1160_resources, + .dev = { + .platform_data = &isp1160_platform_data, + }, +}; + +static struct platform_device *atari_ethernat_devices[] __initdata = { + &smc91x_device, + &isp1160_device +}; +#endif /* CONFIG_ATARI_ETHERNAT */ + +#ifdef CONFIG_ATARI_ETHERNEC +/* + * EtherNEC: RTL8019 (NE2000 compatible) Ethernet chipset, + * handled by ne.c driver + */ + +#define ATARI_ETHERNEC_PHYS_ADDR 
0xfffa0000 +#define ATARI_ETHERNEC_BASE 0x300 +#define ATARI_ETHERNEC_IRQ IRQ_MFP_TIMER1 + +static struct resource rtl8019_resources[] = { + [0] = { + .name = "rtl8019-regs", + .start = ATARI_ETHERNEC_BASE, + .end = ATARI_ETHERNEC_BASE + 0x20 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + .name = "rtl8019-irq", + .start = ATARI_ETHERNEC_IRQ, + .end = ATARI_ETHERNEC_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtl8019_device = { + .name = "ne", + .id = -1, + .num_resources = ARRAY_SIZE(rtl8019_resources), + .resource = rtl8019_resources, +}; + +/* + * NetUSBee: ISP1160 USB host adapter via ROM-port adapter + */ + +#define ATARI_NETUSBEE_PHYS_ADDR 0xfffa8000 +#define ATARI_NETUSBEE_BASE 0x340 +#define ATARI_NETUSBEE_IRQ IRQ_MFP_TIMER2 + +static struct resource netusbee_resources[] = { + [0] = { + .name = "isp1160-data", + .start = ATARI_NETUSBEE_BASE, + .end = ATARI_NETUSBEE_BASE + 0x1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .name = "isp1160-regs", + .start = ATARI_NETUSBEE_BASE + 0x20, + .end = ATARI_NETUSBEE_BASE + 0x21, + .flags = IORESOURCE_MEM, + }, + [2] = { + .name = "isp1160-irq", + .start = ATARI_NETUSBEE_IRQ, + .end = ATARI_NETUSBEE_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +/* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */ +static struct isp116x_platform_data netusbee_platform_data = { + /* Enable internal resistors on downstream ports */ + .sel15Kres = 1, + /* On-chip overcurrent protection */ + .oc_enable = 1, + /* INT output polarity */ + .int_act_high = 1, + /* INT edge or level triggered */ + .int_edge_triggered = 0, + + /* WAKEUP pin connected - NOT SUPPORTED */ + /* .remote_wakeup_connected = 0, */ + /* Wakeup by devices on usb bus enabled */ + .remote_wakeup_enable = 0, + .delay = isp1160_delay, +}; + +static struct platform_device netusbee_device = { + .name = "isp116x-hcd", + .id = 1, + .num_resources = ARRAY_SIZE(netusbee_resources), + .resource = netusbee_resources, + .dev = { + .platform_data = &netusbee_platform_data, + }, +}; + +static struct platform_device *atari_netusbee_devices[] __initdata = { + &rtl8019_device, + &netusbee_device +}; +#endif /* CONFIG_ATARI_ETHERNEC */ + +int __init atari_platform_init(void) +{ + int rv = 0; + + if (!MACH_IS_ATARI) + return -ENODEV; + +#ifdef CONFIG_ATARI_ETHERNAT + { + unsigned char *enatc_virt; + enatc_virt = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0xf); + if (hwreg_present(enatc_virt)) { + rv = platform_add_devices(atari_ethernat_devices, + ARRAY_SIZE(atari_ethernat_devices)); + } + iounmap(enatc_virt); + } +#endif + +#ifdef CONFIG_ATARI_ETHERNEC + { + int error; + unsigned char *enec_virt; + enec_virt = (unsigned char *)ioremap((ATARI_ETHERNEC_PHYS_ADDR), 0xf); + if (hwreg_present(enec_virt)) { + error = platform_add_devices(atari_netusbee_devices, + ARRAY_SIZE(atari_netusbee_devices)); + if (error && !rv) + rv = error; + } + iounmap(enec_virt); + } +#endif + + return rv; +} + +arch_initcall(atari_platform_init); diff --git a/arch/m68k/include/asm/MC68328.h b/arch/m68k/include/asm/MC68328.h index a337e56..4ebf098 100644 --- a/arch/m68k/include/asm/MC68328.h +++ b/arch/m68k/include/asm/MC68328.h @@ -293,7 +293,7 @@ /* * Here go the bitmasks themselves */ -#define IMR_MSPIM (1 << SPIM _IRQ_NUM) /* Mask SPI Master interrupt */ +#define IMR_MSPIM (1 << SPIM_IRQ_NUM) /* Mask SPI Master interrupt */ #define IMR_MTMR2 (1 << TMR2_IRQ_NUM) /* Mask Timer 2 interrupt */ #define IMR_MUART (1 << UART_IRQ_NUM) /* Mask UART interrupt */ #define IMR_MWDT 
(1 << WDT_IRQ_NUM) /* Mask Watchdog Timer interrupt */ @@ -327,7 +327,7 @@ #define IWR_ADDR 0xfffff308 #define IWR LONG_REF(IWR_ADDR) -#define IWR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ +#define IWR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */ #define IWR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ #define IWR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ #define IWR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ @@ -357,7 +357,7 @@ #define ISR_ADDR 0xfffff30c #define ISR LONG_REF(ISR_ADDR) -#define ISR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ +#define ISR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */ #define ISR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ #define ISR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ #define ISR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ @@ -391,7 +391,7 @@ #define IPR_ADDR 0xfffff310 #define IPR LONG_REF(IPR_ADDR) -#define IPR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ +#define IPR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */ #define IPR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ #define IPR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ #define IPR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ @@ -757,7 +757,7 @@ /* 'EZ328-compatible definitions */ #define TCN_ADDR TCN1_ADDR -#define TCN TCN +#define TCN TCN1 /* * Timer Unit 1 and 2 Status Registers diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h index c0cb363..d887050 100644 --- a/arch/m68k/include/asm/atarihw.h +++ b/arch/m68k/include/asm/atarihw.h @@ -805,5 +805,11 @@ struct MSTE_RTC { #define mste_rtc ((*(volatile struct MSTE_RTC *)MSTE_RTC_BAS)) +/* +** EtherNAT add-on card for Falcon - combined ethernet and USB adapter +*/ + +#define ATARI_ETHERNAT_PHYS_ADDR 0x80000000 + #endif /* linux/atarihw.h */ diff --git a/arch/m68k/include/asm/atariints.h b/arch/m68k/include/asm/atariints.h index 5fc13bd..953e0ac 100644 --- a/arch/m68k/include/asm/atariints.h +++ b/arch/m68k/include/asm/atariints.h @@ -32,7 +32,7 @@ #define VME_SOURCE_BASE 56 #define VME_MAX_SOURCES 16 -#define NUM_ATARI_SOURCES (VME_SOURCE_BASE+VME_MAX_SOURCES-STMFP_SOURCE_BASE) +#define NUM_ATARI_SOURCES 141 /* convert vector number to int source number */ #define IRQ_VECTOR_TO_SOURCE(v) ((v) - ((v) < 0x20 ? 0x18 : (0x40-8))) @@ -94,6 +94,15 @@ #define IRQ_SCCA_RX (52) #define IRQ_SCCA_SPCOND (54) +/* shared MFP timer D interrupts - hires timer for EtherNEC et al. */ +#define IRQ_MFP_TIMER1 (64) +#define IRQ_MFP_TIMER2 (65) +#define IRQ_MFP_TIMER3 (66) +#define IRQ_MFP_TIMER4 (67) +#define IRQ_MFP_TIMER5 (68) +#define IRQ_MFP_TIMER6 (69) +#define IRQ_MFP_TIMER7 (70) +#define IRQ_MFP_TIMER8 (71) #define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */ #define INT_TICKS 246 /* to make sched_time = 99.902... 
HZ */ diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h index 5c81d0e..bc755bc 100644 --- a/arch/m68k/include/asm/cmpxchg.h +++ b/arch/m68k/include/asm/cmpxchg.h @@ -124,6 +124,9 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old, #define cmpxchg_local(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr)))) + +#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) + #else /* diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h index 12d8fe4..d28fa8f 100644 --- a/arch/m68k/include/asm/delay.h +++ b/arch/m68k/include/asm/delay.h @@ -92,5 +92,28 @@ static inline void __udelay(unsigned long usecs) #define udelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n)) +/* + * nanosecond delay: + * + * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of loops + * per microsecond + * + * 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of + * nanoseconds per loop + * + * So n / ( 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) ) would + * be the number of loops for n nanoseconds + */ + +/* + * The simpler m68k and ColdFire processors do not have a 32*32->64 + * multiply instruction. So we need to handle them a little differently. + * We use a bit of shifting and a single 32*32->32 multiply to get close. + * This is a macro so that the const version can factor out the first + * multiply and shift. + */ +#define HZSCALE (268435456 / (1000000 / HZ)) + +#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000)); #endif /* defined(_M68K_DELAY_H) */ diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h index 4395ffc..8cc8343 100644 --- a/arch/m68k/include/asm/gpio.h +++ b/arch/m68k/include/asm/gpio.h @@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio) return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); } +static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) +{ + int err; + + err = gpio_request(gpio, label); + if (err) + return err; + + if (flags & GPIOF_DIR_IN) + err = gpio_direction_input(gpio); + else + err = gpio_direction_output(gpio, + (flags & GPIOF_INIT_HIGH) ? 
1 : 0); + + if (err) + gpio_free(gpio); + + return err; +} + #endif diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h index a6686d2..ffdf54f4 100644 --- a/arch/m68k/include/asm/io_mm.h +++ b/arch/m68k/include/asm/io_mm.h @@ -63,6 +63,23 @@ #endif #endif /* AMIGA_PCMCIA */ +#ifdef CONFIG_ATARI_ROM_ISA + +#define enec_isa_read_base 0xfffa0000 +#define enec_isa_write_base 0xfffb0000 + +#define ENEC_ISA_IO_B(ioaddr) (enec_isa_read_base+((((unsigned long)(ioaddr))&0x7F)<<9)) +#define ENEC_ISA_IO_W(ioaddr) (enec_isa_read_base+((((unsigned long)(ioaddr))&0x7F)<<9)) +#define ENEC_ISA_MEM_B(madr) (enec_isa_read_base+((((unsigned long)(madr))&0x7F)<<9)) +#define ENEC_ISA_MEM_W(madr) (enec_isa_read_base+((((unsigned long)(madr))&0x7F)<<9)) + +#ifndef MULTI_ISA +#define MULTI_ISA 0 +#else +#undef MULTI_ISA +#define MULTI_ISA 1 +#endif +#endif /* ATARI_ROM_ISA */ #if defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE) @@ -111,14 +128,15 @@ void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len); #define readw(addr) in_le16(addr) #define writew(v, addr) out_le16((addr), (v)) -#elif defined(CONFIG_ISA) +#elif defined(CONFIG_ISA) || defined(CONFIG_ATARI_ROM_ISA) #if MULTI_ISA == 0 #undef MULTI_ISA #endif -#define ISA_TYPE_Q40 (1) -#define ISA_TYPE_AG (2) +#define ISA_TYPE_Q40 (1) +#define ISA_TYPE_AG (2) +#define ISA_TYPE_ENEC (3) #if defined(CONFIG_Q40) && !defined(MULTI_ISA) #define ISA_TYPE ISA_TYPE_Q40 @@ -128,6 +146,10 @@ void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len); #define ISA_TYPE ISA_TYPE_AG #define ISA_SEX 1 #endif +#if defined(CONFIG_ATARI_ROM_ISA) && !defined(MULTI_ISA) +#define ISA_TYPE ISA_TYPE_ENEC +#define ISA_SEX 0 +#endif #ifdef MULTI_ISA extern int isa_type; @@ -152,6 +174,9 @@ static inline u8 __iomem *isa_itb(unsigned long addr) #ifdef CONFIG_AMIGA_PCMCIA case ISA_TYPE_AG: return (u8 __iomem *)AG_ISA_IO_B(addr); #endif +#ifdef CONFIG_ATARI_ROM_ISA + case ISA_TYPE_ENEC: return (u8 __iomem *)ENEC_ISA_IO_B(addr); +#endif default: return NULL; /* avoid warnings, just in case */ } } @@ -165,6 +190,9 @@ static inline u16 __iomem *isa_itw(unsigned long addr) #ifdef CONFIG_AMIGA_PCMCIA case ISA_TYPE_AG: return (u16 __iomem *)AG_ISA_IO_W(addr); #endif +#ifdef CONFIG_ATARI_ROM_ISA + case ISA_TYPE_ENEC: return (u16 __iomem *)ENEC_ISA_IO_W(addr); +#endif default: return NULL; /* avoid warnings, just in case */ } } @@ -188,6 +216,9 @@ static inline u8 __iomem *isa_mtb(unsigned long addr) #ifdef CONFIG_AMIGA_PCMCIA case ISA_TYPE_AG: return (u8 __iomem *)addr; #endif +#ifdef CONFIG_ATARI_ROM_ISA + case ISA_TYPE_ENEC: return (u8 __iomem *)ENEC_ISA_MEM_B(addr); +#endif default: return NULL; /* avoid warnings, just in case */ } } @@ -201,6 +232,9 @@ static inline u16 __iomem *isa_mtw(unsigned long addr) #ifdef CONFIG_AMIGA_PCMCIA case ISA_TYPE_AG: return (u16 __iomem *)addr; #endif +#ifdef CONFIG_ATARI_ROM_ISA + case ISA_TYPE_ENEC: return (u16 __iomem *)ENEC_ISA_MEM_W(addr); +#endif default: return NULL; /* avoid warnings, just in case */ } } @@ -222,6 +256,36 @@ static inline u16 __iomem *isa_mtw(unsigned long addr) (ISA_SEX ? out_be16(isa_mtw((unsigned long)(p)),(val)) \ : out_le16(isa_mtw((unsigned long)(p)),(val))) +#ifdef CONFIG_ATARI_ROM_ISA +#define isa_rom_inb(port) rom_in_8(isa_itb(port)) +#define isa_rom_inw(port) \ + (ISA_SEX ? rom_in_be16(isa_itw(port)) \ + : rom_in_le16(isa_itw(port))) + +#define isa_rom_outb(val, port) rom_out_8(isa_itb(port), (val)) +#define isa_rom_outw(val, port) \ + (ISA_SEX ? 
rom_out_be16(isa_itw(port), (val)) \ + : rom_out_le16(isa_itw(port), (val))) + +#define isa_rom_readb(p) rom_in_8(isa_mtb((unsigned long)(p))) +#define isa_rom_readw(p) \ + (ISA_SEX ? rom_in_be16(isa_mtw((unsigned long)(p))) \ + : rom_in_le16(isa_mtw((unsigned long)(p)))) +#define isa_rom_readw_swap(p) \ + (ISA_SEX ? rom_in_le16(isa_mtw((unsigned long)(p))) \ + : rom_in_be16(isa_mtw((unsigned long)(p)))) +#define isa_rom_readw_raw(p) rom_in_be16(isa_mtw((unsigned long)(p))) + +#define isa_rom_writeb(val, p) rom_out_8(isa_mtb((unsigned long)(p)), (val)) +#define isa_rom_writew(val, p) \ + (ISA_SEX ? rom_out_be16(isa_mtw((unsigned long)(p)), (val)) \ + : rom_out_le16(isa_mtw((unsigned long)(p)), (val))) +#define isa_rom_writew_swap(val, p) \ + (ISA_SEX ? rom_out_le16(isa_mtw((unsigned long)(p)), (val)) \ + : rom_out_be16(isa_mtw((unsigned long)(p)), (val))) +#define isa_rom_writew_raw(val, p) rom_out_be16(isa_mtw((unsigned long)(p)), (val)) +#endif /* CONFIG_ATARI_ROM_ISA */ + static inline void isa_delay(void) { switch(ISA_TYPE) @@ -232,6 +296,9 @@ static inline void isa_delay(void) #ifdef CONFIG_AMIGA_PCMCIA case ISA_TYPE_AG: break; #endif +#ifdef CONFIG_ATARI_ROM_ISA + case ISA_TYPE_ENEC: break; +#endif default: break; /* avoid warnings */ } } @@ -263,6 +330,29 @@ static inline void isa_delay(void) raw_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)<<1)) +#ifdef CONFIG_ATARI_ROM_ISA +#define isa_rom_inb_p(p) ({ u8 _v = isa_rom_inb(p); isa_delay(); _v; }) +#define isa_rom_inw_p(p) ({ u16 _v = isa_rom_inw(p); isa_delay(); _v; }) +#define isa_rom_outb_p(v, p) ({ isa_rom_outb((v), (p)); isa_delay(); }) +#define isa_rom_outw_p(v, p) ({ isa_rom_outw((v), (p)); isa_delay(); }) + +#define isa_rom_insb(port, buf, nr) raw_rom_insb(isa_itb(port), (u8 *)(buf), (nr)) + +#define isa_rom_insw(port, buf, nr) \ + (ISA_SEX ? raw_rom_insw(isa_itw(port), (u16 *)(buf), (nr)) : \ + raw_rom_insw_swapw(isa_itw(port), (u16 *)(buf), (nr))) + +#define isa_rom_outsb(port, buf, nr) raw_rom_outsb(isa_itb(port), (u8 *)(buf), (nr)) + +#define isa_rom_outsw(port, buf, nr) \ + (ISA_SEX ? raw_rom_outsw(isa_itw(port), (u16 *)(buf), (nr)) : \ + raw_rom_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr))) +#endif /* CONFIG_ATARI_ROM_ISA */ + +#endif /* CONFIG_ISA || CONFIG_ATARI_ROM_ISA */ + + +#if defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) #define inb isa_inb #define inb_p isa_inb_p #define outb isa_outb @@ -285,9 +375,43 @@ static inline void isa_delay(void) #define readw isa_readw #define writeb isa_writeb #define writew isa_writew +#endif /* CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */ -#else /* CONFIG_ISA */ - +#ifdef CONFIG_ATARI_ROM_ISA +/* + * kernel with both ROM port ISA and IDE compiled in, those have + * conflicting defs for in/out. Simply consider port < 1024 + * ROM port ISA and everything else regular ISA for IDE. read,write defined + * below. + */ +#define inb(port) ((port) < 1024 ? isa_rom_inb(port) : in_8(port)) +#define inb_p(port) ((port) < 1024 ? isa_rom_inb_p(port) : in_8(port)) +#define inw(port) ((port) < 1024 ? isa_rom_inw(port) : in_le16(port)) +#define inw_p(port) ((port) < 1024 ? isa_rom_inw_p(port) : in_le16(port)) +#define inl isa_inl +#define inl_p isa_inl_p + +#define outb(val, port) ((port) < 1024 ? isa_rom_outb((val), (port)) : out_8((port), (val))) +#define outb_p(val, port) ((port) < 1024 ? isa_rom_outb_p((val), (port)) : out_8((port), (val))) +#define outw(val, port) ((port) < 1024 ? isa_rom_outw((val), (port)) : out_le16((port), (val))) +#define outw_p(val, port) ((port) < 1024 ? 
isa_rom_outw_p((val), (port)) : out_le16((port), (val))) +#define outl isa_outl +#define outl_p isa_outl_p + +#define insb(port, buf, nr) ((port) < 1024 ? isa_rom_insb((port), (buf), (nr)) : isa_insb((port), (buf), (nr))) +#define insw(port, buf, nr) ((port) < 1024 ? isa_rom_insw((port), (buf), (nr)) : isa_insw((port), (buf), (nr))) +#define insl isa_insl +#define outsb(port, buf, nr) ((port) < 1024 ? isa_rom_outsb((port), (buf), (nr)) : isa_outsb((port), (buf), (nr))) +#define outsw(port, buf, nr) ((port) < 1024 ? isa_rom_outsw((port), (buf), (nr)) : isa_outsw((port), (buf), (nr))) +#define outsl isa_outsl + +#define readb(addr) in_8(addr) +#define writeb(val, addr) out_8((addr), (val)) +#define readw(addr) in_le16(addr) +#define writew(val, addr) out_le16((addr), (val)) +#endif /* CONFIG_ATARI_ROM_ISA */ + +#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) /* * We need to define dummy functions for GENERIC_IOMAP support. */ @@ -319,7 +443,7 @@ static inline void isa_delay(void) #define readw(addr) in_le16(addr) #define writew(val,addr) out_le16((addr),(val)) -#endif /* CONFIG_ISA */ +#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */ #define readl(addr) in_le32(addr) #define writel(val,addr) out_le32((addr),(val)) diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h index c1155f0..81ca118 100644 --- a/arch/m68k/include/asm/irq.h +++ b/arch/m68k/include/asm/irq.h @@ -6,12 +6,16 @@ * different m68k hosts compiled into the kernel. * Currently the Atari has 72 and the Amiga 24, but if both are * supported in the kernel it is better to make room for 72. + * With EtherNAT add-on card on Atari, the highest interrupt + * number is 140 so NR_IRQS needs to be 141. */ #if defined(CONFIG_COLDFIRE) #define NR_IRQS 256 #elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X) #define NR_IRQS 200 -#elif defined(CONFIG_ATARI) || defined(CONFIG_MAC) +#elif defined(CONFIG_ATARI) +#define NR_IRQS 141 +#elif defined(CONFIG_MAC) #define NR_IRQS 72 #elif defined(CONFIG_Q40) #define NR_IRQS 43 diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h index d9eb983..932faa3 100644 --- a/arch/m68k/include/asm/raw_io.h +++ b/arch/m68k/include/asm/raw_io.h @@ -10,7 +10,7 @@ #ifdef __KERNEL__ -#include <asm/types.h> +#include <asm/byteorder.h> /* Values for nocacheflag and cmode */ @@ -60,6 +60,57 @@ extern void __iounmap(void *addr, unsigned long size); #define __raw_writew(val,addr) out_be16((addr),(val)) #define __raw_writel(val,addr) out_be32((addr),(val)) +/* + * Atari ROM port (cartridge port) ISA adapter, used for the EtherNEC NE2000 + * network card driver. + * The ISA adapter connects address lines A9-A13 to ISA address lines A0-A4, + * and hardwires the rest of the ISA addresses for a base address of 0x300. + * + * Data lines D8-D15 are connected to ISA data lines D0-D7 for reading. + * For writes, address lines A1-A8 are latched to ISA data lines D0-D7 + * (meaning the bit pattern on A1-A8 can be read back as byte). + * + * Read and write operations are distinguished by the base address used: + * reads are from the ROM A side range, writes are through the B side range + * addresses (A side base + 0x10000). + * + * Reads and writes are byte only. + * + * 16 bit reads and writes are necessary for the NetUSBee adapter's USB + * chipset - 16 bit words are read straight off the ROM port while 16 bit + * reads are split into two byte writes. 
The low byte is latched to the + * NetUSBee buffer by a read from the _read_ window (with the data pattern + * asserted as A1-A8 address pattern). The high byte is then written to the + * write range as usual, completing the write cycle. + */ + +#if defined(CONFIG_ATARI_ROM_ISA) +#define rom_in_8(addr) \ + ({ u16 __v = (*(__force volatile u16 *) (addr)); __v >>= 8; __v; }) +#define rom_in_be16(addr) \ + ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; }) +#define rom_in_le16(addr) \ + ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; }) + +#define rom_out_8(addr, b) \ + ({u8 __w, __v = (b); u32 _addr = ((u32) (addr)); \ + __w = ((*(__force volatile u8 *) ((_addr | 0x10000) + (__v<<1)))); }) +#define rom_out_be16(addr, w) \ + ({u16 __w, __v = (w); u32 _addr = ((u32) (addr)); \ + __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v & 0xFF)<<1)))); \ + __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v >> 8)<<1)))); }) +#define rom_out_le16(addr, w) \ + ({u16 __w, __v = (w); u32 _addr = ((u32) (addr)); \ + __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v >> 8)<<1)))); \ + __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v & 0xFF)<<1)))); }) + +#define raw_rom_inb rom_in_8 +#define raw_rom_inw rom_in_be16 + +#define raw_rom_outb(val, port) rom_out_8((port), (val)) +#define raw_rom_outw(val, port) rom_out_be16((port), (val)) +#endif /* CONFIG_ATARI_ROM_ISA */ + static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len) { unsigned int i; @@ -342,6 +393,62 @@ static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf, : "d0", "a0", "a1", "d6"); } + +#if defined(CONFIG_ATARI_ROM_ISA) +static inline void raw_rom_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len) +{ + unsigned int i; + + for (i = 0; i < len; i++) + *buf++ = rom_in_8(port); +} + +static inline void raw_rom_outsb(volatile u8 __iomem *port, const u8 *buf, + unsigned int len) +{ + unsigned int i; + + for (i = 0; i < len; i++) + rom_out_8(port, *buf++); +} + +static inline void raw_rom_insw(volatile u16 __iomem *port, u16 *buf, + unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++) + *buf++ = rom_in_be16(port); +} + +static inline void raw_rom_outsw(volatile u16 __iomem *port, const u16 *buf, + unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++) + rom_out_be16(port, *buf++); +} + +static inline void raw_rom_insw_swapw(volatile u16 __iomem *port, u16 *buf, + unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++) + *buf++ = rom_in_le16(port); +} + +static inline void raw_rom_outsw_swapw(volatile u16 __iomem *port, const u16 *buf, + unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++) + rom_out_le16(port, *buf++); +} +#endif /* CONFIG_ATARI_ROM_ISA */ + #endif /* __KERNEL__ */ #endif /* _RAW_IO_H */ diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h index 3219845..9aea9f1 100644 --- a/arch/m68k/include/asm/string.h +++ b/arch/m68k/include/asm/string.h @@ -4,15 +4,6 @@ #include <linux/types.h> #include <linux/compiler.h> -static inline size_t __kernel_strlen(const char *s) -{ - const char *sc; - - for (sc = s; *sc++; ) - ; - return sc - s - 1; -} - static inline char *__kernel_strcpy(char *dest, const char *src) { char *xdest = dest; @@ -27,11 +18,6 @@ static inline char *__kernel_strcpy(char *dest, const char *src) #ifndef __IN_STRING_C -#define __HAVE_ARCH_STRLEN -#define strlen(s) (__builtin_constant_p(s) ? 
\ - __builtin_strlen(s) : \ - __kernel_strlen(s)) - #define __HAVE_ARCH_STRNLEN static inline size_t strnlen(const char *s, size_t count) { diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index d538694..c55ff71 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -51,40 +51,16 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sw->retpc; } -/* - * The idle loop on an m68k.. - */ -static void default_idle(void) +void arch_cpu_idle(void) { - if (!need_resched()) #if defined(MACH_ATARI_ONLY) - /* block out HSYNC on the atari (falcon) */ - __asm__("stop #0x2200" : : : "cc"); + /* block out HSYNC on the atari (falcon) */ + __asm__("stop #0x2200" : : : "cc"); #else - __asm__("stop #0x2000" : : : "cc"); + __asm__("stop #0x2000" : : : "cc"); #endif } -void (*idle)(void) = default_idle; - -/* - * The idle thread. There's no useful work to be - * done, so just try to conserve power and have a - * low exit latency (ie sit in a loop waiting for - * somebody to say that they'd like to reschedule) - */ -void cpu_idle(void) -{ - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - while (!need_resched()) - idle(); - rcu_idle_exit(); - schedule_preempt_disabled(); - } -} - void machine_restart(char * __unused) { if (mach_reset) diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index 80cfbe5..e67e531 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -381,6 +381,12 @@ void __init setup_arch(char **cmdline_p) isa_sex = 1; } #endif +#ifdef CONFIG_ATARI_ROM_ISA + if (MACH_IS_ATARI) { + isa_type = ISA_TYPE_ENEC; + isa_sex = 0; + } +#endif #endif } diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 71fb299..911ba47 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -57,6 +57,9 @@ void (*mach_reset)(void); void (*mach_halt)(void); void (*mach_power_off)(void); +#ifdef CONFIG_M68000 +#define CPU_NAME "MC68000" +#endif #ifdef CONFIG_M68328 #define CPU_NAME "MC68328" #endif diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index f32ab22..88fcd8c 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -992,18 +992,6 @@ void show_stack(struct task_struct *task, unsigned long *stack) } /* - * The architecture-independent backtrace generator - */ -void dump_stack(void) -{ - unsigned long stack; - - show_trace(&stack); -} - -EXPORT_SYMBOL(dump_stack); - -/* * The vector number returned in the frame pointer may also contain * the "fs" (Fault Status) bits on ColdFire. These are in the bottom * 2 bits, and upper 2 bits. 
So we need to mask out the real vector diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c index b9a57ab..4d61fa8 100644 --- a/arch/m68k/lib/string.c +++ b/arch/m68k/lib/string.c @@ -17,6 +17,6 @@ EXPORT_SYMBOL(strcpy); char *strcat(char *dest, const char *src) { - return __kernel_strcpy(dest + __kernel_strlen(dest), src); + return __kernel_strcpy(dest + strlen(dest), src); } EXPORT_SYMBOL(strcat); diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index afd8106f..1af2ca3 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -110,18 +110,7 @@ void __init paging_init(void) void free_initmem(void) { #ifndef CONFIG_MMU_SUN3 - unsigned long addr; - - addr = (unsigned long) __init_begin; - for (; addr < ((unsigned long) __init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n", - (addr - (unsigned long) __init_begin) >> 10, - (unsigned int) __init_begin, (unsigned int) __init_end); + free_initmem_default(0); #endif /* CONFIG_MMU_SUN3 */ } @@ -188,7 +177,7 @@ void __init mem_init(void) } } -#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) +#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) /* insert pointer tables allocated so far into the tablelist */ init_pointer_table((unsigned long)kernel_pg_dir); for (i = 0; i < PTRS_PER_PGD; i++) { @@ -213,15 +202,6 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - pr_notice("Freeing initrd memory: %dk freed\n", - pages << (PAGE_SHIFT - 10)); + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/m68k/platform/coldfire/m528x.c b/arch/m68k/platform/coldfire/m528x.c index 83b7dad..b03a9d2 100644 --- a/arch/m68k/platform/coldfire/m528x.c +++ b/arch/m68k/platform/coldfire/m528x.c @@ -69,7 +69,7 @@ static void __init m528x_uarts_init(void) u8 port; /* make sure PUAPAR is set for UART0 and UART1 */ - port = readb(MCF5282_GPIO_PUAPAR); + port = readb(MCFGPIO_PUAPAR); port |= 0x03 | (0x03 << 2); writeb(port, MCFGPIO_PUAPAR); } diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index afc8973..b06b418 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -25,6 +25,7 @@ config METAG select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_MOD_ARCH_SPECIFIC + select HAVE_OPROFILE select HAVE_PERF_EVENTS select HAVE_SYSCALL_TRACEPOINTS select IRQ_DOMAIN @@ -209,6 +210,9 @@ config METAG_PERFCOUNTER_IRQS When disabled, Performance Counters information will be collected based on Timer Interrupt. 
+config HW_PERF_EVENTS + def_bool METAG_PERFCOUNTER_IRQS && PERF_EVENTS + config METAG_DA bool "DA support" help diff --git a/arch/metag/Makefile b/arch/metag/Makefile index 81bd6a1..b566116 100644 --- a/arch/metag/Makefile +++ b/arch/metag/Makefile @@ -49,6 +49,8 @@ core-y += arch/metag/mm/ libs-y += arch/metag/lib/ libs-y += arch/metag/tbx/ +drivers-$(CONFIG_OPROFILE) += arch/metag/oprofile/ + boot := arch/metag/boot boot_targets += uImage diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile index e0b5afd..dbd95217 100644 --- a/arch/metag/boot/dts/Makefile +++ b/arch/metag/boot/dts/Makefile @@ -4,13 +4,17 @@ dtb-y += skeleton.dtb builtindtb-y := skeleton ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"") - builtindtb-y := $(CONFIG_METAG_BUILTIN_DTB_NAME) + builtindtb-y := $(patsubst "%",%,$(CONFIG_METAG_BUILTIN_DTB_NAME)) endif -obj-$(CONFIG_METAG_BUILTIN_DTB) += $(patsubst "%",%,$(builtindtb-y)).dtb.o + +dtb-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb +obj-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb.o targets += dtbs targets += $(dtb-y) +.SECONDARY: $(obj)/$(builtindtb-y).dtb.S + dtbs: $(addprefix $(obj)/, $(dtb-y)) -clean-files += *.dtb +clean-files += *.dtb *.dtb.S diff --git a/arch/metag/configs/meta1_defconfig b/arch/metag/configs/meta1_defconfig index c35a75e..01cd67e 100644 --- a/arch/metag/configs/meta1_defconfig +++ b/arch/metag/configs/meta1_defconfig @@ -1,6 +1,5 @@ # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set -CONFIG_LOG_BUF_SHIFT=13 CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_KALLSYMS_ALL=y diff --git a/arch/metag/configs/meta2_defconfig b/arch/metag/configs/meta2_defconfig index fb31484..643392b 100644 --- a/arch/metag/configs/meta2_defconfig +++ b/arch/metag/configs/meta2_defconfig @@ -1,7 +1,6 @@ # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_SYSVIPC=y -CONFIG_LOG_BUF_SHIFT=13 CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_KALLSYMS_ALL=y diff --git a/arch/metag/configs/meta2_smp_defconfig b/arch/metag/configs/meta2_smp_defconfig index 6c7b777..f330673 100644 --- a/arch/metag/configs/meta2_smp_defconfig +++ b/arch/metag/configs/meta2_smp_defconfig @@ -1,7 +1,6 @@ # CONFIG_LOCALVERSION_AUTO is not set # CONFIG_SWAP is not set CONFIG_SYSVIPC=y -CONFIG_LOG_BUF_SHIFT=13 CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_KALLSYMS_ALL=y diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h index d63b9d0..d2baf69 100644 --- a/arch/metag/include/asm/elf.h +++ b/arch/metag/include/asm/elf.h @@ -100,9 +100,6 @@ typedef unsigned long elf_fpregset_t; #define ELF_PLATFORM (NULL) -#define SET_PERSONALITY(ex) \ - set_personality(PER_LINUX | (current->personality & (~PER_MASK))) - #define STACK_RND_MASK (0) #ifdef CONFIG_METAG_USER_TCM diff --git a/arch/metag/include/asm/metag_mem.h b/arch/metag/include/asm/metag_mem.h index 3f7b54d..aa5a076 100644 --- a/arch/metag/include/asm/metag_mem.h +++ b/arch/metag/include/asm/metag_mem.h @@ -700,6 +700,9 @@ #define SYSC_xCPARTG_AND_S 8 #define SYSC_xCPARTL_OR_BITS 0x000F0000 /* Ors into top 4 bits */ #define SYSC_xCPARTL_OR_S 16 +#ifdef METAC_2_1 +#define SYSC_DCPART_GCON_BIT 0x00100000 /* Coherent shared local */ +#endif /* METAC_2_1 */ #define SYSC_xCPARTG_OR_BITS 0x0F000000 /* Ors into top 4 bits */ #define SYSC_xCPARTG_OR_S 24 #define SYSC_CWRMODE_BIT 0x80000000 /* Write cache mode bit */ diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h index 0ecd34d..7c4a330 100644 --- 
a/arch/metag/include/asm/thread_info.h +++ b/arch/metag/include/asm/thread_info.h @@ -150,6 +150,4 @@ static inline int kstack_end(void *addr) #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild index 876c71f..84e09fe 100644 --- a/arch/metag/include/uapi/asm/Kbuild +++ b/arch/metag/include/uapi/asm/Kbuild @@ -2,6 +2,7 @@ include include/uapi/asm-generic/Kbuild.asm header-y += byteorder.h +header-y += ech.h header-y += ptrace.h header-y += resource.h header-y += sigcontext.h diff --git a/arch/metag/include/uapi/asm/ech.h b/arch/metag/include/uapi/asm/ech.h new file mode 100644 index 0000000..ac94d1c --- /dev/null +++ b/arch/metag/include/uapi/asm/ech.h @@ -0,0 +1,15 @@ +#ifndef _UAPI_METAG_ECH_H +#define _UAPI_METAG_ECH_H + +/* + * These bits can be set in the top half of the D0.8 register when DSP context + * switching is enabled, in order to support partial DSP context save/restore. + */ + +#define TBICTX_XEXT_BIT 0x1000 /* Enable extended context save */ +#define TBICTX_XTDP_BIT 0x0800 /* DSP accumulators/RAM/templates */ +#define TBICTX_XHL2_BIT 0x0400 /* Hardware loops */ +#define TBICTX_XAXX_BIT 0x0200 /* Extended AX registers (A*.4-7) */ +#define TBICTX_XDX8_BIT 0x0100 /* Extended DX registers (D*.8-15) */ + +#endif /* _UAPI_METAG_ECH_H */ diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c index 3a589dfb..954548b 100644 --- a/arch/metag/kernel/cachepart.c +++ b/arch/metag/kernel/cachepart.c @@ -24,15 +24,21 @@ unsigned int get_dcache_size(void) { unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); - return 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS) - >> METAC_CORECFG2_DCSZ_S); + unsigned int sz = 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS) + >> METAC_CORECFG2_DCSZ_S); + if (config2 & METAC_CORECFG2_DCSMALL_BIT) + sz >>= 6; + return sz; } unsigned int get_icache_size(void) { unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); - return 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS) - >> METAC_CORE_C2ICSZ_S); + unsigned int sz = 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS) + >> METAC_CORE_C2ICSZ_S); + if (config2 & METAC_CORECFG2_ICSMALL_BIT) + sz >>= 6; + return sz; } unsigned int get_global_dcache_size(void) @@ -61,7 +67,7 @@ static unsigned int get_thread_cache_size(unsigned int cache, int thread_id) return 0; #if PAGE_OFFSET >= LINGLOBAL_BASE /* Checking for global cache */ - cache_size = (cache == DCACHE ? get_global_dache_size() : + cache_size = (cache == DCACHE ? get_global_dcache_size() : get_global_icache_size()); offset = 8; #else diff --git a/arch/metag/kernel/da.c b/arch/metag/kernel/da.c index 52aabb6..a35dbed 100644 --- a/arch/metag/kernel/da.c +++ b/arch/metag/kernel/da.c @@ -5,12 +5,14 @@ */ +#include <linux/export.h> #include <linux/io.h> #include <linux/kernel.h> #include <asm/da.h> #include <asm/metag_mem.h> bool _metag_da_present; +EXPORT_SYMBOL_GPL(_metag_da_present); int __init metag_da_probe(void) { diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S index 969dffa..713f71d 100644 --- a/arch/metag/kernel/head.S +++ b/arch/metag/kernel/head.S @@ -1,6 +1,7 @@ ! 
Copyright 2005,2006,2007,2009 Imagination Technologies #include <linux/init.h> +#include <asm/metag_mem.h> #include <generated/asm-offsets.h> #undef __exit @@ -48,6 +49,13 @@ __exit: .global _secondary_startup .type _secondary_startup,function _secondary_startup: +#if CONFIG_PAGE_OFFSET < LINGLOBAL_BASE + ! In case GCOn has just been turned on we need to fence any writes that + ! the boot thread might have performed prior to coherency taking effect. + MOVT D0Re0,#HI(LINSYSEVENT_WR_ATOMIC_UNLOCK) + MOV D1Re0,#0 + SETD [D0Re0], D1Re0 +#endif MOVT A0StP,#HI(_secondary_data_stack) ADD A0StP,A0StP,#LO(_secondary_data_stack) GETD A0StP,[A0StP] diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c index a876d5f..3665694 100644 --- a/arch/metag/kernel/perf/perf_event.c +++ b/arch/metag/kernel/perf/perf_event.c @@ -22,9 +22,9 @@ #include <linux/slab.h> #include <asm/core_reg.h> -#include <asm/hwthread.h> #include <asm/io.h> #include <asm/irq.h> +#include <asm/processor.h> #include "perf_event.h" @@ -40,10 +40,10 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); /* PMU admin */ const char *perf_pmu_name(void) { - if (metag_pmu) - return metag_pmu->pmu.name; + if (!metag_pmu) + return NULL; - return NULL; + return metag_pmu->name; } EXPORT_SYMBOL_GPL(perf_pmu_name); @@ -171,6 +171,7 @@ static int metag_pmu_event_init(struct perf_event *event) switch (event->attr.type) { case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: + case PERF_TYPE_RAW: err = _hw_perf_event_init(event); break; @@ -211,9 +212,10 @@ again: /* * Calculate the delta and add it to the counter. */ - delta = new_raw_count - prev_raw_count; + delta = (new_raw_count - prev_raw_count) & MAX_PERIOD; local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); } int metag_pmu_event_set_period(struct perf_event *event, @@ -223,6 +225,10 @@ int metag_pmu_event_set_period(struct perf_event *event, s64 period = hwc->sample_period; int ret = 0; + /* The period may have been changed */ + if (unlikely(period != hwc->last_period)) + left += period - hwc->last_period; + if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); @@ -240,8 +246,10 @@ int metag_pmu_event_set_period(struct perf_event *event, if (left > (s64)metag_pmu->max_period) left = metag_pmu->max_period; - if (metag_pmu->write) - metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD); + if (metag_pmu->write) { + local64_set(&hwc->prev_count, -(s32)left); + metag_pmu->write(idx, -left & MAX_PERIOD); + } perf_event_update_userpage(event); @@ -549,6 +557,10 @@ static int _hw_perf_event_init(struct perf_event *event) if (err) return err; break; + + case PERF_TYPE_RAW: + mapping = attr->config; + break; } /* Return early if the event is unsupported */ @@ -610,15 +622,13 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx) WARN_ONCE((config != 0x100), "invalid configuration (%d) for counter (%d)\n", config, idx); - - /* Reset the cycle count */ - __core_reg_set(TXTACTCYC, 0); + local64_set(&event->prev_count, __core_reg_get(TXTACTCYC)); goto unlock; } /* Check for a core internal or performance channel event. 
*/ if (tmp) { - void *perf_addr = (void *)PERF_COUNT(idx); + void *perf_addr; /* * Anything other than a cycle count will write the low- @@ -632,9 +642,14 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx) case 0xf0: perf_addr = (void *)PERF_CHAN(idx); break; + + default: + perf_addr = NULL; + break; } - metag_out32((tmp & 0x0f), perf_addr); + if (perf_addr) + metag_out32((config & 0x0f), perf_addr); /* * Now we use the high nibble as the performance event to @@ -643,13 +658,21 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx) config = tmp >> 4; } - /* - * Enabled counters start from 0. Early cores clear the count on - * write but newer cores don't, so we make sure that the count is - * set to 0. - */ tmp = ((config & 0xf) << 28) | - ((1 << 24) << cpu_2_hwthread_id[get_cpu()]); + ((1 << 24) << hard_processor_id()); + if (metag_pmu->max_period) + /* + * Cores supporting overflow interrupts may have had the counter + * set to a specific value that needs preserving. + */ + tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff; + else + /* + * Older cores reset the counter on write, so prev_count needs + * resetting too so we can calculate a correct delta. + */ + local64_set(&event->prev_count, 0); + metag_out32(tmp, PERF_COUNT(idx)); unlock: raw_spin_unlock_irqrestore(&events->pmu_lock, flags); @@ -693,9 +716,8 @@ static u64 metag_pmu_read_counter(int idx) { u32 tmp = 0; - /* The act of reading the cycle counter also clears it */ if (METAG_INST_COUNTER == idx) { - __core_reg_swap(TXTACTCYC, tmp); + tmp = __core_reg_get(TXTACTCYC); goto out; } @@ -764,10 +786,16 @@ static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev) /* * Enable the counter again once core overflow processing has - * completed. + * completed. Note the counter value may have been modified while it was + * inactive to set it up ready for the next interrupt. */ - if (!perf_event_overflow(event, &sampledata, regs)) + if (!perf_event_overflow(event, &sampledata, regs)) { + __global_lock2(flags); + counter = (counter & 0xff000000) | + (metag_in32(PERF_COUNT(idx)) & 0x00ffffff); metag_out32(counter, PERF_COUNT(idx)); + __global_unlock2(flags); + } return IRQ_HANDLED; } @@ -830,7 +858,7 @@ static int __init init_hw_perf_events(void) metag_pmu->max_period = 0; } - metag_pmu->name = "Meta 2"; + metag_pmu->name = "meta2"; metag_pmu->version = version; metag_pmu->pmu = pmu; } diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c index c6efe62..483dff9 100644 --- a/arch/metag/kernel/process.c +++ b/arch/metag/kernel/process.c @@ -22,6 +22,7 @@ #include <linux/pm.h> #include <linux/syscalls.h> #include <linux/uaccess.h> +#include <linux/smp.h> #include <asm/core_reg.h> #include <asm/user_gateway.h> #include <asm/tcm.h> @@ -31,7 +32,7 @@ /* * Wait for the next interrupt and enable local interrupts */ -static inline void arch_idle(void) +void arch_cpu_idle(void) { int tmp; @@ -59,36 +60,12 @@ static inline void arch_idle(void) : "r" (get_trigger_mask())); } -void cpu_idle(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); - - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - - while (!need_resched()) { - /* - * We need to disable interrupts here to ensure we don't - * miss a wakeup call. 
- */ - local_irq_disable(); - if (!need_resched()) { #ifdef CONFIG_HOTPLUG_CPU - if (cpu_is_offline(smp_processor_id())) - cpu_die(); -#endif - arch_idle(); - } else { - local_irq_enable(); - } - } - - rcu_idle_exit(); - tick_nohz_idle_exit(); - schedule_preempt_disabled(); - } +void arch_cpu_idle_dead(void) +{ + cpu_die(); } +#endif void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); @@ -152,6 +129,8 @@ void show_regs(struct pt_regs *regs) "D1.7 " }; + show_regs_print_info(KERN_INFO); + pr_info(" pt_regs @ %p\n", regs); pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask); pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags, diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c index 47a8828..7563628 100644 --- a/arch/metag/kernel/ptrace.c +++ b/arch/metag/kernel/ptrace.c @@ -288,10 +288,36 @@ static int metag_rp_state_set(struct task_struct *target, return metag_rp_state_copyin(regs, pos, count, kbuf, ubuf); } +static int metag_tls_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + void __user *tls = target->thread.tls_ptr; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); +} + +static int metag_tls_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + void __user *tls; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); + if (ret) + return ret; + + target->thread.tls_ptr = tls; + return ret; +} + enum metag_regset { REGSET_GENERAL, REGSET_CBUF, REGSET_READPIPE, + REGSET_TLS, }; static const struct user_regset metag_regsets[] = { @@ -319,6 +345,14 @@ static const struct user_regset metag_regsets[] = { .get = metag_rp_state_get, .set = metag_rp_state_set, }, + [REGSET_TLS] = { + .core_note_type = NT_METAG_TLS, + .n = 1, + .size = sizeof(void *), + .align = sizeof(void *), + .get = metag_tls_get, + .set = metag_tls_set, + }, }; static const struct user_regset_view user_metag_view = { diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c index 8792461..4f5726f 100644 --- a/arch/metag/kernel/setup.c +++ b/arch/metag/kernel/setup.c @@ -124,6 +124,7 @@ struct machine_desc *machine_desc __initdata; u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_HWTHREAD_ID }; +EXPORT_SYMBOL_GPL(cpu_2_hwthread_id); /* * Map a hardware thread ID to a Linux CPU number diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index 4b6d1f14..f443ec9 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c @@ -28,6 +28,8 @@ #include <asm/cachepart.h> #include <asm/core_reg.h> #include <asm/cpu.h> +#include <asm/global_lock.h> +#include <asm/metag_mem.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -37,6 +39,9 @@ #include <asm/hwthread.h> #include <asm/traps.h> +#define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n)) +#define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n)) + DECLARE_PER_CPU(PTBI, pTBI); void *secondary_data_stack; @@ -99,6 +104,114 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle) return 0; } +/** + * describe_cachepart_change: describe a change to cache partitions. + * @thread: Hardware thread number. + * @label: Label of cache type, e.g. "dcache" or "icache". + * @sz: Total size of the cache. + * @old: Old cache partition configuration (*CPART* register). 
+ * @new: New cache partition configuration (*CPART* register). + * + * If the cache partition has changed, prints a message to the log describing + * those changes. + */ +static __cpuinit void describe_cachepart_change(unsigned int thread, + const char *label, + unsigned int sz, + unsigned int old, + unsigned int new) +{ + unsigned int lor1, land1, gor1, gand1; + unsigned int lor2, land2, gor2, gand2; + unsigned int diff = old ^ new; + + if (!diff) + return; + + pr_info("Thread %d: %s partition changed:", thread, label); + if (diff & (SYSC_xCPARTL_OR_BITS | SYSC_xCPARTL_AND_BITS)) { + lor1 = (old & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S; + lor2 = (new & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S; + land1 = (old & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S; + land2 = (new & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S; + pr_cont(" L:%#x+%#x->%#x+%#x", + (lor1 * sz) >> 4, + ((land1 + 1) * sz) >> 4, + (lor2 * sz) >> 4, + ((land2 + 1) * sz) >> 4); + } + if (diff & (SYSC_xCPARTG_OR_BITS | SYSC_xCPARTG_AND_BITS)) { + gor1 = (old & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S; + gor2 = (new & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S; + gand1 = (old & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S; + gand2 = (new & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S; + pr_cont(" G:%#x+%#x->%#x+%#x", + (gor1 * sz) >> 4, + ((gand1 + 1) * sz) >> 4, + (gor2 * sz) >> 4, + ((gand2 + 1) * sz) >> 4); + } + if (diff & SYSC_CWRMODE_BIT) + pr_cont(" %sWR", + (new & SYSC_CWRMODE_BIT) ? "+" : "-"); + if (diff & SYSC_DCPART_GCON_BIT) + pr_cont(" %sGCOn", + (new & SYSC_DCPART_GCON_BIT) ? "+" : "-"); + pr_cont("\n"); +} + +/** + * setup_smp_cache: ensure cache coherency for new SMP thread. + * @thread: New hardware thread number. + * + * Ensures that coherency is enabled and that the threads share the same cache + * partitions. + */ +static __cpuinit void setup_smp_cache(unsigned int thread) +{ + unsigned int this_thread, lflags; + unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new; + unsigned int icsz, icpart_old, icpart_new; + + /* + * Copy over the current thread's cache partition configuration to the + * new thread so that they share cache partitions. + */ + __global_lock2(lflags); + this_thread = hard_processor_id(); + /* Share dcache partition */ + dcpart_this = metag_in32(SYSC_DCPART(this_thread)); + dcpart_old = metag_in32(SYSC_DCPART(thread)); + dcpart_new = dcpart_this; +#if PAGE_OFFSET < LINGLOBAL_BASE + /* + * For the local data cache to be coherent the threads must also have + * GCOn enabled. + */ + dcpart_new |= SYSC_DCPART_GCON_BIT; + metag_out32(dcpart_new, SYSC_DCPART(this_thread)); +#endif + metag_out32(dcpart_new, SYSC_DCPART(thread)); + /* Share icache partition too */ + icpart_new = metag_in32(SYSC_ICPART(this_thread)); + icpart_old = metag_in32(SYSC_ICPART(thread)); + metag_out32(icpart_new, SYSC_ICPART(thread)); + __global_unlock2(lflags); + + /* + * Log if the cache partitions were altered so the user is aware of any + * potential unintentional cache wastage. 
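For concreteness, a worked example with made-up register values (illustrative only, not taken from the patch), showing how describe_cachepart_change() above reports each side of a local partition as offset+size:

/*
 * Assume a 16 KiB dcache, i.e. sz = 0x4000, and a local partition field
 * with OR = 0x4 and AND = 0x3.  The helper then computes
 *
 *	offset = (0x4 * 0x4000) >> 4       = 0x1000
 *	size   = ((0x3 + 1) * 0x4000) >> 4 = 0x1000
 *
 * and prints the pair as "0x1000+0x1000": a 4 KiB window starting 4 KiB
 * into the cache.  The OR and AND fields are thus reported as an offset
 * and a length expressed in sixteenths of the total cache size.
 */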
+ */ + dcsz = get_dcache_size(); + icsz = get_dcache_size(); + describe_cachepart_change(this_thread, "dcache", dcsz, + dcpart_this, dcpart_new); + describe_cachepart_change(thread, "dcache", dcsz, + dcpart_old, dcpart_new); + describe_cachepart_change(thread, "icache", icsz, + icpart_old, icpart_new); +} + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) { unsigned int thread = cpu_2_hwthread_id[cpu]; @@ -108,6 +221,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) flush_tlb_all(); + setup_smp_cache(thread); + /* * Tell the secondary CPU where to find its idle thread's stack. */ @@ -297,7 +412,7 @@ asmlinkage void secondary_start_kernel(void) /* * OK, it's off to the idle thread for us */ - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } void __init smp_cpus_done(unsigned int max_cpus) diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c index 8961f24..2ceeaae 100644 --- a/arch/metag/kernel/traps.c +++ b/arch/metag/kernel/traps.c @@ -987,9 +987,3 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) show_trace(tsk, sp, NULL); } - -void dump_stack(void) -{ - show_stack(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig index cd7f2f2..794f26a 100644 --- a/arch/metag/mm/Kconfig +++ b/arch/metag/mm/Kconfig @@ -40,6 +40,7 @@ endchoice config NUMA bool "Non Uniform Memory Access (NUMA) Support" + select ARCH_WANT_NUMA_VARIABLE_LOCALITY help Some Meta systems have MMU-mappable on-chip memories with lower latencies than main memory. This enables support for @@ -97,9 +98,6 @@ config MAX_ACTIVE_REGIONS default "2" if SPARSEMEM default "1" -config ARCH_POPULATES_NODE_MAP - def_bool y - config ARCH_SELECT_MEMORY_MODEL def_bool y diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index 504a398..d05b845 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c @@ -380,14 +380,8 @@ void __init mem_init(void) #ifdef CONFIG_HIGHMEM unsigned long tmp; - for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; - } - totalram_pages += totalhigh_pages; + for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) + free_highmem_page(pfn_to_page(tmp)); num_physpages += totalhigh_pages; #endif /* CONFIG_HIGHMEM */ @@ -412,32 +406,15 @@ void __init mem_init(void) return; } -static void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10); -} - void free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); + free_initmem_default(POISON_FREE_INITMEM); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - end = end & PAGE_MASK; - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif diff --git a/arch/metag/oprofile/Makefile b/arch/metag/oprofile/Makefile new file mode 100644 index 0000000..c9639d4 --- /dev/null +++ b/arch/metag/oprofile/Makefile @@ -0,0 +1,17 @@ +obj-$(CONFIG_OPROFILE) += oprofile.o + +oprofile-core-y += buffer_sync.o +oprofile-core-y += 
cpu_buffer.o +oprofile-core-y += event_buffer.o +oprofile-core-y += oprof.o +oprofile-core-y += oprofile_files.o +oprofile-core-y += oprofile_stats.o +oprofile-core-y += oprofilefs.o +oprofile-core-y += timer_int.o +oprofile-core-$(CONFIG_HW_PERF_EVENTS) += oprofile_perf.o + +oprofile-y += backtrace.o +oprofile-y += common.o +oprofile-y += $(addprefix ../../../drivers/oprofile/,$(oprofile-core-y)) + +ccflags-y += -Werror diff --git a/arch/metag/oprofile/backtrace.c b/arch/metag/oprofile/backtrace.c new file mode 100644 index 0000000..7cc3f37 --- /dev/null +++ b/arch/metag/oprofile/backtrace.c @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2010-2013 Imagination Technologies Ltd. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/oprofile.h> +#include <linux/uaccess.h> +#include <asm/processor.h> +#include <asm/stacktrace.h> + +#include "backtrace.h" + +static void user_backtrace_fp(unsigned long __user *fp, unsigned int depth) +{ + while (depth-- && access_ok(VERIFY_READ, fp, 8)) { + unsigned long addr; + unsigned long __user *fpnew; + if (__copy_from_user_inatomic(&addr, fp + 1, sizeof(addr))) + break; + addr -= 4; + + oprofile_add_trace(addr); + + /* stack grows up, so frame pointers must decrease */ + if (__copy_from_user_inatomic(&fpnew, fp + 0, sizeof(fpnew))) + break; + if (fpnew >= fp) + break; + fp = fpnew; + } +} + +static int kernel_backtrace_frame(struct stackframe *frame, void *data) +{ + unsigned int *depth = data; + + oprofile_add_trace(frame->pc); + + /* decrement depth and stop if we reach 0 */ + if ((*depth)-- == 0) + return 1; + + /* otherwise onto the next frame */ + return 0; +} + +void metag_backtrace(struct pt_regs * const regs, unsigned int depth) +{ + if (user_mode(regs)) { + unsigned long *fp = (unsigned long *)regs->ctx.AX[1].U0; + user_backtrace_fp((unsigned long __user __force *)fp, depth); + } else { + struct stackframe frame; + frame.fp = regs->ctx.AX[1].U0; /* A0FrP */ + frame.sp = user_stack_pointer(regs); /* A0StP */ + frame.lr = 0; /* from stack */ + frame.pc = regs->ctx.CurrPC; /* PC */ + walk_stackframe(&frame, &kernel_backtrace_frame, &depth); + } +} diff --git a/arch/metag/oprofile/backtrace.h b/arch/metag/oprofile/backtrace.h new file mode 100644 index 0000000..c0fcc42 --- /dev/null +++ b/arch/metag/oprofile/backtrace.h @@ -0,0 +1,6 @@ +#ifndef _METAG_OPROFILE_BACKTRACE_H +#define _METAG_OPROFILE_BACKTRACE_H + +void metag_backtrace(struct pt_regs * const regs, unsigned int depth); + +#endif diff --git a/arch/metag/oprofile/common.c b/arch/metag/oprofile/common.c new file mode 100644 index 0000000..ba26152 --- /dev/null +++ b/arch/metag/oprofile/common.c @@ -0,0 +1,66 @@ +/* + * arch/metag/oprofile/common.c + * + * Copyright (C) 2013 Imagination Technologies Ltd. + * + * Based on arch/sh/oprofile/common.c: + * + * Copyright (C) 2003 - 2010 Paul Mundt + * + * Based on arch/mips/oprofile/common.c: + * + * Copyright (C) 2004, 2005 Ralf Baechle + * Copyright (C) 2005 MIPS Technologies, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/oprofile.h> +#include <linux/perf_event.h> +#include <linux/slab.h> + +#include "backtrace.h" + +#ifdef CONFIG_HW_PERF_EVENTS +/* + * This will need to be reworked when multiple PMUs are supported. + */ +static char *metag_pmu_op_name; + +char *op_name_from_perf_id(void) +{ + return metag_pmu_op_name; +} + +int __init oprofile_arch_init(struct oprofile_operations *ops) +{ + ops->backtrace = metag_backtrace; + + if (perf_num_counters() == 0) + return -ENODEV; + + metag_pmu_op_name = kasprintf(GFP_KERNEL, "metag/%s", + perf_pmu_name()); + if (unlikely(!metag_pmu_op_name)) + return -ENOMEM; + + return oprofile_perf_init(ops); +} + +void oprofile_arch_exit(void) +{ + oprofile_perf_exit(); + kfree(metag_pmu_op_name); +} +#else +int __init oprofile_arch_init(struct oprofile_operations *ops) +{ + ops->backtrace = metag_backtrace; + /* fall back to timer interrupt PC sampling */ + return -ENODEV; +} +void oprofile_arch_exit(void) {} +#endif /* CONFIG_HW_PERF_EVENTS */ diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 7843d11..54237af 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -19,13 +19,14 @@ config MICROBLAZE select HAVE_DEBUG_KMEMLEAK select IRQ_DOMAIN select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_CPU_DEVICES select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS + select GENERIC_IDLE_POLL_SETUP select MODULES_USE_ELF_RELA select CLONE_BACKWARDS @@ -38,9 +39,6 @@ config RWSEM_GENERIC_SPINLOCK config ZONE_DMA def_bool y -config ARCH_POPULATES_NODE_MAP - def_bool y - config RWSEM_XCHGADD_ALGORITHM bool diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h index 0759153..d6e0ffe 100644 --- a/arch/microblaze/include/asm/processor.h +++ b/arch/microblaze/include/asm/processor.h @@ -22,7 +22,6 @@ extern const struct seq_operations cpuinfo_op; # define cpu_relax() barrier() -# define cpu_sleep() do {} while (0) #define task_pt_regs(tsk) \ (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) @@ -160,10 +159,6 @@ unsigned long get_wchan(struct task_struct *p); # define STACK_TOP TASK_SIZE # define STACK_TOP_MAX STACK_TOP -void disable_hlt(void); -void enable_hlt(void); -void default_idle(void); - #ifdef CONFIG_DEBUG_FS extern struct dentry *of_debugfs_root; #endif diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h index 0e0b0a5..f05df56 100644 --- a/arch/microblaze/include/asm/setup.h +++ b/arch/microblaze/include/asm/setup.h @@ -46,7 +46,6 @@ void machine_shutdown(void); void machine_halt(void); void machine_power_off(void); -void free_init_pages(char *what, unsigned long begin, unsigned long end); extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index 008f304..de26ea6 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h @@ -182,7 +182,6 @@ static inline bool test_and_clear_restore_sigmask(void) ti->status &= ~TS_RESTORE_SIGMASK; return true; } -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) #endif #endif /* __KERNEL__ */ diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c index 60dcacc..365f2d5 100644 --- 
a/arch/microblaze/kernel/early_printk.c +++ b/arch/microblaze/kernel/early_printk.c @@ -21,7 +21,6 @@ #include <asm/setup.h> #include <asm/prom.h> -static u32 early_console_initialized; static u32 base_addr; #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE @@ -109,27 +108,11 @@ static struct console early_serial_uart16550_console = { }; #endif /* CONFIG_SERIAL_8250_CONSOLE */ -static struct console *early_console; - -void early_printk(const char *fmt, ...) -{ - char buf[512]; - int n; - va_list ap; - - if (early_console_initialized) { - va_start(ap, fmt); - n = vscnprintf(buf, 512, fmt, ap); - early_console->write(early_console, buf, n); - va_end(ap); - } -} - int __init setup_early_printk(char *opt) { int version = 0; - if (early_console_initialized) + if (early_console) return 1; base_addr = of_early_console(&version); @@ -159,7 +142,6 @@ int __init setup_early_printk(char *opt) } register_console(early_console); - early_console_initialized = 1; return 0; } return 1; @@ -169,7 +151,7 @@ int __init setup_early_printk(char *opt) * only for early console because of performance degression */ void __init remap_early_printk(void) { - if (!early_console_initialized || !early_console) + if (!early_console) return; pr_info("early_printk_console remapping from 0x%x to ", base_addr); base_addr = (u32) ioremap(base_addr, PAGE_SIZE); @@ -194,9 +176,9 @@ void __init remap_early_printk(void) void __init disable_early_printk(void) { - if (!early_console_initialized || !early_console) + if (!early_console) return; pr_warn("disabling early console\n"); unregister_console(early_console); - early_console_initialized = 0; + early_console = NULL; } diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index fa0ea60..a558938 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -20,6 +20,8 @@ void show_regs(struct pt_regs *regs) { + show_regs_print_info(KERN_INFO); + pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode); pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n", regs->r1, regs->r2, regs->r3, regs->r4); @@ -44,71 +46,6 @@ void show_regs(struct pt_regs *regs) void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); -static int hlt_counter = 1; - -void disable_hlt(void) -{ - hlt_counter++; -} -EXPORT_SYMBOL(disable_hlt); - -void enable_hlt(void) -{ - hlt_counter--; -} -EXPORT_SYMBOL(enable_hlt); - -static int __init nohlt_setup(char *__unused) -{ - hlt_counter = 1; - return 1; -} -__setup("nohlt", nohlt_setup); - -static int __init hlt_setup(char *__unused) -{ - hlt_counter = 0; - return 1; -} -__setup("hlt", hlt_setup); - -void default_idle(void) -{ - if (likely(hlt_counter)) { - local_irq_disable(); - stop_critical_timings(); - cpu_relax(); - start_critical_timings(); - local_irq_enable(); - } else { - clear_thread_flag(TIF_POLLING_NRFLAG); - smp_mb__after_clear_bit(); - local_irq_disable(); - while (!need_resched()) - cpu_sleep(); - local_irq_enable(); - set_thread_flag(TIF_POLLING_NRFLAG); - } -} - -void cpu_idle(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); - - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (!need_resched()) - default_idle(); - rcu_idle_exit(); - tick_nohz_idle_exit(); - - schedule_preempt_disabled(); - check_pgt_cache(); - } -} - void flush_thread(void) { } diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index 30e6b50..cb61953 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c @@ -75,9 +75,3 @@ 
void show_stack(struct task_struct *task, unsigned long *sp) debug_show_held_locks(task); } - -void dump_stack(void) -{ - show_stack(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 8f8b367..4ec137d 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -82,13 +82,9 @@ static unsigned long highmem_setup(void) /* FIXME not sure about */ if (memblock_is_reserved(pfn << PAGE_SHIFT)) continue; - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; + free_highmem_page(page); reservedpages++; } - totalram_pages += totalhigh_pages; pr_info("High memory: %luk\n", totalhigh_pages << (PAGE_SHIFT-10)); @@ -236,40 +232,16 @@ void __init setup_memory(void) paging_init(); } -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10); -} - #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - int pages = 0; - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - pages++; - } - pr_notice("Freeing initrd memory: %dk freed\n", - (int)(pages * (PAGE_SIZE / 1024))); + free_reserved_area(start, end, 0, "initrd"); } #endif void free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); + free_initmem_default(0); } void __init mem_init(void) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 32eb3d6..e5f3794 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -18,7 +18,7 @@ config MIPS select HAVE_KRETPROBES select HAVE_DEBUG_KMEMLEAK select ARCH_BINFMT_ELF_RANDOMIZE_PIE - select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT select RTC_LIB if !MACH_LOONGSON select GENERIC_ATOMIC64 if !64BIT select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE @@ -38,7 +38,7 @@ config MIPS select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE select HAVE_MOD_ARCH_SPECIFIC - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select MODULES_USE_ELF_REL if MODULES select MODULES_USE_ELF_RELA if MODULES && 64BIT select CLONE_BACKWARDS @@ -404,6 +404,8 @@ config PMC_MSP select IRQ_CPU select SERIAL_8250 select SERIAL_8250_CONSOLE + select USB_EHCI_BIG_ENDIAN_MMIO + select USB_EHCI_BIG_ENDIAN_DESC help This adds support for the PMC-Sierra family of Multi-Service Processor System-On-A-Chips. These parts include a number @@ -657,7 +659,7 @@ config SNI_RM bool "SNI RM200/300/400" select FW_ARC if CPU_LITTLE_ENDIAN select FW_ARC32 if CPU_LITTLE_ENDIAN - select SNIPROM if CPU_BIG_ENDIAN + select FW_SNIPROM if CPU_BIG_ENDIAN select ARCH_MAY_HAVE_PC_FDC select BOOT_ELF32 select CEVT_R4K @@ -1144,7 +1146,7 @@ config DEFAULT_SGI_PARTITION config FW_ARC32 bool -config SNIPROM +config FW_SNIPROM bool config BOOT_ELF32 @@ -1433,6 +1435,7 @@ config CPU_CAVIUM_OCTEON select CPU_SUPPORTS_HUGEPAGES select LIBFDT select USE_OF + select USB_EHCI_BIG_ENDIAN_MMIO help The Cavium Octeon processor is a highly integrated chip containing many ethernet hardware widgets for networking tasks. 
The processor @@ -1493,7 +1496,6 @@ config CPU_XLP select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM - select CPU_HAS_LLSC select WEAK_ORDERING select WEAK_REORDERING_BEYOND_LLSC select CPU_HAS_PREFETCH @@ -2538,7 +2540,14 @@ source "kernel/power/Kconfig" endmenu -source "arch/mips/kernel/cpufreq/Kconfig" +config MIPS_EXTERNAL_TIMER + bool + +if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER +menu "CPU Power Management" +source "drivers/cpufreq/Kconfig" +endmenu +endif source "net/Kconfig" diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ed1949c..9aa7d44 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -745,10 +745,7 @@ void __init board_prom_init(void) strcpy(cfe_version, "unknown"); printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); - if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { - printk(KERN_ERR PFX "invalid nvram checksum\n"); - return; - } + bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); board_name = bcm63xx_nvram_get_name(); /* find board by name */ diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c index f1c9c3e..e97fd60 100644 --- a/arch/mips/bcm63xx/dev-spi.c +++ b/arch/mips/bcm63xx/dev-spi.c @@ -85,20 +85,9 @@ static struct platform_device bcm63xx_spi_device = { int __init bcm63xx_spi_register(void) { - struct clk *periph_clk; - if (BCMCPU_IS_6328() || BCMCPU_IS_6345()) return -ENODEV; - periph_clk = clk_get(NULL, "periph"); - if (IS_ERR(periph_clk)) { - pr_err("unable to get periph clock\n"); - return -ENODEV; - } - - /* Set bus frequency */ - spi_pdata.speed_hz = clk_get_rate(periph_clk); - spi_resources[0].start = bcm63xx_regset_address(RSET_SPI); spi_resources[0].end = spi_resources[0].start; spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI); diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index 6206116..a4b8864 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c @@ -38,7 +38,7 @@ struct bcm963xx_nvram { static struct bcm963xx_nvram nvram; static int mac_addr_used; -int __init bcm63xx_nvram_init(void *addr) +void __init bcm63xx_nvram_init(void *addr) { unsigned int check_len; u32 crc, expected_crc; @@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr) crc = crc32_le(~0, (u8 *)&nvram, check_len); if (crc != expected_crc) - return -EINVAL; - - return 0; + pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", + expected_crc, crc); } u8 *bcm63xx_nvram_get_name(void) diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 314231b..35e18e9 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void) return board_register_devices(); } -device_initcall(bcm63xx_register_devices); +arch_initcall(bcm63xx_register_devices); diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index c594a3d..b0baa29 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image) static void octeon_generic_shutdown(void) { - int cpu, i; + int i; +#ifdef CONFIG_SMP + int cpu; +#endif struct cvmx_bootmem_desc *bootmem_desc; void *named_block_array_ptr; diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index ef99db9..fe0d15d 100644 --- a/arch/mips/include/asm/hugetlb.h +++ 
b/arch/mips/include/asm/hugetlb.h @@ -10,6 +10,7 @@ #define __ASM_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h index c9bae13..b0184cf 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h @@ -13,7 +13,6 @@ struct bcm63xx_spi_pdata { unsigned int msg_ctl_width; int bus_num; int num_chipselect; - u32 speed_hz; }; enum bcm63xx_regs_spi { diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h index 62d6a3b..4e0b6bc 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h @@ -9,10 +9,8 @@ * * Initialized the local nvram copy from the target address and checks * its checksum. - * - * Returns 0 on success. */ -int __init bcm63xx_nvram_init(void *nvram); +void bcm63xx_nvram_init(void *nvram); /** * bcm63xx_nvram_get_name() - returns the board name according to nvram diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index d9c8284..193c091 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h @@ -28,11 +28,7 @@ /* #define cpu_has_prefetch ? */ #define cpu_has_mcheck 1 /* #define cpu_has_ejtag ? */ -#ifdef CONFIG_CPU_HAS_LLSC #define cpu_has_llsc 1 -#else -#define cpu_has_llsc 0 -#endif /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 12b70c2..0da44d4 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1166,7 +1166,10 @@ do { \ unsigned int __dspctl; \ \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " rddsp %0, %x1 \n" \ + " .set pop \n" \ : "=r" (__dspctl) \ : "i" (mask)); \ __dspctl; \ @@ -1175,30 +1178,198 @@ do { \ #define wrdsp(val, mask) \ do { \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " wrdsp %0, %x1 \n" \ + " .set pop \n" \ : \ : "r" (val), "i" (mask)); \ } while (0) -#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) -#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) -#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) -#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) - -#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) -#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) -#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) -#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) - -#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) -#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) -#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) -#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) - -#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) -#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) -#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) -#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) +#define mflo0() \ +({ \ + long mflo0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ 
+ " mflo %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mflo0)); \ + mflo0; \ +}) + +#define mflo1() \ +({ \ + long mflo1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mflo1)); \ + mflo1; \ +}) + +#define mflo2() \ +({ \ + long mflo2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mflo2)); \ + mflo2; \ +}) + +#define mflo3() \ +({ \ + long mflo3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mflo3)); \ + mflo3; \ +}) + +#define mfhi0() \ +({ \ + long mfhi0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mfhi0)); \ + mfhi0; \ +}) + +#define mfhi1() \ +({ \ + long mfhi1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mfhi1)); \ + mfhi1; \ +}) + +#define mfhi2() \ +({ \ + long mfhi2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mfhi2)); \ + mfhi2; \ +}) + +#define mfhi3() \ +({ \ + long mfhi3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mfhi3)); \ + mfhi3; \ +}) + + +#define mtlo0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) #else diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 99fc547..eab99e5 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -31,7 +31,7 @@ #define PAGE_SHIFT 16 #endif #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 197f636..8efe5a9 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h @@ -21,6 +21,6 @@ #include <asm/sigcontext.h> #include <asm/siginfo.h> -#define __ARCH_HAS_ODD_SIGACTION +#define __ARCH_HAS_IRIX_SIGACTION #endif /* _ASM_SIGNAL_H */ diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index d6b18b4..addb9f5 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h @@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits 
*/ * * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single * Unix names RESETHAND and NODEFER respectively. + * + * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever + * supported its use and no libc was using it, so the entire sa-restorer + * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 + * retaining only the SA_RESTORER definition as a reminder to avoid + * accidental reuse of the mask bit. */ #define SA_ONSTACK 0x08000000 #define SA_RESETHAND 0x80000000 @@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_RESTORER 0x04000000 /* Only for o32 */ - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f81d98f..520a908 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -92,37 +92,22 @@ CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/n obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o -obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/ - obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o # -# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe -# to enable DSP assembler support here even if the MIPS Release 2 CPU we -# are targetting does not support DSP because all code-paths making use of -# it properly check that the running CPU *actually does* support these -# instructions. +# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not +# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches +# here because the compiler may use DSP ASE instructions (such as lwx) in +# code paths where we cannot check that the CPU we are running on supports it. +# Proper abstraction using HAVE_AS_DSP and macros is done in +# arch/mips/include/asm/mipsregs.h. 
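The point of this comment can be shown with a small sketch (the structure and function names are assumed for illustration and are not code from this patch; cpu_has_dsp is the kernel's existing runtime feature test, and mflo0()/mfhi0() are the accessors reworked in the mipsregs.h hunk earlier in this patch): the ".set dsp" directives only make the assembler accept the opcodes, while a runtime check keeps them off cores without the DSP ASE.

#include <asm/cpu-features.h>	/* cpu_has_dsp */
#include <asm/mipsregs.h>	/* mflo0()/mfhi0() wrapped with ".set dsp" */

/* Hypothetical container, for illustration only. */
struct dsp_ac0_sketch {
	unsigned long lo, hi;
};

static void save_ac0_sketch(struct dsp_ac0_sketch *ctx)
{
	/* Runtime gate: DSP instructions are never executed on a CPU that
	 * lacks the ASE, which is why -mdsp need not be passed globally. */
	if (cpu_has_dsp) {
		ctx->lo = mflo0();
		ctx->hi = mfhi0();
	}
}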
# ifeq ($(CONFIG_CPU_MIPSR2), y) CFLAGS_DSP = -DHAVE_AS_DSP -# -# Check if assembler supports DSP ASE -# -ifeq ($(call cc-option-yn,-mdsp), y) -CFLAGS_DSP += -mdsp -endif - -# -# Check if assembler supports DSP ASE Rev2 -# -ifeq ($(call cc-option-yn,-mdspr2), y) -CFLAGS_DSP += -mdspr2 -endif - CFLAGS_signal.o = $(CFLAGS_DSP) CFLAGS_signal32.o = $(CFLAGS_DSP) CFLAGS_process.o = $(CFLAGS_DSP) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6bfccc2..5fe66a0 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->tlbsize = 48; break; case PRID_IMP_VR41XX: + set_isa(c, MIPS_CPU_ISA_III); + c->options = R4K_OPTS; + c->tlbsize = 32; switch (c->processor_id & 0xf0) { case PRID_REV_VR4111: c->cputype = CPU_VR4111; @@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC VR4131"; } else { c->cputype = CPU_VR4133; + c->options |= MIPS_CPU_LLSC; __cpu_name[cpu] = "NEC VR4133"; } break; @@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC Vr41xx"; break; } - set_isa(c, MIPS_CPU_ISA_III); - c->options = R4K_OPTS; - c->tlbsize = 32; break; case PRID_IMP_R4300: c->cputype = CPU_R4300; @@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void) if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; } diff --git a/arch/mips/kernel/cpufreq/Kconfig b/arch/mips/kernel/cpufreq/Kconfig deleted file mode 100644 index 58c601e..0000000 --- a/arch/mips/kernel/cpufreq/Kconfig +++ /dev/null @@ -1,41 +0,0 @@ -# -# CPU Frequency scaling -# - -config MIPS_EXTERNAL_TIMER - bool - -config MIPS_CPUFREQ - bool - default y - depends on CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER - -if MIPS_CPUFREQ - -menu "CPU Frequency scaling" - -source "drivers/cpufreq/Kconfig" - -if CPU_FREQ - -comment "CPUFreq processor drivers" - -config LOONGSON2_CPUFREQ - tristate "Loongson2 CPUFreq Driver" - select CPU_FREQ_TABLE - depends on MIPS_CPUFREQ - help - This option adds a CPUFreq driver for loongson processors which - support software configurable cpu frequency. - - Loongson2F and it's successors support this feature. - - For details, take a look at <file:Documentation/cpu-freq/>. - - If in doubt, say N. - -endif # CPU_FREQ - -endmenu - -endif # MIPS_CPUFREQ diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile deleted file mode 100644 index 05a5715..0000000 --- a/arch/mips/kernel/cpufreq/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the Linux/MIPS cpufreq. -# - -obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c deleted file mode 100644 index 3237c52..0000000 --- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Cpufreq driver for the loongson-2 processors - * - * The 2E revision of loongson processor not support this feature. - * - * Copyright (C) 2006 - 2008 Lemote Inc. 
& Insititute of Computing Technology - * Author: Yanhua, yanh@lemote.com - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include <linux/cpufreq.h> -#include <linux/module.h> -#include <linux/err.h> -#include <linux/sched.h> /* set_cpus_allowed() */ -#include <linux/delay.h> -#include <linux/platform_device.h> - -#include <asm/clock.h> - -#include <asm/mach-loongson/loongson.h> - -static uint nowait; - -static struct clk *cpuclk; - -static void (*saved_cpu_wait) (void); - -static int loongson2_cpu_freq_notifier(struct notifier_block *nb, - unsigned long val, void *data); - -static struct notifier_block loongson2_cpufreq_notifier_block = { - .notifier_call = loongson2_cpu_freq_notifier -}; - -static int loongson2_cpu_freq_notifier(struct notifier_block *nb, - unsigned long val, void *data) -{ - if (val == CPUFREQ_POSTCHANGE) - current_cpu_data.udelay_val = loops_per_jiffy; - - return 0; -} - -static unsigned int loongson2_cpufreq_get(unsigned int cpu) -{ - return clk_get_rate(cpuclk); -} - -/* - * Here we notify other drivers of the proposed change and the final change. - */ -static int loongson2_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int cpu = policy->cpu; - unsigned int newstate = 0; - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - unsigned int freq; - - if (!cpu_online(cpu)) - return -ENODEV; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - if (cpufreq_frequency_table_target - (policy, &loongson2_clockmod_table[0], target_freq, relation, - &newstate)) - return -EINVAL; - - freq = - ((cpu_clock_freq / 1000) * - loongson2_clockmod_table[newstate].index) / 8; - if (freq < policy->min || freq > policy->max) - return -EINVAL; - - pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); - - freqs.cpu = cpu; - freqs.old = loongson2_cpufreq_get(cpu); - freqs.new = freq; - freqs.flags = 0; - - if (freqs.new == freqs.old) - return 0; - - /* notifiers */ - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - set_cpus_allowed_ptr(current, &cpus_allowed); - - /* setting the cpu frequency */ - clk_set_rate(cpuclk, freq); - - /* notifiers */ - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - pr_debug("cpufreq: set frequency %u kHz\n", freq); - - return 0; -} - -static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - int i; - unsigned long rate; - int ret; - - if (!cpu_online(policy->cpu)) - return -ENODEV; - - cpuclk = clk_get(NULL, "cpu_clk"); - if (IS_ERR(cpuclk)) { - printk(KERN_ERR "cpufreq: couldn't get CPU clk\n"); - return PTR_ERR(cpuclk); - } - - rate = cpu_clock_freq / 1000; - if (!rate) { - clk_put(cpuclk); - return -EINVAL; - } - ret = clk_set_rate(cpuclk, rate); - if (ret) { - clk_put(cpuclk); - return ret; - } - - /* clock table init */ - for (i = 2; - (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END); - i++) - loongson2_clockmod_table[i].frequency = (rate * i) / 8; - - policy->cur = loongson2_cpufreq_get(policy->cpu); - - cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], - policy->cpu); - - return cpufreq_frequency_table_cpuinfo(policy, - &loongson2_clockmod_table[0]); -} - -static int loongson2_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &loongson2_clockmod_table[0]); -} - -static int 
loongson2_cpufreq_exit(struct cpufreq_policy *policy) -{ - clk_put(cpuclk); - return 0; -} - -static struct freq_attr *loongson2_table_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver loongson2_cpufreq_driver = { - .owner = THIS_MODULE, - .name = "loongson2", - .init = loongson2_cpufreq_cpu_init, - .verify = loongson2_cpufreq_verify, - .target = loongson2_cpufreq_target, - .get = loongson2_cpufreq_get, - .exit = loongson2_cpufreq_exit, - .attr = loongson2_table_attr, -}; - -static struct platform_device_id platform_device_ids[] = { - { - .name = "loongson2_cpufreq", - }, - {} -}; - -MODULE_DEVICE_TABLE(platform, platform_device_ids); - -static struct platform_driver platform_driver = { - .driver = { - .name = "loongson2_cpufreq", - .owner = THIS_MODULE, - }, - .id_table = platform_device_ids, -}; - -/* - * This is the simple version of Loongson-2 wait, Maybe we need do this in - * interrupt disabled context. - */ - -static DEFINE_SPINLOCK(loongson2_wait_lock); - -static void loongson2_cpu_wait(void) -{ - unsigned long flags; - u32 cpu_freq; - - spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG0; - LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ - spin_unlock_irqrestore(&loongson2_wait_lock, flags); -} - -static int __init cpufreq_init(void) -{ - int ret; - - /* Register platform stuff */ - ret = platform_driver_register(&platform_driver); - if (ret) - return ret; - - pr_info("cpufreq: Loongson-2F CPU frequency driver.\n"); - - cpufreq_register_notifier(&loongson2_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - ret = cpufreq_register_driver(&loongson2_cpufreq_driver); - - if (!ret && !nowait) { - saved_cpu_wait = cpu_wait; - cpu_wait = loongson2_cpu_wait; - } - - return ret; -} - -static void __exit cpufreq_exit(void) -{ - if (!nowait && saved_cpu_wait) - cpu_wait = saved_cpu_wait; - cpufreq_unregister_driver(&loongson2_cpufreq_driver); - cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - platform_driver_unregister(&platform_driver); -} - -module_init(cpufreq_init); -module_exit(cpufreq_exit); - -module_param(nowait, uint, 0644); -MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait"); - -MODULE_AUTHOR("Yanhua <yanh@lemote.com>"); -MODULE_DESCRIPTION("cpufreq driver for Loongson2F"); -MODULE_LICENSE("GPL"); diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c index 9e6440e..505cb77 100644 --- a/arch/mips/kernel/early_printk.c +++ b/arch/mips/kernel/early_printk.c @@ -7,7 +7,9 @@ * Copyright (C) 2007 MIPS Technologies, Inc. 
* written by Ralf Baechle (ralf@linux-mips.org) */ +#include <linux/kernel.h> #include <linux/console.h> +#include <linux/printk.h> #include <linux/init.h> #include <asm/setup.h> @@ -24,20 +26,18 @@ static void early_console_write(struct console *con, const char *s, unsigned n) } } -static struct console early_console = { +static struct console early_console_prom = { .name = "early", .write = early_console_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1 }; -static int early_console_initialized __initdata; - void __init setup_early_printk(void) { - if (early_console_initialized) + if (early_console) return; - early_console_initialized = 1; + early_console = &early_console_prom; - register_console(&early_console); + register_console(&early_console_prom); } diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 1658676..33d0671 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -46,10 +46,9 @@ PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) -#else - PTR_ADDIU sp, PT_SIZE #endif -.endm + PTR_ADDIU sp, PT_SIZE + .endm .macro RETURN_BACK jr ra @@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: b ftrace_stub - addiu sp,sp,8 +#ifdef CONFIG_32BIT + addiu sp,sp,8 +#else + nop +#endif /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ lw t1, function_trace_stop diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 135c4aa..7a54f74 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_mips_r) { seq_printf(m, "isa\t\t\t:"); if (cpu_has_mips_1) - seq_printf(m, "%s", "mips1"); + seq_printf(m, "%s", " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 3be4405..cfc742d 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -41,44 +41,26 @@ #include <asm/inst.h> #include <asm/stacktrace.h> -/* - * The idle thread. There's no useful work to be done, so just try to conserve - * power and have a low exit latency (ie sit in a loop waiting for somebody to - * say that they'd like to reschedule) - */ -void __noreturn cpu_idle(void) +#ifdef CONFIG_HOTPLUG_CPU +void arch_cpu_idle_dead(void) { - int cpu; - - /* CPU is going idle. */ - cpu = smp_processor_id(); + /* What the heck is this check doing ? 
*/ + if (!cpu_isset(smp_processor_id(), cpu_callin_map)) + play_dead(); +} +#endif - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (!need_resched() && cpu_online(cpu)) { +void arch_cpu_idle(void) +{ #ifdef CONFIG_MIPS_MT_SMTC - extern void smtc_idle_loop_hook(void); + extern void smtc_idle_loop_hook(void); - smtc_idle_loop_hook(); + smtc_idle_loop_hook(); #endif - - if (cpu_wait) { - /* Don't trace irqs off for idle */ - stop_critical_timings(); - (*cpu_wait)(); - start_critical_timings(); - } - } -#ifdef CONFIG_HOTPLUG_CPU - if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map)) - play_dead(); -#endif - rcu_idle_exit(); - tick_nohz_idle_exit(); - schedule_preempt_disabled(); - } + if (cpu_wait) + (*cpu_wait)(); + else + local_irq_enable(); } asmlinkage void ret_from_fork(void); diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 66bf4e2..aee04af 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -139,7 +139,7 @@ asmlinkage __cpuinit void start_secondary(void) WARN_ON_ONCE(!irqs_disabled()); mp_ops->smp_finish(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a200b5b..2522551 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -206,19 +206,6 @@ void show_stack(struct task_struct *task, unsigned long *sp) show_stacktrace(task, ®s); } -/* - * The architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - struct pt_regs regs; - - prepare_frametrace(®s); - show_backtrace(current, ®s); -} - -EXPORT_SYMBOL(dump_stack); - static void show_code(unsigned int __user *pc) { long i; @@ -244,7 +231,7 @@ static void __show_regs(const struct pt_regs *regs) unsigned int cause = regs->cp0_cause; int i; - printk("Cpu %d\n", smp_processor_id()); + show_regs_print_info(KERN_DEFAULT); /* * Saved main processor registers @@ -1571,7 +1558,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif - if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) + if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) status_set |= ST0_XX; if (cpu_has_dsp) status_set |= ST0_MX; diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c index 81f1dcf..a64daee 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c @@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a &= ~mask; raw_local_irq_restore(flags); return res; @@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, 
volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a ^= mask; raw_local_irq_restore(flags); return res; diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 507147a..a6adffb 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -270,7 +270,7 @@ LEAF(csum_partial) #endif /* odd buffer alignment? */ -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, t7 #else @@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc) addu sum, v1 #endif -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, odd #else diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index ecca559..2078915 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void) return; default: - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { #ifdef CONFIG_MIPS_CPU_SCACHE if (mips_sc_init ()) { scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 6792925..3d0346d 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -77,10 +77,9 @@ EXPORT_SYMBOL_GPL(empty_zero_page); /* * Not static inline because used by IP27 special magic initialization code */ -unsigned long setup_zero_pages(void) +void setup_zero_pages(void) { - unsigned int order; - unsigned long size; + unsigned int order, i; struct page *page; if (cpu_has_vce) @@ -94,15 +93,10 @@ unsigned long setup_zero_pages(void) page = virt_to_page((void *)empty_zero_page); split_page(page, order); - while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) { - SetPageReserved(page); - page++; - } - - size = PAGE_SIZE << order; - zero_page_mask = (size - 1) & PAGE_MASK; + for (i = 0; i < (1 << order); i++, page++) + mark_page_reserved(page); - return 1UL << order; + zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; } #ifdef CONFIG_MIPS_MT_SMTC @@ -380,7 +374,7 @@ void __init mem_init(void) high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); totalram_pages += free_all_bootmem(); - totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ + setup_zero_pages(); /* Setup zeroed pages. 
*/ reservedpages = ram = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) @@ -399,12 +393,8 @@ void __init mem_init(void) SetPageReserved(page); continue; } - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; + free_highmem_page(page); } - totalram_pages += totalhigh_pages; num_physpages += totalhigh_pages; #endif @@ -440,11 +430,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end) struct page *page = pfn_to_page(pfn); void *addr = phys_to_virt(PFN_PHYS(pfn)); - ClearPageReserved(page); - init_page_count(page); memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); - __free_page(page); - totalram_pages++; + free_reserved_page(page); } printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); } @@ -452,18 +439,14 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", - virt_to_phys((void *)start), - virt_to_phys((void *)end)); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif void __init_refok free_initmem(void) { prom_free_prom_memory(); - free_init_pages("unused kernel memory", - __pa_symbol(&__init_begin), - __pa_symbol(&__init_end)); + free_initmem_default(POISON_FREE_INITMEM); } #ifndef CONFIG_MIPS_PGD_C0_CONTEXT diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 93d937b..df96da7 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void) c->scache.flags |= MIPS_CACHE_NOT_PRESENT; /* Ignore anything but MIPSxx processors */ - if (c->isa_level != MIPS_CPU_ISA_M32R1 && - c->isa_level != MIPS_CPU_ISA_M32R2 && - c->isa_level != MIPS_CPU_ISA_M64R1 && - c->isa_level != MIPS_CPU_ISA_M64R2) + if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) return 0; /* Does this MIPS32/MIPS64 CPU have a config2 register? */ diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 38a80c8..d1faece 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -19,7 +19,7 @@ #include <asm/mach-au1x00/au1000.h> #include <asm/tlbmisc.h> -#ifdef CONFIG_DEBUG_PCI +#ifdef CONFIG_PCI_DEBUG #define DBG(x...) printk(KERN_DEBUG x) #else #define DBG(x...) 
do {} while (0) @@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus, if (status & (1 << 29)) { *data = 0xffffffff; error = -1; - DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", + DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n", access_type, bus->number, device); } else if ((status >> 28) & 0xf) { DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index 0872f12..594e60d 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c @@ -115,7 +115,6 @@ static void pcibios_scanbus(struct pci_controller *hose) pci_bus_assign_resources(bus); pci_enable_bridges(bus); } - bus->dev.of_node = hose->of_node; } } @@ -169,6 +168,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node) } } } + +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + + return of_node_get(hose->of_node); +} #endif static DEFINE_MUTEX(pci_scan_mutex); diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 3505d08..5f2bddb 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -457,7 +457,7 @@ void __init prom_free_prom_memory(void) /* We got nothing to free here ... */ } -extern unsigned long setup_zero_pages(void); +extern void setup_zero_pages(void); void __init paging_init(void) { @@ -492,7 +492,7 @@ void __init mem_init(void) totalram_pages += free_all_bootmem_node(NODE_DATA(node)); } - totalram_pages -= setup_zero_pages(); /* This comes from node 0 */ + setup_zero_pages(); /* This comes from node 0 */ codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index b06c736..428da17 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -8,7 +8,7 @@ config MN10300 select HAVE_ARCH_KGDB select GENERIC_ATOMIC64 select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA select OLD_SIGSUSPEND3 diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h index f90062b..224b426 100644 --- a/arch/mn10300/include/asm/thread_info.h +++ b/arch/mn10300/include/asm/thread_info.h @@ -165,8 +165,6 @@ void arch_release_thread_info(struct thread_info *ti); #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #endif /* __KERNEL__ */ #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 84f4e97..3707da5 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -50,77 +50,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk) void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); -#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) -/* - * we use this if we don't have any better idle routine - */ -static void default_idle(void) -{ - local_irq_disable(); - if (!need_resched()) - safe_halt(); - else - local_irq_enable(); -} - -#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ /* * On SMP it's slightly faster (but much more power-consuming!) * to poll the ->work.need_resched flag instead of waiting for the * cross-CPU IPI to arrive. 
Use this option with caution. + * + * tglx: No idea why this depends on HOTPLUG_CPU !?! */ -static inline void poll_idle(void) -{ - int oldval; - - local_irq_enable(); - - /* - * Deal with another CPU just having chosen a thread to - * run here: - */ - oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); - - if (!oldval) { - set_thread_flag(TIF_POLLING_NRFLAG); - while (!need_resched()) - cpu_relax(); - clear_thread_flag(TIF_POLLING_NRFLAG); - } else { - set_need_resched(); - } -} -#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ - -/* - * the idle thread - * - there's no useful work to be done, so just try to conserve power and have - * a low exit latency (ie sit in a loop waiting for somebody to say that - * they'd like to reschedule) - */ -void cpu_idle(void) +#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) +void arch_cpu_idle(void) { - /* endless idle loop with no priority at all */ - for (;;) { - rcu_idle_enter(); - while (!need_resched()) { - void (*idle)(void); - - smp_rmb(); - if (!idle) { -#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU) - idle = poll_idle; -#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */ - idle = default_idle; -#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */ - } - idle(); - } - rcu_idle_exit(); - - schedule_preempt_disabled(); - } + safe_halt(); } +#endif void release_segments(struct mm_struct *mm) { @@ -155,6 +97,7 @@ void machine_power_off(void) void show_regs(struct pt_regs *regs) { + show_regs_print_info(KERN_DEFAULT); } /* diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index 5d7e152..a17f9c9 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c @@ -675,7 +675,7 @@ int __init start_secondary(void *unused) #ifdef CONFIG_GENERIC_CLOCKEVENTS init_clockevents(); #endif - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); return 0; } @@ -935,8 +935,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) int timeout; #ifdef CONFIG_HOTPLUG_CPU - if (num_online_cpus() == 1) - disable_hlt(); if (sleep_mode[cpu]) run_wakeup_cpu(cpu); #endif /* CONFIG_HOTPLUG_CPU */ @@ -1003,9 +1001,6 @@ int __cpu_disable(void) void __cpu_die(unsigned int cpu) { run_sleep_cpu(cpu); - - if (num_online_cpus() == 1) - enable_hlt(); } #ifdef CONFIG_MN10300_CACHE_ENABLED diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c index b900e5a..a7a987c 100644 --- a/arch/mn10300/kernel/traps.c +++ b/arch/mn10300/kernel/traps.c @@ -294,17 +294,6 @@ void show_stack(struct task_struct *task, unsigned long *sp) } /* - * the architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - unsigned long stack; - - show_stack(current, &stack); -} -EXPORT_SYMBOL(dump_stack); - -/* * dump the register file in the specified exception frame */ void show_registers_only(struct pt_regs *regs) diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c index e57e5bc..5a8ace6 100644 --- a/arch/mn10300/mm/init.c +++ b/arch/mn10300/mm/init.c @@ -139,30 +139,11 @@ void __init mem_init(void) } /* - * - */ -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *) addr, 0xcc, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); -} - -/* * recycle memory containing stuff only required for initialisation */ void free_initmem(void) { - free_init_pages("unused kernel 
memory", - (unsigned long) &__init_begin, - (unsigned long) &__init_end); + free_initmem_default(POISON_FREE_INITMEM); } /* @@ -171,6 +152,6 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 014a648..9ab3bf2 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -9,10 +9,9 @@ config OPENRISC select OF_EARLY_FLATTREE select IRQ_DOMAIN select HAVE_MEMBLOCK - select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_REQUIRE_GPIOLIB select HAVE_ARCH_TRACEHOOK select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS select GENERIC_IRQ_CHIP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h index 07f3212..d797acc 100644 --- a/arch/openrisc/include/asm/thread_info.h +++ b/arch/openrisc/include/asm/thread_info.h @@ -128,8 +128,6 @@ register struct thread_info *current_thread_info_reg asm("r10"); /* For OpenRISC, this is anything in the LSW other than syscall trace */ #define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP)) -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #endif /* __KERNEL__ */ #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile index 35f92ce..ec6d9d3 100644 --- a/arch/openrisc/kernel/Makefile +++ b/arch/openrisc/kernel/Makefile @@ -4,7 +4,7 @@ extra-y := head.o vmlinux.lds -obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \ +obj-y := setup.o or32_ksyms.o process.o dma.o \ traps.o time.o irq.o entry.o ptrace.o signal.o \ sys_call_table.o diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c deleted file mode 100644 index 5e8a3b6..0000000 --- a/arch/openrisc/kernel/idle.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * OpenRISC idle.c - * - * Linux architectural port borrowing liberally from similar works of - * others. All original copyrights apply as per the original source - * declaration. - * - * Modifications for the OpenRISC architecture: - * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> - * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * Idle daemon for or32. Idle daemon will handle any action - * that needs to be taken when the system becomes idle. 
- */ - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/slab.h> -#include <linux/tick.h> - -#include <asm/pgtable.h> -#include <asm/uaccess.h> -#include <asm/io.h> -#include <asm/processor.h> -#include <asm/mmu.h> -#include <asm/cache.h> -#include <asm/pgalloc.h> - -void (*powersave) (void) = NULL; - -void cpu_idle(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); - - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - - while (!need_resched()) { - check_pgt_cache(); - rmb(); - - clear_thread_flag(TIF_POLLING_NRFLAG); - - local_irq_disable(); - /* Don't trace irqs off for idle */ - stop_critical_timings(); - if (!need_resched() && powersave != NULL) - powersave(); - start_critical_timings(); - local_irq_enable(); - set_thread_flag(TIF_POLLING_NRFLAG); - } - - rcu_idle_exit(); - tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - } -} diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 00c233b..386af25 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -90,6 +90,7 @@ void show_regs(struct pt_regs *regs) { extern void show_registers(struct pt_regs *regs); + show_regs_print_info(KERN_DEFAULT); /* __PHX__ cleanup this mess */ show_registers(regs); } diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 5cce396..3d3f606 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -105,17 +105,6 @@ void show_trace_task(struct task_struct *tsk) */ } -/* - * The architecture-independent backtrace generator - */ -void dump_stack(void) -{ - unsigned long stack; - - show_stack(current, &stack); -} -EXPORT_SYMBOL(dump_stack); - void show_registers(struct pt_regs *regs) { int i; diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index e7fdc50..b3cbc67 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -43,6 +43,7 @@ #include <asm/kmap_types.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> +#include <asm/sections.h> int mem_init_done; @@ -201,9 +202,6 @@ void __init paging_init(void) /* References to section boundaries */ -extern char _stext, _etext, _edata, __bss_start, _end; -extern char __init_begin, __init_end; - static int __init free_pages_init(void) { int reservedpages, pfn; @@ -263,30 +261,11 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", - (end - start) >> 10); - - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_reserved_area(start, end, 0, "initrd"); } #endif void free_initmem(void) { - unsigned long addr; - - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n", - ((unsigned long)&__init_end - - (unsigned long)&__init_begin) >> 10); + free_initmem_default(0); } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index a9ff712..433e75a 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig 
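Several hunks in this series (the MIPS, MN10300 and OpenRISC init code above, and the PARISC code further below) drop the same hand-rolled loop in favour of the generic free_initmem_default()/free_reserved_area() helpers. As a reading aid, the pattern being deleted amounts to the function below; this is a paraphrase of the removed arch code, not the implementation of the generic helpers.

/* Paraphrase of the removed per-arch loops, for orientation only. */
static void legacy_free_init_pages(const char *what,
				   unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page((void *)addr);

		ClearPageReserved(page);	/* undo the boot-time reservation */
		init_page_count(page);		/* refcount 1 so free_page() accepts it */
		free_page(addr);		/* hand the page back to the allocator */
		totalram_pages++;		/* and account for it */
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}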
@@ -1,5 +1,6 @@ config PARISC def_bool y + select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS select HAVE_IDE select HAVE_OPROFILE select HAVE_FUNCTION_TRACER if 64BIT @@ -21,7 +22,7 @@ config PARISC select GENERIC_STRNCPY_FROM_USER select SYSCTL_ARCH_UNALIGN_ALLOW select HAVE_MOD_ARCH_SPECIFIC - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select MODULES_USE_ELF_RELA select CLONE_BACKWARDS select TTY # Needed for pdc_cons.c diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug index 7305ac8..bc989e5 100644 --- a/arch/parisc/Kconfig.debug +++ b/arch/parisc/Kconfig.debug @@ -12,18 +12,4 @@ config DEBUG_RODATA portion of the kernel code won't be covered by a TLB anymore. If in doubt, say "N". -config DEBUG_STRICT_USER_COPY_CHECKS - bool "Strict copy size checks" - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING - ---help--- - Enabling this option turns a certain set of sanity checks for user - copy operations into compile time failures. - - The copy_from_user() etc checks are there to help test if there - are sufficient security checks on the length argument of - the copy operation, by having gcc prove that the argument is - within bounds. - - If unsure, or if you run an older (pre 4.4) gcc, say N. - endmenu diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 01d95e2..113e282 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER endif # Use long jumps instead of long branches (needed if your linker fails to -# link a too big vmlinux executable) -cflags-$(CONFIG_MLONGCALLS) += -mlong-calls +# link a too big vmlinux executable). Not enabled for building modules. +ifdef CONFIG_MLONGCALLS +KBUILD_CFLAGS_KERNEL += -mlong-calls +endif # select which processor to optimise for cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 79f694f..f0e2784 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -140,7 +140,10 @@ static inline void *kmap(struct page *page) return page_address(page); } -#define kunmap(page) kunmap_parisc(page_address(page)) +static inline void kunmap(struct page *page) +{ + kunmap_parisc(page_address(page)); +} static inline void *kmap_atomic(struct page *page) { diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 7df49fa..1e40d7f 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -16,6 +16,8 @@ #include <asm/processor.h> #include <asm/cache.h> +extern spinlock_t pa_dbit_lock; + /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. 
For the return value to be meaningful, ADDR must be >= @@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); #define set_pte_at(mm, addr, ptep, pteval) \ do { \ + unsigned long flags; \ + spin_lock_irqsave(&pa_dbit_lock, flags); \ set_pte(ptep, pteval); \ purge_tlb_entries(mm, addr); \ + spin_unlock_irqrestore(&pa_dbit_lock, flags); \ } while (0) #endif /* !__ASSEMBLY__ */ @@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_SMP + pte_t pte; + unsigned long flags; + if (!pte_young(*ptep)) return 0; - return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep)); -#else - pte_t pte = *ptep; - if (!pte_young(pte)) + + spin_lock_irqsave(&pa_dbit_lock, flags); + pte = *ptep; + if (!pte_young(pte)) { + spin_unlock_irqrestore(&pa_dbit_lock, flags); return 0; - set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); + } + set_pte(ptep, pte_mkold(pte)); + purge_tlb_entries(vma->vm_mm, addr); + spin_unlock_irqrestore(&pa_dbit_lock, flags); return 1; -#endif } -extern spinlock_t pa_dbit_lock; - struct mm_struct; static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte; + unsigned long flags; - spin_lock(&pa_dbit_lock); + spin_lock_irqsave(&pa_dbit_lock, flags); old_pte = *ptep; pte_clear(mm,addr,ptep); - spin_unlock(&pa_dbit_lock); + purge_tlb_entries(mm, addr); + spin_unlock_irqrestore(&pa_dbit_lock, flags); return old_pte; } static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_SMP - unsigned long new, old; - - do { - old = pte_val(*ptep); - new = pte_val(pte_wrprotect(__pte (old))); - } while (cmpxchg((unsigned long *) ptep, old, new) != old); + unsigned long flags; + spin_lock_irqsave(&pa_dbit_lock, flags); + set_pte(ptep, pte_wrprotect(*ptep)); purge_tlb_entries(mm, addr); -#else - pte_t old_pte = *ptep; - set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); -#endif + spin_unlock_irqrestore(&pa_dbit_lock, flags); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index d1fb79a..6182832 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -77,8 +77,6 @@ struct thread_info { #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ _TIF_BLOCKSTEP) -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #endif /* __KERNEL__ */ #endif /* _ASM_PARISC_THREAD_INFO_H */ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 4ba2c93..e0a8235 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -181,30 +181,24 @@ struct exception_data { #if !defined(CONFIG_64BIT) #define __put_kernel_asm64(__val,ptr) do { \ - u64 __val64 = (u64)(__val); \ - u32 hi = (__val64) >> 32; \ - u32 lo = (__val64) & 0xffffffff; \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%1)" \ - "\n2:\tstw %3,4(%1)\n\t" \ + "\n2:\tstw %R2,4(%1)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ : "=r"(__pu_err) \ - : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ + : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ } while (0) #define __put_user_asm64(__val,ptr) do { \ - u64 __val64 = (u64)(__val); \ - u32 hi = (__val64) >> 32; \ - u32 lo = 
(__val64) & 0xffffffff; \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%%sr3,%1)" \ - "\n2:\tstw %3,4(%%sr3,%1)\n\t" \ + "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ : "=r"(__pu_err) \ - : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ + : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ } while (0) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 4b12890..83ded26 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) /* Note: purge_tlb_entries can be called at startup with no context. */ - /* Disable preemption while we play with %sr1. */ - preempt_disable(); - mtsp(mm->context, 1); purge_tlb_start(flags); + mtsp(mm->context, 1); pdtlb(addr); pitlb(addr); purge_tlb_end(flags); - preempt_enable(); } EXPORT_SYMBOL(purge_tlb_entries); diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 6795dc6..568b2c6 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -120,11 +120,13 @@ extern void __ashrdi3(void); extern void __ashldi3(void); extern void __lshrdi3(void); extern void __muldi3(void); +extern void __ucmpdi2(void); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__muldi3); +EXPORT_SYMBOL(__ucmpdi2); asmlinkage void * __canonicalize_funcptr_for_compare(void *); EXPORT_SYMBOL(__canonicalize_funcptr_for_compare); diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index d135072..55f92b6 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -59,28 +59,6 @@ #include <asm/unwind.h> #include <asm/sections.h> -/* - * The idle thread. There's no useful work to be - * done, so just try to conserve power and have a - * low exit latency (ie sit in a loop waiting for - * somebody to say that they'd like to reschedule) - */ -void cpu_idle(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); - - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - while (!need_resched()) - barrier(); - rcu_idle_exit(); - schedule_preempt_disabled(); - check_pgt_cache(); - } -} - - #define COMMAND_GLOBAL F_EXTEND(0xfffe0030) #define CMD_RESET 5 /* reset any module */ diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 6266730..fd1bb15 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -329,7 +329,7 @@ void __init smp_callin(void) local_irq_enable(); /* Interrupts have been off until now */ - cpu_idle(); /* Wait for timer to schedule some work */ + cpu_startup_entry(CPUHP_ONLINE); /* NOTREACHED */ panic("smp_callin() AAAAaaaaahhhh....\n"); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index aeb8f8f..f702bff 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -126,6 +126,8 @@ void show_regs(struct pt_regs *regs) user = user_mode(regs); level = user ? 
KERN_DEBUG : KERN_CRIT; + show_regs_print_info(level); + print_gr(level, regs); for (i = 0; i < 8; i += 4) @@ -158,14 +160,6 @@ void show_regs(struct pt_regs *regs) } } - -void dump_stack(void) -{ - show_stack(NULL, NULL); -} - -EXPORT_SYMBOL(dump_stack); - static void do_show_stack(struct unwind_frame_info *info) { int i = 1; diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 5f2e690..5651536 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile @@ -2,6 +2,7 @@ # Makefile for parisc-specific library files # -lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o +lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ + ucmpdi2.o obj-y := iomap.o diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c new file mode 100644 index 0000000..149c016 --- /dev/null +++ b/arch/parisc/lib/ucmpdi2.c @@ -0,0 +1,25 @@ +#include <linux/module.h> + +union ull_union { + unsigned long long ull; + struct { + unsigned int high; + unsigned int low; + } ui; +}; + +int __ucmpdi2(unsigned long long a, unsigned long long b) +{ + union ull_union au = {.ull = a}; + union ull_union bu = {.ull = b}; + + if (au.ui.high < bu.ui.high) + return 0; + else if (au.ui.high > bu.ui.high) + return 2; + if (au.ui.low < bu.ui.low) + return 0; + else if (au.ui.low > bu.ui.low) + return 2; + return 1; +} diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 3ac462d..157b931 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -505,7 +505,6 @@ static void __init map_pages(unsigned long start_vaddr, void free_initmem(void) { - unsigned long addr; unsigned long init_begin = (unsigned long)__init_begin; unsigned long init_end = (unsigned long)__init_end; @@ -533,19 +532,10 @@ void free_initmem(void) * pages are no-longer executable */ flush_icache_range(init_begin, init_end); - for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - num_physpages++; - totalram_pages++; - } + num_physpages += free_initmem_default(0); /* set up a new led state on systems shipped LED State panel */ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); - - printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n", - (init_end - init_begin) >> 10); } @@ -697,6 +687,8 @@ void show_mem(unsigned int filter) printk(KERN_INFO "Mem-info:\n"); show_free_areas(filter); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; #ifndef CONFIG_DISCONTIGMEM i = max_mapnr; while (i-- > 0) { @@ -1107,15 +1099,6 @@ void flush_tlb_all(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - if (start >= end) - return; - printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - num_physpages++; - totalram_pages++; - } + num_physpages += free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index f460e32..a0259ed 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -90,6 +90,7 @@ config GENERIC_GPIO config PPC bool default y + select BINFMT_ELF select OF select OF_EARLY_FLATTREE select HAVE_FTRACE_MCOUNT_RECORD @@ -98,7 +99,7 @@ config PPC select HAVE_FUNCTION_GRAPH_TRACER select SYSCTL_EXCEPTION_TRACE select ARCH_WANT_OPTIONAL_GPIOLIB - select HAVE_VIRT_TO_BUS if !PPC64 + select 
VIRT_TO_BUS if !PPC64 select HAVE_IDE select HAVE_IOREMAP_PROT select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S index a5f8264..125e165 100644 --- a/arch/powerpc/crypto/sha1-powerpc-asm.S +++ b/arch/powerpc/crypto/sha1-powerpc-asm.S @@ -113,7 +113,7 @@ STEPUP4((t)+16, fn) _GLOBAL(powerpc_sha_transform) - PPC_STLU r1,-STACKFRAMESIZE(r1) + PPC_STLU r1,-INT_FRAME_SIZE(r1) SAVE_8GPRS(14, r1) SAVE_10GPRS(22, r1) @@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform) REST_8GPRS(14, r1) REST_10GPRS(22, r1) - addi r1,r1,STACKFRAMESIZE + addi r1,r1,INT_FRAME_SIZE blr diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index ef918a2..08bd299 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -52,8 +52,6 @@ #define smp_mb__before_clear_bit() smp_mb() #define smp_mb__after_clear_bit() smp_mb() -#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) - /* Macro for generating the ***_bits() functions */ #define DEFINE_BITOP(fn, op, prefix, postfix) \ static __inline__ void fn(unsigned long mask, \ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 62e11a3..4fcbd6b 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -3,6 +3,7 @@ #ifdef CONFIG_HUGETLB_PAGE #include <asm/page.h> +#include <asm-generic/hugetlb.h> extern struct kmem_cache *hugepte_cache; diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 2fdb47a..b59e06f 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -343,17 +343,16 @@ extern void slb_set_size(u16 size); /* * VSID allocation (256MB segment) * - * We first generate a 38-bit "proto-VSID". For kernel addresses this - * is equal to the ESID | 1 << 37, for user addresses it is: - * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1) + * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated + * from mmu context id and effective segment id of the address. * - * This splits the proto-VSID into the below range - * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range - * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range - * - * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 - * That is, we assign half of the space to user processes and half - * to the kernel. + * For user processes max context id is limited to ((1ul << 19) - 5) + * for kernel space, we use the top 4 context ids to map address as below + * NOTE: each context only support 64TB now. + * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] + * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] + * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] + * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] * * The proto-VSIDs are then scrambled into real VSIDs with the * multiplicative hash: @@ -363,41 +362,49 @@ extern void slb_set_size(u16 size); * VSID_MULTIPLIER is prime, so in particular it is * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. * Because the modulus is 2^n-1 we can compute it efficiently without - * a divide or extra multiply (see below). - * - * This scheme has several advantages over older methods: - * - * - We have VSIDs allocated for every kernel address - * (i.e. everything above 0xC000000000000000), except the very top - * segment, which simplifies several things. 
+ * a divide or extra multiply (see below). The scramble function gives + * robust scattering in the hash table (at least based on some initial + * results). * - * - We allow for USER_ESID_BITS significant bits of ESID and - * CONTEXT_BITS bits of context for user addresses. - * i.e. 64T (46 bits) of address space for up to half a million contexts. + * We also consider VSID 0 special. We use VSID 0 for slb entries mapping + * bad address. This enables us to consolidate bad address handling in + * hash_page. * - * - The scramble function gives robust scattering in the hash - * table (at least based on some initial results). The previous - * method was more susceptible to pathological cases giving excessive - * hash collisions. + * We also need to avoid the last segment of the last context, because that + * would give a protovsid of 0x1fffffffff. That will result in a VSID 0 + * because of the modulo operation in vsid scramble. But the vmemmap + * (which is what uses region 0xf) will never be close to 64TB in size + * (it's 56 bytes per page of system memory). */ +#define CONTEXT_BITS 19 +#define ESID_BITS 18 +#define ESID_BITS_1T 6 + +/* + * 256MB segment + * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments + * available for user + kernel mapping. The top 4 contexts are used for + * kernel mapping. Each segment contains 2^28 bytes. Each + * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts + * (19 == 37 + 28 - 46). + */ +#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5) + /* * This should be computed such that protovosid * vsid_mulitplier * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus */ #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ -#define VSID_BITS_256M 38 +#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS) #define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) #define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ -#define VSID_BITS_1T 26 +#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T) #define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) -#define CONTEXT_BITS 19 -#define USER_ESID_BITS 18 -#define USER_ESID_BITS_1T 6 -#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) +#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT)) /* * This macro generates asm code to compute the VSID scramble @@ -421,7 +428,8 @@ extern void slb_set_size(u16 size); srdi rx,rt,VSID_BITS_##size; \ clrldi rt,rt,(64-VSID_BITS_##size); \ add rt,rt,rx; /* add high and low bits */ \ - /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ + /* NOTE: explanation based on VSID_BITS_##size = 36 \ + * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ * 2^36-1+2^28-1. That in particular means that if r3 >= \ * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ * the bit clear, r3 already has the answer we want, if it \ @@ -513,34 +521,6 @@ typedef struct { }) #endif /* 1 */ -/* - * This is only valid for addresses >= PAGE_OFFSET - * The proto-VSID space is divided into two class - * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1 - * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1 - * - * With KERNEL_START at 0xc000000000000000, the proto vsid for - * the kernel ends up with 0xc00000000 (36 bits). With 64TB - * support we need to have kernel proto-VSID in the - * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS. 
- */ -static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) -{ - unsigned long proto_vsid; - /* - * We need to make sure proto_vsid for the kernel is - * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T]) - */ - if (ssize == MMU_SEGSIZE_256M) { - proto_vsid = ea >> SID_SHIFT; - proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS)); - return vsid_scramble(proto_vsid, 256M); - } - proto_vsid = ea >> SID_SHIFT_1T; - proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T)); - return vsid_scramble(proto_vsid, 1T); -} - /* Returns the segment size indicator for a user address */ static inline int user_segment_size(unsigned long addr) { @@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr) return MMU_SEGSIZE_256M; } -/* This is only valid for user addresses (which are below 2^44) */ static inline unsigned long get_vsid(unsigned long context, unsigned long ea, int ssize) { + /* + * Bad address. We return VSID 0 for that + */ + if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) + return 0; + if (ssize == MMU_SEGSIZE_256M) - return vsid_scramble((context << USER_ESID_BITS) + return vsid_scramble((context << ESID_BITS) | (ea >> SID_SHIFT), 256M); - return vsid_scramble((context << USER_ESID_BITS_1T) + return vsid_scramble((context << ESID_BITS_1T) | (ea >> SID_SHIFT_1T), 1T); } +/* + * This is only valid for addresses >= PAGE_OFFSET + * + * For kernel space, we use the top 4 context ids to map address as below + * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] + * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] + * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] + * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] + */ +static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) +{ + unsigned long context; + + /* + * kernel take the top 4 context from the available range + */ + context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1; + return get_vsid(context, ea, ssize); +} #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_MMU_HASH64_H_ */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e665861..c9c67fc 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -266,7 +266,8 @@ #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ #define SPRN_FSCR 0x099 /* Facility Status & Control Register */ -#define FSCR_TAR (1<<8) /* Enable Target Adress Register */ +#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ +#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ #define SPRN_TAR 0x32f /* Target Address Register */ #define SPRN_LPCR 0x13E /* LPAR Control Register */ #define LPCR_VPM0 (1ul << (63-0)) diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 9db88cd..43523fe 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -358,3 +358,4 @@ SYSCALL_SPU(setns) COMPAT_SYS(process_vm_readv) COMPAT_SYS(process_vm_writev) SYSCALL(finit_module) +SYSCALL(ni_syscall) /* sys_kcmp */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 406b7b9..8ceea14 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -182,8 +182,6 @@ static inline bool test_thread_local_flags(unsigned int flags) #define is_32bit_task() (1) #endif -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ 
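The kernel-context arithmetic introduced in the mmu-hash64.h hunks above can be checked with a small stand-alone program (plain user-space C; an illustration, not kernel code). It uses the constants from that hunk -- CONTEXT_BITS 19, ESID_BITS 18, SID_SHIFT 28, the 24-bit prime multiplier -- and scrambles the proto-VSID with a plain modulo, where the kernel's ASM_VSID_SCRAMBLE computes the same residue without a divide.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Constants as defined in the hunk above (256MB segment case). */
#define CONTEXT_BITS      19
#define ESID_BITS         18
#define SID_SHIFT         28
#define MAX_USER_CONTEXT  ((UINT64_C(1) << CONTEXT_BITS) - 5)
#define VSID_BITS_256M    (CONTEXT_BITS + ESID_BITS)        /* 37 */
#define VSID_MULTIPLIER   UINT64_C(12538073)                /* 24-bit prime */
#define VSID_MODULUS_256M ((UINT64_C(1) << VSID_BITS_256M) - 1)

/* Kernel regions 0xc-0xf take the top four context ids. */
static uint64_t kernel_context(uint64_t ea)
{
	return MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1;
}

/* Illustrative scramble; the kernel computes this residue without a divide. */
static uint64_t vsid_scramble_256m(uint64_t protovsid)
{
	return (protovsid * VSID_MULTIPLIER) % VSID_MODULUS_256M;
}

int main(void)
{
	uint64_t region;

	for (region = 0xc; region <= 0xf; region++) {
		uint64_t ea = region << 60;
		uint64_t context = kernel_context(ea);
		uint64_t esid = (ea >> SID_SHIFT) & ((UINT64_C(1) << ESID_BITS) - 1);
		uint64_t protovsid = (context << ESID_BITS) | esid;

		printf("region 0x%" PRIx64 ": context 0x%" PRIx64 " vsid 0x%" PRIx64 "\n",
		       region, context, vsid_scramble_256m(protovsid));
	}
	return 0;
}

Running it prints context ids 0x7fffc through 0x7ffff for regions 0xc through 0xf, matching the table in the new comment.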
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 91586d9..3ca819f 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 354 +#define __NR_syscalls 355 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index b532060..2301602 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -51,4 +51,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); +extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); #endif /* _ASM_UPROBES_H */ diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 8c478c6..74cb4d7 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -376,6 +376,7 @@ #define __NR_process_vm_readv 351 #define __NR_process_vm_writev 352 #define __NR_finit_module 353 +#define __NR_kcmp 354 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index d29facb..ea847ab 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7) _GLOBAL(__setup_cpu_power8) mflr r11 + bl __init_FSCR bl __init_hvmode_206 mtlr r11 beqlr @@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8) mfspr r3,SPRN_LPCR oris r3, r3, LPCR_AIL_3@h bl __init_LPCR - bl __init_FSCR bl __init_TLB mtlr r11 blr _GLOBAL(__restore_cpu_power8) mflr r11 + bl __init_FSCR mfmsr r3 rldicl. r0,r3,4,63 beqlr @@ -115,7 +116,7 @@ __init_LPCR: __init_FSCR: mfspr r3,SPRN_FSCR - ori r3,r3,FSCR_TAR + ori r3,r3,FSCR_TAR|FSCR_DSCR mtspr SPRN_FSCR,r3 blr diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 75a3d71..19599ef 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index b3ba516..9ec3fe1 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -150,10 +150,7 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start)) continue; - ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); - init_page_count(pfn_to_page(addr >> PAGE_SHIFT)); - free_page((unsigned long)__va(addr)); - totalram_pages++; + free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT)); } } #endif diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 256c5bf..04d69c4 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -304,7 +304,7 @@ syscall_exit_work: subi r12,r12,TI_FLAGS 4: /* Anything else left to do? 
*/ - SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */ + SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) beq .ret_from_except_lite @@ -657,7 +657,7 @@ resume_kernel: /* Clear _TIF_EMULATE_STACK_STORE flag */ lis r11,_TIF_EMULATE_STACK_STORE@h addi r5,r9,TI_FLAGS - ldarx r4,0,r5 +0: ldarx r4,0,r5 andc r4,r4,r11 stdcx. r4,0,r5 bne- 0b diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index f3eab85..d44a571 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -23,8 +23,10 @@ #include <asm/code-patching.h> #include <asm/machdep.h> +#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) extern void epapr_ev_idle(void); extern u32 epapr_ev_idle_start[]; +#endif bool epapr_paravirt_enabled; @@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void) for (i = 0; i < (len / 4); i++) { patch_instruction(epapr_hypercall_start + i, insts[i]); +#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) patch_instruction(epapr_ev_idle_start + i, insts[i]); +#endif } +#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) if (of_get_property(hyper_node, "has-idle", NULL)) ppc_md.power_save = epapr_ev_idle; +#endif epapr_paravirt_enabled = true; diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index a8a5361..56bd923 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ mflr r10 ; \ ld r12,PACAKBASE(r13) ; \ LOAD_HANDLER(r12, system_call_entry_direct) ; \ - mtlr r12 ; \ + mtctr r12 ; \ mfspr r12,SPRN_SRR1 ; \ /* Re-use of r13... No spare regs to do this */ \ li r13,MSR_RI ; \ mtmsrd r13,1 ; \ GET_PACA(r13) ; /* get r13 back */ \ - blr ; + bctr ; #else /* We can branch directly */ #define SYSCALL_PSERIES_2_DIRECT \ @@ -1066,78 +1066,6 @@ unrecov_user_slb: #endif /* __DISABLED__ */ -/* - * r13 points to the PACA, r9 contains the saved CR, - * r12 contain the saved SRR1, SRR0 is still ready for return - * r3 has the faulting address - * r9 - r13 are saved in paca->exslb. - * r3 is saved in paca->slb_r3 - * We assume we aren't going to take any exceptions during this procedure. - */ -_GLOBAL(slb_miss_realmode) - mflr r10 -#ifdef CONFIG_RELOCATABLE - mtctr r11 -#endif - - stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ - std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ - - bl .slb_allocate_realmode - - /* All done -- return from exception. */ - - ld r10,PACA_EXSLB+EX_LR(r13) - ld r3,PACA_EXSLB+EX_R3(r13) - lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ - - mtlr r10 - - andi. r10,r12,MSR_RI /* check for unrecoverable exception */ - beq- 2f - -.machine push -.machine "power4" - mtcrf 0x80,r9 - mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ -.machine pop - - RESTORE_PPR_PACA(PACA_EXSLB, r9) - ld r9,PACA_EXSLB+EX_R9(r13) - ld r10,PACA_EXSLB+EX_R10(r13) - ld r11,PACA_EXSLB+EX_R11(r13) - ld r12,PACA_EXSLB+EX_R12(r13) - ld r13,PACA_EXSLB+EX_R13(r13) - rfid - b . /* prevent speculative execution */ - -2: mfspr r11,SPRN_SRR0 - ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10,unrecov_slb) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - rfid - b . 
- -unrecov_slb: - EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) - DISABLE_INTS - bl .save_nvgprs -1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception - b 1b - - -#ifdef CONFIG_PPC_970_NAP -power4_fixup_nap: - andc r9,r9,r10 - std r9,TI_LOCAL_FLAGS(r11) - ld r10,_LINK(r1) /* make idle task do the */ - std r10,_NIP(r1) /* equivalent of a blr */ - blr -#endif - .align 7 .globl alignment_common alignment_common: @@ -1336,6 +1264,78 @@ _GLOBAL(opal_mc_secondary_handler) /* + * r13 points to the PACA, r9 contains the saved CR, + * r12 contain the saved SRR1, SRR0 is still ready for return + * r3 has the faulting address + * r9 - r13 are saved in paca->exslb. + * r3 is saved in paca->slb_r3 + * We assume we aren't going to take any exceptions during this procedure. + */ +_GLOBAL(slb_miss_realmode) + mflr r10 +#ifdef CONFIG_RELOCATABLE + mtctr r11 +#endif + + stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ + std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ + + bl .slb_allocate_realmode + + /* All done -- return from exception. */ + + ld r10,PACA_EXSLB+EX_LR(r13) + ld r3,PACA_EXSLB+EX_R3(r13) + lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ + + mtlr r10 + + andi. r10,r12,MSR_RI /* check for unrecoverable exception */ + beq- 2f + +.machine push +.machine "power4" + mtcrf 0x80,r9 + mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ +.machine pop + + RESTORE_PPR_PACA(PACA_EXSLB, r9) + ld r9,PACA_EXSLB+EX_R9(r13) + ld r10,PACA_EXSLB+EX_R10(r13) + ld r11,PACA_EXSLB+EX_R11(r13) + ld r12,PACA_EXSLB+EX_R12(r13) + ld r13,PACA_EXSLB+EX_R13(r13) + rfid + b . /* prevent speculative execution */ + +2: mfspr r11,SPRN_SRR0 + ld r10,PACAKBASE(r13) + LOAD_HANDLER(r10,unrecov_slb) + mtspr SPRN_SRR0,r10 + ld r10,PACAKMSR(r13) + mtspr SPRN_SRR1,r10 + rfid + b . + +unrecov_slb: + EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) + DISABLE_INTS + bl .save_nvgprs +1: addi r3,r1,STACK_FRAME_OVERHEAD + bl .unrecoverable_exception + b 1b + + +#ifdef CONFIG_PPC_970_NAP +power4_fixup_nap: + andc r9,r9,r10 + std r9,TI_LOCAL_FLAGS(r11) + ld r10,_LINK(r1) /* make idle task do the */ + std r10,_NIP(r1) /* equivalent of a blr */ + blr +#endif + +/* * Hash table stuff */ .align 7 @@ -1452,20 +1452,36 @@ do_ste_alloc: _GLOBAL(do_stab_bolted) stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ + mfspr r11,SPRN_DAR /* ea */ + /* + * check for bad kernel/user address + * (ea & ~REGION_MASK) >= PGTABLE_RANGE + */ + rldicr. r9,r11,4,(63 - 46 - 4) + li r9,0 /* VSID = 0 for bad address */ + bne- 0f + + /* + * Calculate VSID: + * This is the kernel vsid, we take the top for context from + * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 + * Here we know that (ea >> 60) == 0xc + */ + lis r9,(MAX_USER_CONTEXT + 1)@ha + addi r9,r9,(MAX_USER_CONTEXT + 1)@l + + srdi r10,r11,SID_SHIFT + rldimi r10,r9,ESID_BITS,0 /* proto vsid */ + ASM_VSID_SCRAMBLE(r10, r9, 256M) + rldic r9,r10,12,16 /* r9 = vsid << 12 */ + +0: /* Hash to the primary group */ ld r10,PACASTABVIRT(r13) - mfspr r11,SPRN_DAR - srdi r11,r11,28 + srdi r11,r11,SID_SHIFT rldimi r10,r11,7,52 /* r10 = first ste of the group */ - /* Calculate VSID */ - /* This is a kernel address, so protovsid = ESID | 1 << 37 */ - li r9,0x1 - rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0 - ASM_VSID_SCRAMBLE(r11, r9, 256M) - rldic r9,r11,12,16 /* r9 = vsid << 12 */ - /* Search the primary group for a free entry */ 1: ld r11,0(r10) /* Test valid bit of the current ste */ andi. 
r11,r11,0x80 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 06c8202..2230fd0 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1045,10 +1045,7 @@ static void fadump_release_memory(unsigned long begin, unsigned long end) if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start)) continue; - ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); - init_page_count(pfn_to_page(addr >> PAGE_SHIFT)); - free_page((unsigned long)__va(addr)); - totalram_pages++; + free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT)); } } diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index ea78761..939ea7e 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -33,11 +33,6 @@ #include <asm/runlatch.h> #include <asm/smp.h> -#ifdef CONFIG_HOTPLUG_CPU -#define cpu_should_die() cpu_is_offline(smp_processor_id()) -#else -#define cpu_should_die() 0 -#endif unsigned long cpuidle_disable = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(cpuidle_disable); @@ -50,64 +45,38 @@ static int __init powersave_off(char *arg) } __setup("powersave=off", powersave_off); -/* - * The body of the idle task. - */ -void cpu_idle(void) +#ifdef CONFIG_HOTPLUG_CPU +void arch_cpu_idle_dead(void) { - set_thread_flag(TIF_POLLING_NRFLAG); - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - - while (!need_resched() && !cpu_should_die()) { - ppc64_runlatch_off(); - - if (ppc_md.power_save) { - clear_thread_flag(TIF_POLLING_NRFLAG); - /* - * smp_mb is so clearing of TIF_POLLING_NRFLAG - * is ordered w.r.t. need_resched() test. - */ - smp_mb(); - local_irq_disable(); - - /* Don't trace irqs off for idle */ - stop_critical_timings(); - - /* check again after disabling irqs */ - if (!need_resched() && !cpu_should_die()) - ppc_md.power_save(); - - start_critical_timings(); - - /* Some power_save functions return with - * interrupts enabled, some don't. - */ - if (irqs_disabled()) - local_irq_enable(); - set_thread_flag(TIF_POLLING_NRFLAG); - - } else { - /* - * Go into low thread priority and possibly - * low power mode. - */ - HMT_low(); - HMT_very_low(); - } - } + sched_preempt_enable_no_resched(); + cpu_die(); +} +#endif - HMT_medium(); - ppc64_runlatch_on(); - rcu_idle_exit(); - tick_nohz_idle_exit(); - if (cpu_should_die()) { - sched_preempt_enable_no_resched(); - cpu_die(); - } - schedule_preempt_disabled(); +void arch_cpu_idle(void) +{ + ppc64_runlatch_off(); + + if (ppc_md.power_save) { + ppc_md.power_save(); + /* + * Some power_save functions return with + * interrupts enabled, some don't. + */ + if (irqs_disabled()) + local_irq_enable(); + } else { + local_irq_enable(); + /* + * Go into low thread priority and possibly + * low power mode. 
+ */ + HMT_low(); + HMT_very_low(); } + + HMT_medium(); + ppc64_runlatch_on(); } int powersave_nap; diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index a61b133..6782221 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -756,12 +756,7 @@ static __init void kvm_free_tmp(void) end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK; /* Free the tmp space we don't need */ - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_reserved_area(start, end, 0, NULL); } static int __init kvm_guest_init(void) diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index bec1e93..48fbc2b 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -511,8 +511,7 @@ int __init nvram_scan_partitions(void) "detected: 0-length partition\n"); goto out; } - tmp_part = (struct nvram_partition *) - kmalloc(sizeof(struct nvram_partition), GFP_KERNEL); + tmp_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL); err = -ENOMEM; if (!tmp_part) { printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n"); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 59dd545..13a8d9d 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new) new->thread.regs->msr |= (MSR_FP | new->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&new->thread); new->thread.regs->msr |= MSR_VEC; } +#endif /* We may as well turn on VSX too since all the state is restored now */ if (msr & MSR_VSX) new->thread.regs->msr |= MSR_VSX; @@ -829,6 +831,8 @@ void show_regs(struct pt_regs * regs) { int i, trap; + show_regs_print_info(KERN_DEFAULT); + printk("NIP: "REG" LR: "REG" CTR: "REG"\n", regs->nip, regs->link, regs->ctr); printk("REGS: %p TRAP: %04lx %s (%s)\n", @@ -848,12 +852,6 @@ void show_regs(struct pt_regs * regs) #else printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); #endif - printk("TASK = %p[%d] '%s' THREAD: %p", - current, task_pid_nr(current), current->comm, task_thread_info(current)); - -#ifdef CONFIG_SMP - printk(" CPU: %d", raw_smp_processor_id()); -#endif /* CONFIG_SMP */ for (i = 0; i < 32; i++) { if ((i % REGS_PER_LINE) == 0) @@ -1360,12 +1358,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) } while (count++ < kstack_depth_to_print); } -void dump_stack(void) -{ - show_stack(current, NULL); -} -EXPORT_SYMBOL(dump_stack); - #ifdef CONFIG_PPC64 /* Called with hard IRQs off */ void __ppc64_runlatch_on(void) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 7f7fb7f..13f8d16 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2832,11 +2832,13 @@ static void unreloc_toc(void) { } #else -static void __reloc_toc(void *tocstart, unsigned long offset, - unsigned long nr_entries) +static void __reloc_toc(unsigned long offset, unsigned long nr_entries) { unsigned long i; - unsigned long *toc_entry = (unsigned long *)tocstart; + unsigned long *toc_entry; + + /* Get the start of the TOC by using r2 directly. 
*/ + asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); for (i = 0; i < nr_entries; i++) { *toc_entry = *toc_entry + offset; @@ -2850,8 +2852,7 @@ static void reloc_toc(void) unsigned long nr_entries = (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); - /* Need to add offset to get at __prom_init_toc_start */ - __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries); + __reloc_toc(offset, nr_entries); mb(); } @@ -2864,8 +2865,7 @@ static void unreloc_toc(void) mb(); - /* __prom_init_toc_start has been relocated, no need to add offset */ - __reloc_toc(__prom_init_toc_start, -offset, nr_entries); + __reloc_toc(-offset, nr_entries); } #endif #endif diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 245c1b6..f9b30c6 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child, brk.address = bp_info->addr & ~7UL; brk.type = HW_BRK_TYPE_TRANSLATE; + brk.len = 8; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) brk.type |= HW_BRK_TYPE_READ; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 3acb28e..95068bf 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs, do_load_up_transact_fpu(¤t->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(¤t->thread); regs->msr |= MSR_VEC; } +#endif return 0; } diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 995f854..c179428 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, do_load_up_transact_fpu(¤t->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(¤t->thread); regs->msr |= MSR_VEC; } +#endif return err; } diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 76bd9da..ee7ac5e 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -669,7 +669,7 @@ __cpuinit void start_secondary(void *unused) local_irq_enable(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); BUG(); } diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 84dbace..2da67e7 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint) or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */ mtmsr r5 +#ifdef CONFIG_ALTIVEC /* FP and VEC registers: These are recheckpointed from thread.fpr[] * and thread.vr[] respectively. The thread.transact_fpr[] version * is more modern, and will be loaded subsequently by any FPUnavailable @@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint) REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ ld r5, THREAD_VRSAVE(r3) mtspr SPRN_VRSAVE, r5 +#endif dont_restore_vec: andi. r0, r4, MSR_FP diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index f974849..13b8670 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -156,15 +156,13 @@ static struct console udbg_console = { .index = 0, }; -static int early_console_initialized; - /* * Called by setup_system after ppc_md->probe and ppc_md->early_init. * Call it again after setting udbg_putc in ppc_md->setup_arch. 
*/ void __init register_early_udbg_console(void) { - if (early_console_initialized) + if (early_console) return; if (!udbg_putc) @@ -174,7 +172,7 @@ void __init register_early_udbg_console(void) printk(KERN_INFO "early console immortal !\n"); udbg_console.flags &= ~CON_BOOT; } - early_console_initialized = 1; + early_console = &udbg_console; register_console(&udbg_console); } diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index bc77834..59f419b 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -31,6 +31,16 @@ #define UPROBE_TRAP_NR UINT_MAX /** + * is_trap_insn - check if the instruction is a trap variant + * @insn: instruction to be checked. + * Returns true if @insn is a trap variant. + */ +bool is_trap_insn(uprobe_opcode_t *insn) +{ + return (is_trap(*insn)); +} + +/** * arch_uprobe_analyze_insn * @mm: the probed address space. * @arch_uprobe: the probepoint information. @@ -43,12 +53,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, if (addr & 0x03) return -EINVAL; - /* - * We currently don't support a uprobe on an already - * existing breakpoint instruction underneath - */ - if (is_trap(auprobe->ainsn)) - return -ENOTSUPP; return 0; } @@ -188,3 +192,16 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) return false; } + +unsigned long +arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs) +{ + unsigned long orig_ret_vaddr; + + orig_ret_vaddr = regs->link; + + /* Replace the return addr with trampoline addr */ + regs->link = trampoline_vaddr; + + return orig_ret_vaddr; +} diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index ead58e3..5d7d29a 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) vcpu3s->context_id[0] = err; vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) - << USER_ESID_BITS) - 1; - vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; + << ESID_BITS) - 1; + vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS; vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; kvmppc_mmu_hpte_init(vcpu); diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 5e93438..dbdc15a 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1039,7 +1039,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) if (!vcpu_book3s) goto out; - vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *) + vcpu_book3s->shadow_vcpu = kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); if (!vcpu_book3s->shadow_vcpu) goto free_vcpu; diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 41cefd4..33db48a 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -26,17 +26,20 @@ #define E500_PID_NUM 3 #define E500_TLB_NUM 2 -#define E500_TLB_VALID 1 -#define E500_TLB_BITMAP 2 +/* entry is mapped somewhere in host TLB */ +#define E500_TLB_VALID (1 << 0) +/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ +#define E500_TLB_BITMAP (1 << 1) +/* TLB1 entry is mapped by host TLB0 */ #define E500_TLB_TLB0 (1 << 2) struct tlbe_ref { - pfn_t pfn; - unsigned int flags; /* E500_TLB_* */ + pfn_t pfn; /* valid only for TLB0, except briefly */ + unsigned int flags; /* E500_TLB_* */ }; struct tlbe_priv { - struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ + struct tlbe_ref ref; }; #ifdef 
CONFIG_KVM_E500V2 @@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 { unsigned int gtlb_nv[E500_TLB_NUM]; - /* - * information associated with each host TLB entry -- - * TLB1 only for now. If/when guest TLB1 entries can be - * mapped with host TLB0, this will be used for that too. - * - * We don't want to use this for guest TLB0 because then we'd - * have the overhead of doing the translation again even if - * the entry is still in the guest TLB (e.g. we swapped out - * and back, and our host TLB entries got evicted). - */ - struct tlbe_ref *tlb_refs[E500_TLB_NUM]; unsigned int host_tlb1_nv; u32 svr; diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index a222edf..1c6a9d72 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; /* Don't bother with unmapped entries */ - if (!(ref->flags & E500_TLB_VALID)) - return; + if (!(ref->flags & E500_TLB_VALID)) { + WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), + "%s: flags %x\n", __func__, ref->flags); + WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); + } if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; @@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, pfn_t pfn) { ref->pfn = pfn; - ref->flags = E500_TLB_VALID; + ref->flags |= E500_TLB_VALID; if (tlbe_is_writable(gtlbe)) kvm_set_pfn_dirty(pfn); @@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) { if (ref->flags & E500_TLB_VALID) { + /* FIXME: don't log bogus pfn for TLB1 */ trace_kvm_booke206_ref_release(ref->pfn, ref->flags); ref->flags = 0; } @@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) { - int tlbsel = 0; - int i; - - for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->gtlb_priv[tlbsel][i].ref; - kvmppc_e500_ref_release(ref); - } -} - -static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int stlbsel = 1; + int tlbsel; int i; - kvmppc_e500_tlbil_all(vcpu_e500); - - for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->tlb_refs[stlbsel][i]; - kvmppc_e500_ref_release(ref); + for (tlbsel = 0; tlbsel <= 1; tlbsel++) { + for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->gtlb_priv[tlbsel][i].ref; + kvmppc_e500_ref_release(ref); + } } - - clear_tlb_privs(vcpu_e500); } void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - clear_tlb_refs(vcpu_e500); + kvmppc_e500_tlbil_all(vcpu_e500); + clear_tlb_privs(vcpu_e500); clear_tlb1_bitmap(vcpu_e500); } @@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); } - /* Drop old ref and setup new one. 
*/ - kvmppc_e500_ref_release(ref); kvmppc_e500_ref_setup(ref, gtlbe, pfn); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, @@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) vcpu_e500->host_tlb1_nv = 0; - vcpu_e500->tlb_refs[1][sesel] = *ref; - vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; if (vcpu_e500->h2g_tlb1_rmap[sesel]) { - unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; + unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } - vcpu_e500->h2g_tlb1_rmap[sesel] = esel; + + vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; + vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; + vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; + WARN_ON(!(ref->flags & E500_TLB_VALID)); return sesel; } @@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int esel) { - struct tlbe_ref ref; + struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; int sesel; int r; - ref.flags = 0; r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, - &ref); + ref); if (r) return r; @@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, } /* Otherwise map into TLB1 */ - sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel); + sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); return 0; @@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, case 0: priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; - /* Triggers after clear_tlb_refs or on initial mapping */ + /* Triggers after clear_tlb_privs or on initial mapping */ if (!(priv->ref.flags & E500_TLB_VALID)) { kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); } else { @@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) host_tlb_params[0].entries / host_tlb_params[0].ways; host_tlb_params[1].sets = 1; - vcpu_e500->tlb_refs[0] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[0]) - goto err; - - vcpu_e500->tlb_refs[1] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[1]) - goto err; - vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * host_tlb_params[1].entries, GFP_KERNEL); if (!vcpu_e500->h2g_tlb1_rmap) - goto err; + return -EINVAL; return 0; - -err: - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); - return -EINVAL; } void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { kfree(vcpu_e500->h2g_tlb1_rmap); - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); } diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 1f89d26..2f4baa0 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) { } +static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); + void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); mtspr(SPRN_GESR, vcpu->arch.shared->esr); - if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) + 
if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || + __get_cpu_var(last_vcpu_on_cpu) != vcpu) { kvmppc_e500_tlbil_all(vcpu_e500); + __get_cpu_var(last_vcpu_on_cpu) = vcpu; + } kvmppc_load_guest_fp(vcpu); } diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 1b6e127..f410c3e 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); unsigned long tprot = prot; + /* + * If we hit a bad address return error. + */ + if (!vsid) + return -1; /* Make kernel text executable */ if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; @@ -759,6 +764,8 @@ void __init early_init_mmu(void) /* Initialize stab / SLB management */ if (mmu_has_feature(MMU_FTR_SLB)) slb_initialize(); + else + stab_initialize(get_paca()->stab_real); } #ifdef CONFIG_SMP @@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", ea, access, trap); - if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { - DBG_LOW(" out of pgtable range !\n"); - return 1; - } - /* Get region & vsid */ switch (REGION_ID(ea)) { case USER_REGION_ID: @@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) } DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); + /* Bad address. */ + if (!vsid) { + DBG_LOW("Bad address!\n"); + return 1; + } /* Get pgdir */ pgdir = mm->pgd; if (pgdir == NULL) @@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, /* Get VSID */ ssize = user_segment_size(ea); vsid = get_vsid(mm->context.id, ea, ssize); + if (!vsid) + return; /* Hash doesn't like irqs */ local_irq_save(flags); @@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); + /* Don't create HPTE entries for bad address */ + if (!vsid) + return; ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), mode, HPTE_V_BOLTED, mmu_linear_psize, mmu_kernel_ssize); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 7e2246f..5a535b7 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -263,19 +263,14 @@ static __meminit void vmemmap_list_populate(unsigned long phys, vmemmap_list = vmem_back; } -int __meminit vmemmap_populate(struct page *start_page, - unsigned long nr_pages, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - unsigned long start = (unsigned long)start_page; - unsigned long end = (unsigned long)(start_page + nr_pages); unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; /* Align to the page size of the linear mapping. 
*/ start = _ALIGN_DOWN(start, page_size); - pr_debug("vmemmap_populate page %p, %ld pages, node %d\n", - start_page, nr_pages, node); - pr_debug(" -> map %lx..%lx\n", start, end); + pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); for (; start < end; start += page_size) { void *p; @@ -298,7 +293,7 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index f1f7409..cd76c45 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -352,13 +352,9 @@ void __init mem_init(void) struct page *page = pfn_to_page(pfn); if (memblock_is_reserved(paddr)) continue; - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; + free_highmem_page(page); reservedpages--; } - totalram_pages += totalhigh_pages; printk(KERN_DEBUG "High memory: %luk\n", totalhigh_pages << (PAGE_SHIFT-10)); } @@ -405,39 +401,14 @@ void __init mem_init(void) void free_initmem(void) { - unsigned long addr; - ppc_md.progress = ppc_printk_progress; - - addr = (unsigned long)__init_begin; - for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { - memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - pr_info("Freeing unused kernel memory: %luk freed\n", - ((unsigned long)__init_end - - (unsigned long)__init_begin) >> 10); + free_initmem_default(POISON_FREE_INITMEM); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - if (start >= end) - return; - - start = _ALIGN_DOWN(start, PAGE_SIZE); - end = _ALIGN_UP(end, PAGE_SIZE); - pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index 40bc5b0..d1d1b92 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c @@ -29,15 +29,6 @@ static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDA(mmu_context_ida); -/* - * 256MB segment - * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments - * available for user mappings. Each segment contains 2^28 bytes. Each - * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts - * (19 == 37 + 28 - 46). 
- */ -#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1) - int __init_new_context(void) { int index; @@ -56,7 +47,7 @@ again: else if (err) return err; - if (index > MAX_CONTEXT) { + if (index > MAX_USER_CONTEXT) { spin_lock(&mmu_context_lock); ida_remove(&mmu_context_ida, index); spin_unlock(&mmu_context_lock); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index bba87ca..fa33c54 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -22,6 +22,7 @@ #include <linux/pfn.h> #include <linux/cpuset.h> #include <linux/node.h> +#include <linux/slab.h> #include <asm/sparsemem.h> #include <asm/prom.h> #include <asm/smp.h> @@ -62,14 +63,11 @@ static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS]; */ static void __init setup_node_to_cpumask_map(void) { - unsigned int node, num = 0; + unsigned int node; /* setup nr_node_ids if not done yet */ - if (nr_node_ids == MAX_NUMNODES) { - for_each_node_mask(node, node_possible_map) - num = node; - nr_node_ids = num + 1; - } + if (nr_node_ids == MAX_NUMNODES) + setup_nr_node_ids(); /* allocate the map */ for (node = 0; node < nr_node_ids; node++) diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index e212a27..654258f 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -61,7 +61,7 @@ #endif #ifdef CONFIG_PPC_STD_MMU_64 -#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) +#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) #error TASK_SIZE_USER64 exceeds user VSID range #endif #endif diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 1a16ca2..17aa6df 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -31,10 +31,15 @@ * No other registers are examined or changed. */ _GLOBAL(slb_allocate_realmode) - /* r3 = faulting address */ + /* + * check for bad kernel/user address + * (ea & ~REGION_MASK) >= PGTABLE_RANGE + */ + rldicr. r9,r3,4,(63 - 46 - 4) + bne- 8f srdi r9,r3,60 /* get region */ - srdi r10,r3,28 /* get esid */ + srdi r10,r3,SID_SHIFT /* get esid */ cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ @@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode) */ _GLOBAL(slb_miss_kernel_load_linear) li r11,0 - li r9,0x1 /* - * for 1T we shift 12 bits more. slb_finish_load_1T will do - * the necessary adjustment + * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 + * r9 = region id. */ - rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 + addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha + addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l + + BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) @@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) _GLOBAL(slb_miss_kernel_load_io) li r11,0 6: - li r9,0x1 /* - * for 1T we shift 12 bits more. slb_finish_load_1T will do - * the necessary adjustment + * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 + * r9 = region id. */ - rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 + addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha + addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l + BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) b slb_finish_load_1T -0: /* user address: proto-VSID = context << 15 | ESID. First check - * if the address is within the boundaries of the user region - */ - srdi. 
r9,r10,USER_ESID_BITS - bne- 8f /* invalid ea bits set */ - - +0: /* when using slices, we extract the psize off the slice bitmaps * and then we need to get the sllp encoding off the mmu_psize_defs * array. @@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) ld r9,PACACONTEXTID(r13) BEGIN_FTR_SECTION cmpldi r10,0x1000 -END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - rldimi r10,r9,USER_ESID_BITS,0 -BEGIN_FTR_SECTION bge slb_finish_load_1T END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) b slb_finish_load 8: /* invalid EA */ li r10,0 /* BAD_VSID */ + li r9,0 /* BAD_VSID */ li r11,SLB_VSID_USER /* flags don't much matter */ b slb_finish_load @@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user) /* get context to calculate proto-VSID */ ld r9,PACACONTEXTID(r13) - rldimi r10,r9,USER_ESID_BITS,0 - /* fall through slb_finish_load */ #endif /* __DISABLED__ */ @@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user) /* * Finish loading of an SLB entry and return * - * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET + * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET */ slb_finish_load: + rldimi r10,r9,ESID_BITS,0 ASM_VSID_SCRAMBLE(r10,r9,256M) /* * bits above VSID_BITS_256M need to be ignored from r10 @@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size) /* * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. * - * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 + * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 */ slb_finish_load_1T: - srdi r10,r10,40-28 /* get 1T ESID */ + srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ + rldimi r10,r9,ESID_BITS_1T,0 ASM_VSID_SCRAMBLE(r10,r9,1T) /* * bits above VSID_BITS_1T need to be ignored from r10 diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 0d82ef5..023ec8a 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, if (!is_kernel_addr(addr)) { ssize = user_segment_size(addr); vsid = get_vsid(mm->context.id, addr, ssize); - WARN_ON(vsid == 0); } else { vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; } + WARN_ON(vsid == 0); vpn = hpt_vpn(addr, vsid, ssize); rpte = __real_pte(__pte(pte), ptep); diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index b554879..3c475d6 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = { .attrs = power7_events_attr, }; +PMU_FORMAT_ATTR(event, "config:0-19"); + +static struct attribute *power7_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +struct attribute_group power7_pmu_format_group = { + .name = "format", + .attrs = power7_pmu_format_attr, +}; + static const struct attribute_group *power7_pmu_attr_groups[] = { + &power7_pmu_format_group, &power7_pmu_events_group, NULL, }; diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 0effe9f..7be9336 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -274,6 +274,8 @@ config 440EPX select IBM_EMAC_EMAC4 select IBM_EMAC_RGMII select IBM_EMAC_ZMII + select USB_EHCI_BIG_ENDIAN_MMIO + select USB_EHCI_BIG_ENDIAN_DESC config 440GRX bool diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig index c169998..381a592 100644 --- a/arch/powerpc/platforms/512x/Kconfig +++ 
b/arch/powerpc/platforms/512x/Kconfig @@ -7,6 +7,8 @@ config PPC_MPC512x select PPC_PCI_CHOICE select FSL_PCI if PCI select ARCH_WANT_OPTIONAL_GPIOLIB + select USB_EHCI_BIG_ENDIAN_MMIO + select USB_EHCI_BIG_ENDIAN_DESC config MPC5121_ADS bool "Freescale MPC5121E ADS" diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index d30235b..db6ac38 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -172,12 +172,9 @@ static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb; static inline void mpc512x_free_bootmem(struct page *page) { - __ClearPageReserved(page); BUG_ON(PageTail(page)); BUG_ON(atomic_read(&page->_count) > 1); - atomic_set(&page->_count, 1); - __free_page(page); - totalram_pages++; + free_reserved_page(page); } void mpc512x_release_bootmem(void) diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 611e92f..7179726 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data) return IRQ_HANDLED; }; -static int __devinit gpio_halt_probe(struct platform_device *pdev) +static int gpio_halt_probe(struct platform_device *pdev) { enum of_gpio_flags flags; struct device_node *node = pdev->dev.of_node; @@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev) return 0; } -static int __devexit gpio_halt_remove(struct platform_device *pdev) +static int gpio_halt_remove(struct platform_device *pdev) { if (halt_node) { int gpio = of_get_gpio(halt_node, 0); @@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = { .of_match_table = gpio_halt_match, }, .probe = gpio_halt_probe, - .remove = __devexit_p(gpio_halt_remove), + .remove = gpio_halt_remove, }; module_platform_driver(gpio_halt_driver); diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index cea2f09..18e3b76 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -124,9 +124,8 @@ config 6xx select PPC_HAVE_PMU_SUPPORT config POWER3 - bool depends on PPC64 && PPC_BOOK3S - default y if !POWER4_ONLY + def_bool y config POWER4 depends on PPC64 && PPC_BOOK3S @@ -145,8 +144,7 @@ config TUNE_CELL but somewhat slower on other machines. This option only changes the scheduling of instructions, not the selection of instructions itself, so the resulting kernel will keep running on all other - machines. When building a kernel that is supposed to run only - on Cell, you should also select the POWER4_ONLY option. + machines. # this is temp to handle compat with arch=ppc config 8xx diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 53aaefe..9978f59 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -113,34 +113,10 @@ config CBE_THERM default m depends on CBE_RAS && SPU_BASE -config CBE_CPUFREQ - tristate "CBE frequency scaling" - depends on CBE_RAS && CPU_FREQ - default m - help - This adds the cpufreq driver for Cell BE processors. - For details, take a look at <file:Documentation/cpu-freq/>. - If you don't have such processor, say N - -config CBE_CPUFREQ_PMI_ENABLE - bool "CBE frequency scaling using PMI interface" - depends on CBE_CPUFREQ - default n - help - Select this, if you want to use the PMI interface - to switch frequencies. 
Using PMI, the - processor will not only be able to run at lower speed, - but also at lower core voltage. - -config CBE_CPUFREQ_PMI - tristate - depends on CBE_CPUFREQ_PMI_ENABLE - default CBE_CPUFREQ - config PPC_PMI tristate default y - depends on CBE_CPUFREQ_PMI || PPC_IBM_CELL_POWERBUTTON + depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON help PMI (Platform Management Interrupt) is a way to communicate with the BMC (Baseboard Management Controller). diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index a4a8935..fe053e7 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile @@ -5,9 +5,6 @@ obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ obj-$(CONFIG_CBE_RAS) += ras.o obj-$(CONFIG_CBE_THERM) += cbe_thermal.o -obj-$(CONFIG_CBE_CPUFREQ_PMI) += cbe_cpufreq_pmi.o -obj-$(CONFIG_CBE_CPUFREQ) += cbe-cpufreq.o -cbe-cpufreq-y += cbe_cpufreq_pervasive.o cbe_cpufreq.o obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c deleted file mode 100644 index d4c39e3..0000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.c +++ /dev/null @@ -1,209 +0,0 @@ -/* - * cpufreq driver for the cell processor - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include <linux/cpufreq.h> -#include <linux/module.h> -#include <linux/of_platform.h> - -#include <asm/machdep.h> -#include <asm/prom.h> -#include <asm/cell-regs.h> -#include "cbe_cpufreq.h" - -static DEFINE_MUTEX(cbe_switch_mutex); - - -/* the CBE supports an 8 step frequency scaling */ -static struct cpufreq_frequency_table cbe_freqs[] = { - {1, 0}, - {2, 0}, - {3, 0}, - {4, 0}, - {5, 0}, - {6, 0}, - {8, 0}, - {10, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -/* - * hardware specific functions - */ - -static int set_pmode(unsigned int cpu, unsigned int slow_mode) -{ - int rc; - - if (cbe_cpufreq_has_pmi) - rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode); - else - rc = cbe_cpufreq_set_pmode(cpu, slow_mode); - - pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu)); - - return rc; -} - -/* - * cpufreq functions - */ - -static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - const u32 *max_freqp; - u32 max_freq; - int i, cur_pmode; - struct device_node *cpu; - - cpu = of_get_cpu_node(policy->cpu, NULL); - - if (!cpu) - return -ENODEV; - - pr_debug("init cpufreq on CPU %d\n", policy->cpu); - - /* - * Let's check we can actually get to the CELL regs - */ - if (!cbe_get_cpu_pmd_regs(policy->cpu) || - !cbe_get_cpu_mic_tm_regs(policy->cpu)) { - pr_info("invalid CBE regs pointers for cpufreq\n"); - return -EINVAL; - } - - max_freqp = of_get_property(cpu, "clock-frequency", NULL); - - of_node_put(cpu); - - if (!max_freqp) - return -EINVAL; - - /* we need the freq in kHz */ - max_freq = *max_freqp / 1000; - - pr_debug("max clock-frequency is at %u kHz\n", max_freq); - pr_debug("initializing frequency table\n"); - - /* initialize frequency table */ - for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { - cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index; - pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); - } - - /* if DEBUG is enabled set_pmode() measures the latency - * of a transition */ - policy->cpuinfo.transition_latency = 25000; - - cur_pmode = cbe_cpufreq_get_pmode(policy->cpu); - pr_debug("current pmode is at %d\n",cur_pmode); - - policy->cur = cbe_freqs[cur_pmode].frequency; - -#ifdef CONFIG_SMP - cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); -#endif - - cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); - - /* this ensures that policy->cpuinfo_min - * and policy->cpuinfo_max are set correctly */ - return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); -} - -static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static int cbe_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, cbe_freqs); -} - -static int cbe_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - int rc; - struct cpufreq_freqs freqs; - unsigned int cbe_pmode_new; - - cpufreq_frequency_table_target(policy, - cbe_freqs, - target_freq, - relation, - &cbe_pmode_new); - - freqs.old = policy->cur; - freqs.new = cbe_freqs[cbe_pmode_new].frequency; - freqs.cpu = policy->cpu; - - mutex_lock(&cbe_switch_mutex); - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - pr_debug("setting frequency for cpu %d to %d kHz, " \ - "1/%d of max frequency\n", - policy->cpu, - cbe_freqs[cbe_pmode_new].frequency, - cbe_freqs[cbe_pmode_new].index); - - rc = set_pmode(policy->cpu, cbe_pmode_new); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - mutex_unlock(&cbe_switch_mutex); - - return rc; -} - 
-static struct cpufreq_driver cbe_cpufreq_driver = { - .verify = cbe_cpufreq_verify, - .target = cbe_cpufreq_target, - .init = cbe_cpufreq_cpu_init, - .exit = cbe_cpufreq_cpu_exit, - .name = "cbe-cpufreq", - .owner = THIS_MODULE, - .flags = CPUFREQ_CONST_LOOPS, -}; - -/* - * module init and destoy - */ - -static int __init cbe_cpufreq_init(void) -{ - if (!machine_is(cell)) - return -ENODEV; - - return cpufreq_register_driver(&cbe_cpufreq_driver); -} - -static void __exit cbe_cpufreq_exit(void) -{ - cpufreq_unregister_driver(&cbe_cpufreq_driver); -} - -module_init(cbe_cpufreq_init); -module_exit(cbe_cpufreq_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.h b/arch/powerpc/platforms/cell/cbe_cpufreq.h deleted file mode 100644 index c1d86bf..0000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * cbe_cpufreq.h - * - * This file contains the definitions used by the cbe_cpufreq driver. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - * - */ - -#include <linux/cpufreq.h> -#include <linux/types.h> - -int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode); -int cbe_cpufreq_get_pmode(int cpu); - -int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode); - -#if defined(CONFIG_CBE_CPUFREQ_PMI) || defined(CONFIG_CBE_CPUFREQ_PMI_MODULE) -extern bool cbe_cpufreq_has_pmi; -#else -#define cbe_cpufreq_has_pmi (0) -#endif diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c deleted file mode 100644 index 20472e4..0000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * pervasive backend for the cbe_cpufreq driver - * - * This driver makes use of the pervasive unit to - * engage the desired frequency. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/time.h> -#include <asm/machdep.h> -#include <asm/hw_irq.h> -#include <asm/cell-regs.h> - -#include "cbe_cpufreq.h" - -/* to write to MIC register */ -static u64 MIC_Slow_Fast_Timer_table[] = { - [0 ... 
7] = 0x007fc00000000000ull, -}; - -/* more values for the MIC */ -static u64 MIC_Slow_Next_Timer_table[] = { - 0x0000240000000000ull, - 0x0000268000000000ull, - 0x000029C000000000ull, - 0x00002D0000000000ull, - 0x0000300000000000ull, - 0x0000334000000000ull, - 0x000039C000000000ull, - 0x00003FC000000000ull, -}; - - -int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode) -{ - struct cbe_pmd_regs __iomem *pmd_regs; - struct cbe_mic_tm_regs __iomem *mic_tm_regs; - unsigned long flags; - u64 value; -#ifdef DEBUG - long time; -#endif - - local_irq_save(flags); - - mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu); - pmd_regs = cbe_get_cpu_pmd_regs(cpu); - -#ifdef DEBUG - time = jiffies; -#endif - - out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]); - out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]); - - out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]); - out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]); - - value = in_be64(&pmd_regs->pmcr); - /* set bits to zero */ - value &= 0xFFFFFFFFFFFFFFF8ull; - /* set bits to next pmode */ - value |= pmode; - - out_be64(&pmd_regs->pmcr, value); - -#ifdef DEBUG - /* wait until new pmode appears in status register */ - value = in_be64(&pmd_regs->pmsr) & 0x07; - while (value != pmode) { - cpu_relax(); - value = in_be64(&pmd_regs->pmsr) & 0x07; - } - - time = jiffies - time; - time = jiffies_to_msecs(time); - pr_debug("had to wait %lu ms for a transition using " \ - "pervasive unit\n", time); -#endif - local_irq_restore(flags); - - return 0; -} - - -int cbe_cpufreq_get_pmode(int cpu) -{ - int ret; - struct cbe_pmd_regs __iomem *pmd_regs; - - pmd_regs = cbe_get_cpu_pmd_regs(cpu); - ret = in_be64(&pmd_regs->pmsr) & 0x07; - - return ret; -} - diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c deleted file mode 100644 index 60a07a4..0000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * pmi backend for the cbe_cpufreq driver - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/timer.h> -#include <linux/module.h> -#include <linux/of_platform.h> - -#include <asm/processor.h> -#include <asm/prom.h> -#include <asm/pmi.h> -#include <asm/cell-regs.h> - -#ifdef DEBUG -#include <asm/time.h> -#endif - -#include "cbe_cpufreq.h" - -static u8 pmi_slow_mode_limit[MAX_CBE]; - -bool cbe_cpufreq_has_pmi = false; -EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); - -/* - * hardware specific functions - */ - -int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode) -{ - int ret; - pmi_message_t pmi_msg; -#ifdef DEBUG - long time; -#endif - pmi_msg.type = PMI_TYPE_FREQ_CHANGE; - pmi_msg.data1 = cbe_cpu_to_node(cpu); - pmi_msg.data2 = pmode; - -#ifdef DEBUG - time = jiffies; -#endif - pmi_send_message(pmi_msg); - -#ifdef DEBUG - time = jiffies - time; - time = jiffies_to_msecs(time); - pr_debug("had to wait %lu ms for a transition using " \ - "PMI\n", time); -#endif - ret = pmi_msg.data2; - pr_debug("PMI returned slow mode %d\n", ret); - - return ret; -} -EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi); - - -static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) -{ - u8 node, slow_mode; - - BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); - - node = pmi_msg.data1; - slow_mode = pmi_msg.data2; - - pmi_slow_mode_limit[node] = slow_mode; - - pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); -} - -static int pmi_notifier(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct cpufreq_policy *policy = data; - struct cpufreq_frequency_table *cbe_freqs; - u8 node; - - /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE - * and CPUFREQ_NOTIFY policy events?) - */ - if (event == CPUFREQ_START) - return 0; - - cbe_freqs = cpufreq_frequency_get_table(policy->cpu); - node = cbe_cpu_to_node(policy->cpu); - - pr_debug("got notified, event=%lu, node=%u\n", event, node); - - if (pmi_slow_mode_limit[node] != 0) { - pr_debug("limiting node %d to slow mode %d\n", - node, pmi_slow_mode_limit[node]); - - cpufreq_verify_within_limits(policy, 0, - - cbe_freqs[pmi_slow_mode_limit[node]].frequency); - } - - return 0; -} - -static struct notifier_block pmi_notifier_block = { - .notifier_call = pmi_notifier, -}; - -static struct pmi_handler cbe_pmi_handler = { - .type = PMI_TYPE_FREQ_CHANGE, - .handle_pmi_message = cbe_cpufreq_handle_pmi, -}; - - - -static int __init cbe_cpufreq_pmi_init(void) -{ - cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0; - - if (!cbe_cpufreq_has_pmi) - return -ENODEV; - - cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); - - return 0; -} - -static void __exit cbe_cpufreq_pmi_exit(void) -{ - cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); - pmi_unregister_handler(&cbe_pmi_handler); -} - -module_init(cbe_cpufreq_pmi_init); -module_exit(cbe_cpufreq_pmi_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 863184b..3f3bb4c 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -749,6 +749,7 @@ static struct file_system_type spufs_type = { .mount = spufs_mount, .kill_sb = kill_litter_super, }; +MODULE_ALIAS_FS("spufs"); static int __init spufs_init(void) { diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c index 890f30e..be1e795 100644 --- 
a/arch/powerpc/platforms/pasemi/cpufreq.c +++ b/arch/powerpc/platforms/pasemi/cpufreq.c @@ -273,10 +273,9 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy, freqs.old = policy->cur; freqs.new = pas_freqs[pas_astate_new].frequency; - freqs.cpu = policy->cpu; mutex_lock(&pas_switch_mutex); - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", policy->cpu, @@ -288,7 +287,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy, for_each_online_cpu(i) set_astate(i, pas_astate_new); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); mutex_unlock(&pas_switch_mutex); ppc_proc_freq = freqs.new * 1000ul; diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 311b804..3104fad 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c @@ -335,7 +335,8 @@ static int pmu_set_cpu_speed(int low_speed) return 0; } -static int do_set_cpu_speed(int speed_mode, int notify) +static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode, + int notify) { struct cpufreq_freqs freqs; unsigned long l3cr; @@ -343,13 +344,12 @@ static int do_set_cpu_speed(int speed_mode, int notify) freqs.old = cur_freq; freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; - freqs.cpu = smp_processor_id(); if (freqs.old == freqs.new) return 0; if (notify) - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); if (speed_mode == CPUFREQ_LOW && cpu_has_feature(CPU_FTR_L3CR)) { l3cr = _get_L3CR(); @@ -366,7 +366,7 @@ static int do_set_cpu_speed(int speed_mode, int notify) _set_L3CR(prev_l3cr); } if (notify) - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; return 0; @@ -393,7 +393,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy, target_freq, relation, &newstate)) return -EINVAL; - rc = do_set_cpu_speed(newstate, 1); + rc = do_set_cpu_speed(policy, newstate, 1); ppc_proc_freq = cur_freq * 1000ul; return rc; @@ -442,7 +442,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) no_schedule = 1; sleep_freq = cur_freq; if (cur_freq == low_freq && !is_pmu_based) - do_set_cpu_speed(CPUFREQ_HIGH, 0); + do_set_cpu_speed(policy, CPUFREQ_HIGH, 0); return 0; } @@ -458,7 +458,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy) * is that we force a switch to whatever it was, which is * probably high speed due to our suspend() routine */ - do_set_cpu_speed(sleep_freq == low_freq ? + do_set_cpu_speed(policy, sleep_freq == low_freq ? 
CPUFREQ_LOW : CPUFREQ_HIGH, 0); ppc_proc_freq = cur_freq * 1000ul; diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c index 9650c602..7ba4234 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_64.c +++ b/arch/powerpc/platforms/powermac/cpufreq_64.c @@ -339,11 +339,10 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy, freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; freqs.new = g5_cpu_freqs[newstate].frequency; - freqs.cpu = 0; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); rc = g5_switch_freq(newstate); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); mutex_unlock(&g5_switch_mutex); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 2372c60..9a432de 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -72,6 +72,7 @@ unsigned long memory_block_size_bytes(void) return get_memblock_size(); } +#ifdef CONFIG_MEMORY_HOTREMOVE static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { unsigned long start, start_pfn; @@ -153,6 +154,17 @@ static int pseries_remove_memory(struct device_node *np) ret = pseries_remove_memblock(base, lmb_size); return ret; } +#else +static inline int pseries_remove_memblock(unsigned long base, + unsigned int memblock_size) +{ + return -EOPNOTSUPP; +} +static inline int pseries_remove_memory(struct device_node *np) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_MEMORY_HOTREMOVE */ static int pseries_add_memory(struct device_node *np) { diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c index fcf4b4c..4557e91 100644 --- a/arch/powerpc/platforms/pseries/hvcserver.c +++ b/arch/powerpc/platforms/pseries/hvcserver.c @@ -23,6 +23,7 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/string.h> #include <asm/hvcall.h> #include <asm/hvcserver.h> @@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head, = (unsigned int)last_p_partition_ID; /* copy the Null-term char too */ - strncpy(&next_partner_info->location_code[0], + strlcpy(&next_partner_info->location_code[0], (char *)&pi_buff[2], - strlen((char *)&pi_buff[2]) + 1); + sizeof(next_partner_info->location_code)); list_add_tail(&(next_partner_info->node), head); next_partner_info = NULL; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0da39fe..299731e 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; - BUG_ON(lpar_rc != H_NOT_FOUND); + + /* + * The test for adjunct partition is performed before the + * ANDCOND test. H_RESOURCE may be returned, so we need to + * check for that as well. 
+ */ + BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); slot_offset++; slot_offset &= 0x7; diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c index 4d806b4..4644efa0 100644 --- a/arch/powerpc/platforms/pseries/processor_idle.c +++ b/arch/powerpc/platforms/pseries/processor_idle.c @@ -23,8 +23,8 @@ #include "pseries.h" struct cpuidle_driver pseries_idle_driver = { - .name = "pseries_idle", - .owner = THIS_MODULE, + .name = "pseries_idle", + .owner = THIS_MODULE, }; #define MAX_IDLE_STATE_COUNT 2 @@ -33,10 +33,8 @@ static int max_idle_state = MAX_IDLE_STATE_COUNT - 1; static struct cpuidle_device __percpu *pseries_cpuidle_devices; static struct cpuidle_state *cpuidle_state_table; -static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) +static inline void idle_loop_prolog(unsigned long *in_purr) { - - *kt_before = ktime_get(); *in_purr = mfspr(SPRN_PURR); /* * Indicate to the HV that we are idle. Now would be @@ -45,12 +43,10 @@ static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) get_lppaca()->idle = 1; } -static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before) +static inline void idle_loop_epilog(unsigned long in_purr) { get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; get_lppaca()->idle = 0; - - return ktime_to_us(ktime_sub(ktime_get(), kt_before)); } static int snooze_loop(struct cpuidle_device *dev, @@ -58,10 +54,9 @@ static int snooze_loop(struct cpuidle_device *dev, int index) { unsigned long in_purr; - ktime_t kt_before; int cpu = dev->cpu; - idle_loop_prolog(&in_purr, &kt_before); + idle_loop_prolog(&in_purr); local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); @@ -75,8 +70,8 @@ static int snooze_loop(struct cpuidle_device *dev, clear_thread_flag(TIF_POLLING_NRFLAG); smp_mb(); - dev->last_residency = - (int)idle_loop_epilog(in_purr, kt_before); + idle_loop_epilog(in_purr); + return index; } @@ -102,9 +97,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, int index) { unsigned long in_purr; - ktime_t kt_before; - idle_loop_prolog(&in_purr, &kt_before); + idle_loop_prolog(&in_purr); get_lppaca()->donate_dedicated_cpu = 1; ppc64_runlatch_off(); @@ -112,8 +106,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, check_and_cede_processor(); get_lppaca()->donate_dedicated_cpu = 0; - dev->last_residency = - (int)idle_loop_epilog(in_purr, kt_before); + + idle_loop_epilog(in_purr); + return index; } @@ -122,9 +117,8 @@ static int shared_cede_loop(struct cpuidle_device *dev, int index) { unsigned long in_purr; - ktime_t kt_before; - idle_loop_prolog(&in_purr, &kt_before); + idle_loop_prolog(&in_purr); /* * Yield the processor to the hypervisor. 
We return if @@ -135,8 +129,8 @@ static int shared_cede_loop(struct cpuidle_device *dev, */ check_and_cede_processor(); - dev->last_residency = - (int)idle_loop_epilog(in_purr, kt_before); + idle_loop_epilog(in_purr); + return index; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 13f85de..3e34cd2 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2947,7 +2947,7 @@ static void sysrq_handle_xmon(int key) static struct sysrq_key_op sysrq_xmon_op = { .handler = sysrq_handle_xmon, - .help_msg = "Xmon", + .help_msg = "xmon(x)", .action_msg = "Entering xmon", }; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index f6cc152..2c9789d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -91,6 +91,7 @@ config S390 select ARCH_INLINE_WRITE_UNLOCK_BH select ARCH_INLINE_WRITE_UNLOCK_IRQ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS select ARCH_SAVE_PAGE_KEYS if HIBERNATION select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT @@ -133,7 +134,7 @@ config S390 select HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 if 32BIT select HAVE_VIRT_CPU_ACCOUNTING - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select INIT_ALL_POSSIBLE select KTIME_SCALAR if 32BIT select MODULES_USE_ELF_RELA @@ -374,19 +375,6 @@ config PACK_STACK Say Y if you are unsure. -config SMALL_STACK - def_bool n - prompt "Use 8kb for kernel stack instead of 16kb" - depends on PACK_STACK && 64BIT && !LOCKDEP - help - If you say Y here and the compiler supports the -mkernel-backchain - option the kernel will use a smaller kernel stack size. The reduced - size is 8kb instead of 16kb. This allows to run more threads on a - system and reduces the pressure on the memory management for higher - order page allocations. - - Say N if you are unsure. - config CHECK_STACK def_bool y prompt "Detect kernel stack overflow" diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index fc32a2d..c56878e 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug @@ -17,20 +17,6 @@ config STRICT_DEVMEM If you are unsure, say Y. -config DEBUG_STRICT_USER_COPY_CHECKS - def_bool n - prompt "Strict user copy size checks" - ---help--- - Enabling this option turns a certain set of sanity checks for user - copy operations into compile time warnings. - - The copy_from_user() etc checks are there to help test if there - are sufficient security checks on the length argument of - the copy operation, by having gcc prove that the argument is - within bounds. - - If unsure, or if you run an older (pre 4.4) gcc, say N. 
- config S390_PTDUMP bool "Export kernel pagetable layout to userspace via debugfs" depends on DEBUG_KERNEL diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 7e3ce78..a7d68a4 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls ifeq ($(call cc-option-yn,-mkernel-backchain),y) cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK -cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK -aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK -ifdef CONFIG_SMALL_STACK -STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) ) -endif endif # new style option for packed stacks ifeq ($(call cc-option-yn,-mpacked-stack),y) cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK -cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK -aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK -ifdef CONFIG_SMALL_STACK -STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) ) -endif endif ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c index 9fd4a40..bb5dd49 100644 --- a/arch/s390/hypfs/hypfs_dbfs.c +++ b/arch/s390/hypfs/hypfs_dbfs.c @@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df) int hypfs_dbfs_init(void) { dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); - if (IS_ERR(dbfs_dir)) - return PTR_ERR(dbfs_dir); - return 0; + return PTR_RET(dbfs_dir); } void hypfs_dbfs_exit(void) diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 8538015..5f7d7ba 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -456,6 +456,7 @@ static struct file_system_type hypfs_type = { .mount = hypfs_mount, .kill_sb = hypfs_kill_super }; +MODULE_ALIAS_FS("s390_hypfs"); static const struct super_operations hypfs_s_ops = { .statfs = simple_statfs, diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 1542293..4d8604e 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -61,8 +61,6 @@ extern const char _sb_findmap[]; #ifndef CONFIG_64BIT -#define __BITOPS_ALIGN 3 -#define __BITOPS_WORDSIZE 32 #define __BITOPS_OR "or" #define __BITOPS_AND "nr" #define __BITOPS_XOR "xr" @@ -81,8 +79,6 @@ extern const char _sb_findmap[]; #else /* CONFIG_64BIT */ -#define __BITOPS_ALIGN 7 -#define __BITOPS_WORDSIZE 64 #define __BITOPS_OR "ogr" #define __BITOPS_AND "ngr" #define __BITOPS_XOR "xgr" @@ -101,8 +97,7 @@ extern const char _sb_findmap[]; #endif /* CONFIG_64BIT */ -#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) -#define __BITOPS_BARRIER() asm volatile("" : : : "memory") +#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) #ifdef CONFIG_SMP /* @@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make OR mask */ - mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); + mask = 1UL << (nr & (BITS_PER_LONG - 1)); /* Do the atomic update. 
*/ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); } @@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make AND mask */ - mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); + mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); /* Do the atomic update. */ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); } @@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make XOR mask */ - mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); + mask = 1UL << (nr & (BITS_PER_LONG - 1)); /* Do the atomic update. */ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); } @@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make OR/test mask */ - mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); + mask = 1UL << (nr & (BITS_PER_LONG - 1)); /* Do the atomic update. */ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); - __BITOPS_BARRIER(); + barrier(); return (old & mask) != 0; } @@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make AND/test mask */ - mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); + mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); /* Do the atomic update. */ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); - __BITOPS_BARRIER(); + barrier(); return (old ^ new) != 0; } @@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make XOR/test mask */ - mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); + mask = 1UL << (nr & (BITS_PER_LONG - 1)); /* Do the atomic update. 
*/ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); - __BITOPS_BARRIER(); + barrier(); return (old & mask) != 0; } #endif /* CONFIG_SMP */ @@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); asm volatile( " oc %O0(1,%R0),%1" : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); @@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); *(unsigned char *) addr |= 1 << (nr & 7); } @@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); asm volatile( " nc %O0(1,%R0),%1" : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); @@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); *(unsigned char *) addr &= ~(1 << (nr & 7)); } @@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); asm volatile( " xc %O0(1,%R0),%1" : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); @@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); *(unsigned char *) addr ^= 1 << (nr & 7); } @@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) unsigned long addr; unsigned char ch; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); ch = *(unsigned char *) addr; asm volatile( " oc %O0(1,%R0),%1" @@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) unsigned long addr; unsigned char ch; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); ch = *(unsigned char *) addr; asm volatile( " nc %O0(1,%R0),%1" @@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) unsigned long addr; unsigned char ch; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); ch = *(unsigned char *) addr; asm volatile( " xc %O0(1,%R0),%1" @@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr unsigned long addr; unsigned char ch; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); ch = *(volatile unsigned char *) addr; return (ch >> (nr & 7)) & 1; } @@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr static inline int 
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { return (((volatile char *) addr) - [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; + [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0; } #define test_bit(nr,addr) \ @@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { set = __flo_word(0, *p & (~0UL << bit)); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_bit_left(p, size); @@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { /* - * __ffz_word returns __BITOPS_WORDSIZE + * __ffz_word returns BITS_PER_LONG * if no zero bit is present in the word. */ set = __ffz_word(bit, *p >> bit); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_zero_bit(p, size); @@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { /* - * __ffs_word returns __BITOPS_WORDSIZE + * __ffs_word returns BITS_PER_LONG * if no one bit is present in the word. */ set = __ffs_word(0, *p & (~0UL << bit)); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_bit(p, size); @@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { /* - * s390 version of ffz returns __BITOPS_WORDSIZE + * s390 version of ffz returns BITS_PER_LONG * if no zero bit is present in the word. 
*/ set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_zero_bit_le(p, size); @@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { /* - * s390 version of ffz returns __BITOPS_WORDSIZE + * s390 version of ffz returns BITS_PER_LONG * if no zero bit is present in the word. */ set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_bit_le(p, size); diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index e606161..f201af8 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *); #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) extern struct ccw_device *ccw_device_probe_console(void); -extern int ccw_device_force_console(void); +extern void ccw_device_wait_idle(struct ccw_device *); +extern int ccw_device_force_console(struct ccw_device *); int ccw_device_siosl(struct ccw_device *); diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index ad2b924..ffb8989 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1, return 0; } -extern void wait_cons_dev(void); - extern void css_schedule_reprobe(void); extern void reipl_ccw_dev(struct ccw_dev_id *id); diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index f8c6df6..c1e7c64 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -70,6 +70,22 @@ typedef u32 compat_ulong_t; typedef u64 compat_u64; typedef u32 compat_uptr_t; +typedef struct { + u32 mask; + u32 addr; +} __aligned(8) psw_compat_t; + +typedef struct { + psw_compat_t psw; + u32 gprs[NUM_GPRS]; + u32 acrs[NUM_ACRS]; + u32 orig_gpr2; +} s390_compat_regs; + +typedef struct { + u32 gprs_high[NUM_GPRS]; +} s390_compat_regs_high; + struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; @@ -124,18 +140,33 @@ struct compat_flock64 { }; struct compat_statfs { - s32 f_type; - s32 f_bsize; - s32 f_blocks; - s32 f_bfree; - s32 f_bavail; - s32 f_files; - s32 f_ffree; + u32 f_type; + u32 f_bsize; + u32 f_blocks; + u32 f_bfree; + u32 f_bavail; + u32 f_files; + u32 f_ffree; + compat_fsid_t f_fsid; + u32 f_namelen; + u32 f_frsize; + u32 f_flags; + u32 f_spare[4]; +}; + +struct compat_statfs64 { + u32 f_type; + u32 f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; compat_fsid_t f_fsid; - s32 f_namelen; - s32 f_frsize; - s32 f_flags; - s32 f_spare[5]; + u32 f_namelen; + u32 f_frsize; + u32 f_flags; + u32 f_spare[4]; }; #define COMPAT_RLIM_OLD_INFINITY 0x7fffffff @@ -248,8 +279,6 @@ static inline int is_compat_task(void) return is_32bit_task(); } -#endif 
- static inline void __user *arch_compat_alloc_user_space(long len) { unsigned long stack; @@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len) return (void __user *) (stack - len); } +#endif + struct compat_ipc64_perm { compat_key_t key; __compat_uid32_t uid; diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index f1eddd1..c879fad 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -12,6 +12,7 @@ #ifndef _ASM_S390_CPU_MF_H #define _ASM_S390_CPU_MF_H +#include <linux/errno.h> #include <asm/facility.h> #define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */ diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index 8d48471..dc9200c 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h @@ -34,6 +34,8 @@ struct arsb { u32 reserved[4]; } __packed; +#define EQC_WR_PROHIBIT 22 + struct msb { u8 fmt:4; u8 oc:4; @@ -96,11 +98,13 @@ struct scm_device { #define OP_STATE_TEMP_ERR 2 #define OP_STATE_PERM_ERR 3 +enum scm_event {SCM_CHANGE, SCM_AVAIL}; + struct scm_driver { struct device_driver drv; int (*probe) (struct scm_device *scmdev); int (*remove) (struct scm_device *scmdev); - void (*notify) (struct scm_device *scmdev); + void (*notify) (struct scm_device *scmdev, enum scm_event event); void (*handler) (struct scm_device *scmdev, void *data, int error); }; diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 1bfdf24..78f4f87 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -119,6 +119,8 @@ */ #include <asm/ptrace.h> +#include <asm/compat.h> +#include <asm/syscall.h> #include <asm/user.h> typedef s390_fp_regs elf_fpregset_t; @@ -180,18 +182,31 @@ extern unsigned long elf_hwcap; extern char elf_platform[]; #define ELF_PLATFORM (elf_platform) -#ifdef CONFIG_64BIT +#ifndef CONFIG_COMPAT +#define SET_PERSONALITY(ex) \ +do { \ + set_personality(PER_LINUX | \ + (current->personality & (~PER_MASK))); \ + current_thread_info()->sys_call_table = \ + (unsigned long) &sys_call_table; \ +} while (0) +#else /* CONFIG_COMPAT */ #define SET_PERSONALITY(ex) \ do { \ if (personality(current->personality) != PER_LINUX32) \ set_personality(PER_LINUX | \ (current->personality & ~PER_MASK)); \ - if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ set_thread_flag(TIF_31BIT); \ - else \ + current_thread_info()->sys_call_table = \ + (unsigned long) &sys_call_table_emu; \ + } else { \ clear_thread_flag(TIF_31BIT); \ + current_thread_info()->sys_call_table = \ + (unsigned long) &sys_call_table; \ + } \ } while (0) -#endif /* CONFIG_64BIT */ +#endif /* CONFIG_COMPAT */ #define STACK_RND_MASK 0x7ffUL diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index 593753e..bd90359 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -114,7 +114,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, #define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \ ({ \ pte_t __pte = huge_ptep_get(__ptep); \ - if (pte_write(__pte)) { \ + if (huge_pte_write(__pte)) { \ huge_ptep_invalidate(__mm, __addr, __ptep); \ set_huge_pte_at(__mm, __addr, __ptep, \ huge_pte_wrprotect(__pte)); \ @@ -127,4 +127,58 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_invalidate(vma->vm_mm, address, ptep); } +static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) +{ + pte_t pte; + pmd_t pmd; + + pmd = 
mk_pmd_phys(page_to_phys(page), pgprot); + pte_val(pte) = pmd_val(pmd); + return pte; +} + +static inline int huge_pte_write(pte_t pte) +{ + pmd_t pmd; + + pmd_val(pmd) = pte_val(pte); + return pmd_write(pmd); +} + +static inline int huge_pte_dirty(pte_t pte) +{ + /* No dirty bit in the segment table entry. */ + return 0; +} + +static inline pte_t huge_pte_mkwrite(pte_t pte) +{ + pmd_t pmd; + + pmd_val(pmd) = pte_val(pte); + pte_val(pte) = pmd_val(pmd_mkwrite(pmd)); + return pte; +} + +static inline pte_t huge_pte_mkdirty(pte_t pte) +{ + /* No dirty bit in the segment table entry. */ + return pte; +} + +static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) +{ + pmd_t pmd; + + pmd_val(pmd) = pte_val(pte); + pte_val(pte) = pmd_val(pmd_modify(pmd, newprot)); + return pte; +} + +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pmd_clear((pmd_t *) ptep); +} + #endif /* _ASM_S390_HUGETLB_H */ diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 27cb321..379d96e 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr); #define ioremap_nocache(addr, size) ioremap(addr, size) #define ioremap_wc ioremap_nocache -/* TODO: s390 cannot support io_remap_pfn_range... */ -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - static inline void __iomem *ioremap(unsigned long offset, unsigned long size) { return (void __iomem *) offset; diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 05333b7..6c18012 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev) struct zpci_dev *zpci_alloc_device(void); int zpci_create_device(struct zpci_dev *); int zpci_enable_device(struct zpci_dev *); +int zpci_disable_device(struct zpci_dev *); void zpci_stop_device(struct zpci_dev *); void zpci_free_device(struct zpci_dev *); int zpci_scan_device(struct zpci_dev *); diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h index 6bbec42..1ca5d10 100644 --- a/arch/s390/include/asm/pci_debug.h +++ b/arch/s390/include/asm/pci_debug.h @@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id; extern debug_info_t *pci_debug_err_id; #ifdef CONFIG_PCI_DEBUG -#define zpci_dbg(fmt, args...) \ - do { \ - if (pci_debug_msg_id->level >= 2) \ - debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\ - } while (0) +#define zpci_dbg(imp, fmt, args...) \ + debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args) #else /* !CONFIG_PCI_DEBUG */ -#define zpci_dbg(fmt, args...) do { } while (0) +#define zpci_dbg(imp, fmt, args...) do { } while (0) #endif #define zpci_err(text...) 
\ diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index 1486a98..e6a2bdd 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -1,10 +1,6 @@ #ifndef _ASM_S390_PCI_INSN_H #define _ASM_S390_PCI_INSN_H -#include <linux/delay.h> - -#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ - /* Load/Store status codes */ #define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4 #define ZPCI_PCI_ST_FUNC_IN_ERR 8 @@ -82,199 +78,12 @@ struct zpci_fib { u64 reserved7; } __packed; -/* Modify PCI Function Controls */ -static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) -{ - u8 cc; - - asm volatile ( - " .insn rxy,0xe300000000d0,%[req],%[fib]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib) - : : "cc"); - *status = req >> 24 & 0xff; - return cc; -} - -static inline int mpcifc_instr(u64 req, struct zpci_fib *fib) -{ - u8 cc, status; - - do { - cc = __mpcifc(req, fib, &status); - if (cc == 2) - msleep(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - - if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d\n", - __func__, cc, status); - return (cc) ? -EIO : 0; -} - -/* Refresh PCI Translations */ -static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status) -{ - register u64 __addr asm("2") = addr; - register u64 __range asm("3") = range; - u8 cc; - - asm volatile ( - " .insn rre,0xb9d30000,%[fn],%[addr]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [fn] "+d" (fn) - : [addr] "d" (__addr), "d" (__range) - : "cc"); - *status = fn >> 24 & 0xff; - return cc; -} - -static inline int rpcit_instr(u64 fn, u64 addr, u64 range) -{ - u8 cc, status; - - do { - cc = __rpcit(fn, addr, range, &status); - if (cc == 2) - udelay(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - - if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n", - __func__, cc, status, addr, range); - return (cc) ? -EIO : 0; -} - -/* Store PCI function controls */ -static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status) -{ - u64 fn = (u64) handle << 32 | space << 16; - u8 cc; - - asm volatile ( - " .insn rxy,0xe300000000d4,%[fn],%[fib]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib) - : : "cc"); - *status = fn >> 24 & 0xff; - return cc; -} - -/* Set Interruption Controls */ -static inline void sic_instr(u16 ctl, char *unused, u8 isc) -{ - asm volatile ( - " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" - : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); -} - -/* PCI Load */ -static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status) -{ - register u64 __req asm("2") = req; - register u64 __offset asm("3") = offset; - u64 __data; - u8 cc; - - asm volatile ( - " .insn rre,0xb9d20000,%[data],%[req]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req) - : "d" (__offset) - : "cc"); - *status = __req >> 24 & 0xff; - *data = __data; - return cc; -} - -static inline int pcilg_instr(u64 *data, u64 req, u64 offset) -{ - u8 cc, status; - - do { - cc = __pcilg(data, req, offset, &status); - if (cc == 2) - udelay(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - - if (cc) { - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); - /* TODO: on IO errors set data to 0xff... - * here or in users of pcilg (le conversion)? - */ - } - return (cc) ? 
-EIO : 0; -} - -/* PCI Store */ -static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status) -{ - register u64 __req asm("2") = req; - register u64 __offset asm("3") = offset; - u8 cc; - - asm volatile ( - " .insn rre,0xb9d00000,%[data],%[req]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [req] "+d" (__req) - : "d" (__offset), [data] "d" (data) - : "cc"); - *status = __req >> 24 & 0xff; - return cc; -} - -static inline int pcistg_instr(u64 data, u64 req, u64 offset) -{ - u8 cc, status; - - do { - cc = __pcistg(data, req, offset, &status); - if (cc == 2) - udelay(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - - if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); - return (cc) ? -EIO : 0; -} - -/* PCI Store Block */ -static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) -{ - u8 cc; - - asm volatile ( - " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [req] "+d" (req) - : [offset] "d" (offset), [data] "Q" (*data) - : "cc"); - *status = req >> 24 & 0xff; - return cc; -} - -static inline int pcistb_instr(const u64 *data, u64 req, u64 offset) -{ - u8 cc, status; - - do { - cc = __pcistb(data, req, offset, &status); - if (cc == 2) - udelay(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); - return (cc) ? -EIO : 0; -} +int s390pci_mod_fc(u64 req, struct zpci_fib *fib); +int s390pci_refresh_trans(u64 fn, u64 addr, u64 range); +int s390pci_load(u64 *data, u64 req, u64 offset); +int s390pci_store(u64 data, u64 req, u64 offset); +int s390pci_store_block(const u64 *data, u64 req, u64 offset); +void set_irq_ctrl(u16 ctl, char *unused, u8 isc); #endif diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index 5fd81f3..83a9caa 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \ u64 data; \ int rc; \ \ - rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \ + rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \ if (rc) \ data = -1ULL; \ return (RETTYPE) data; \ @@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \ u64 data = (VALTYPE) val; \ \ - pcistg_instr(data, req, ZPCI_OFFSET(addr)); \ + s390pci_store(data, req, ZPCI_OFFSET(addr)); \ } zpci_read(8, u64) @@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len val = 0; /* let FW report error */ break; } - return pcistg_instr(val, req, offset); + return s390pci_store(val, req, offset); } static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) { u64 data; - u8 cc; + int cc; + + cc = s390pci_load(&data, req, offset); + if (cc) + goto out; - cc = pcilg_instr(&data, req, offset); switch (len) { case 1: *((u8 *) dst) = (u8) data; @@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) *((u64 *) dst) = (u64) data; break; } +out: return cc; } static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) { - return pcistb_instr(data, req, offset); + return s390pci_store_block(data, req, offset); } static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) diff --git a/arch/s390/include/asm/pgtable.h 
b/arch/s390/include/asm/pgtable.h index 4a29308..b462291 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -57,6 +57,10 @@ extern unsigned long zero_page_mask; (((unsigned long)(vaddr)) &zero_page_mask)))) #define __HAVE_COLOR_ZERO_PAGE +/* TODO: s390 cannot support io_remap_pfn_range... */ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #endif /* !__ASSEMBLY__ */ /* @@ -344,6 +348,7 @@ extern unsigned long MODULES_END; #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ /* Bits in the segment table entry */ +#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ @@ -419,6 +424,13 @@ extern unsigned long MODULES_END; #define __S110 PAGE_RW #define __S111 PAGE_RW +/* + * Segment entry (large page) protection definitions. + */ +#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) +#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) +#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) + static inline int mm_exclusive(struct mm_struct *mm) { return likely(mm == current->active_mm && @@ -759,6 +771,8 @@ void gmap_disable(struct gmap *gmap); int gmap_map_segment(struct gmap *gmap, unsigned long from, unsigned long to, unsigned long length); int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); +unsigned long __gmap_translate(unsigned long address, struct gmap *); +unsigned long gmap_translate(unsigned long address, struct gmap *); unsigned long __gmap_fault(unsigned long address, struct gmap *); unsigned long gmap_fault(unsigned long address, struct gmap *); void gmap_discard(unsigned long from, unsigned long to, struct gmap *); @@ -907,26 +921,6 @@ static inline pte_t pte_mkspecial(pte_t pte) #ifdef CONFIG_HUGETLB_PAGE static inline pte_t pte_mkhuge(pte_t pte) { - /* - * PROT_NONE needs to be remapped from the pte type to the ste type. - * The HW invalid bit is also different for pte and ste. The pte - * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE - * bit, so we don't have to clear it. - */ - if (pte_val(pte) & _PAGE_INVALID) { - if (pte_val(pte) & _PAGE_SWT) - pte_val(pte) |= _HPAGE_TYPE_NONE; - pte_val(pte) |= _SEGMENT_ENTRY_INV; - } - /* - * Clear SW pte bits, there are no SW bits in a segment table entry. - */ - pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC | - _PAGE_SWR | _PAGE_SWW); - /* - * Also set the change-override bit because we don't need dirty bit - * tracking for hugetlbfs pages. 
- */ pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); return pte; } @@ -1271,31 +1265,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) } } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - -#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) -#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) -#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) - -#define __HAVE_ARCH_PGTABLE_DEPOSIT -extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); - -#define __HAVE_ARCH_PGTABLE_WITHDRAW -extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); - -static inline int pmd_trans_splitting(pmd_t pmd) -{ - return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; -} - -static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, pmd_t entry) -{ - if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) - pmd_val(entry) |= _SEGMENT_ENTRY_CO; - *pmdp = entry; -} - +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) { /* @@ -1316,10 +1286,11 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) return pmd; } -static inline pmd_t pmd_mkhuge(pmd_t pmd) +static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) { - pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; - return pmd; + pmd_t __pmd; + pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); + return __pmd; } static inline pmd_t pmd_mkwrite(pmd_t pmd) @@ -1329,6 +1300,34 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; return pmd; } +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +#define __HAVE_ARCH_PGTABLE_DEPOSIT +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); + +#define __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t entry) +{ + if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) + pmd_val(entry) |= _SEGMENT_ENTRY_CO; + *pmdp = entry; +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; + return pmd; +} static inline pmd_t pmd_wrprotect(pmd_t pmd) { @@ -1425,13 +1424,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, } } -static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) -{ - pmd_t __pmd; - pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); - return __pmd; -} - #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) @@ -1531,7 +1523,8 @@ extern int s390_enable_sie(void); /* * No page table caches to initialise */ -#define pgtable_cache_init() do { } while (0) +static inline void pgtable_cache_init(void) { } +static inline void check_pgt_cache(void) { } #include <asm-generic/pgtable.h> diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 94e749c..6b49987 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t); extern void show_code(struct pt_regs *regs); extern void print_fn_code(unsigned char *code, unsigned long len); -extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]); +extern int 
insn_to_mnemonic(unsigned char *instruction, char *buf, + unsigned int len); unsigned long get_wchan(struct task_struct *p); #define task_pt_regs(tsk) ((struct pt_regs *) \ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 3ee5da3..559512a 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -9,9 +9,7 @@ #include <uapi/asm/ptrace.h> #ifndef __ASSEMBLY__ -#ifndef __s390x__ -#else /* __s390x__ */ -#endif /* __s390x__ */ + extern long psw_kernel_bits; extern long psw_user_bits; @@ -77,8 +75,6 @@ struct per_struct_kernel { #define PER_CONTROL_SUSPENSION 0x00400000UL #define PER_CONTROL_ALTERATION 0x00200000UL -#ifdef __s390x__ -#endif /* __s390x__ */ /* * These are defined as per linux/ptrace.h, which see. */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index fe7b997..cd29d2f 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -23,6 +23,7 @@ * type here is what we want [need] for both 32 bit and 64 bit systems. */ extern const unsigned int sys_call_table[]; +extern const unsigned int sys_call_table_emu[]; static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 9e2cfe0..eb5f64d 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -14,13 +14,8 @@ #define THREAD_ORDER 1 #define ASYNC_ORDER 1 #else /* CONFIG_64BIT */ -#ifndef __SMALL_STACK #define THREAD_ORDER 2 #define ASYNC_ORDER 2 -#else -#define THREAD_ORDER 1 -#define ASYNC_ORDER 1 -#endif #endif /* CONFIG_64BIT */ #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) @@ -41,6 +36,7 @@ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ + unsigned long sys_call_table; /* System call table address */ unsigned int cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 1d8fe2b..6b32af3 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce) static inline void __tlb_flush_mm(struct mm_struct * mm) { - if (unlikely(cpumask_empty(mm_cpumask(mm)))) - return; /* * If the machine has IDTE we prefer to do a per mm flush * on all cpus instead of doing a local flush if the mm diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h index a5ca214..3aa9f1e 100644 --- a/arch/s390/include/uapi/asm/ptrace.h +++ b/arch/s390/include/uapi/asm/ptrace.h @@ -215,12 +215,6 @@ typedef struct unsigned long addr; } __attribute__ ((aligned(8))) psw_t; -typedef struct -{ - __u32 mask; - __u32 addr; -} __attribute__ ((aligned(8))) psw_compat_t; - #ifndef __s390x__ #define PSW_MASK_PER 0x40000000UL @@ -295,20 +289,6 @@ typedef struct unsigned long orig_gpr2; } s390_regs; -typedef struct -{ - psw_compat_t psw; - __u32 gprs[NUM_GPRS]; - __u32 acrs[NUM_ACRS]; - __u32 orig_gpr2; -} s390_compat_regs; - -typedef struct -{ - __u32 gprs_high[NUM_GPRS]; -} s390_compat_regs_high; - - /* * Now for the user space program event recording (trace) definitions. 
* The following structures are used only for the ptrace interface, don't diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h index 5acca0a..a61d538 100644 --- a/arch/s390/include/uapi/asm/statfs.h +++ b/arch/s390/include/uapi/asm/statfs.h @@ -7,9 +7,6 @@ #ifndef _S390_STATFS_H #define _S390_STATFS_H -#ifndef __s390x__ -#include <asm-generic/statfs.h> -#else /* * We can't use <asm-generic/statfs.h> because in 64-bit mode * we mix ints of different sizes in our struct statfs. @@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t; #endif struct statfs { - int f_type; - int f_bsize; - long f_blocks; - long f_bfree; - long f_bavail; - long f_files; - long f_ffree; + unsigned int f_type; + unsigned int f_bsize; + unsigned long f_blocks; + unsigned long f_bfree; + unsigned long f_bavail; + unsigned long f_files; + unsigned long f_ffree; __kernel_fsid_t f_fsid; - int f_namelen; - int f_frsize; - int f_flags; - int f_spare[4]; + unsigned int f_namelen; + unsigned int f_frsize; + unsigned int f_flags; + unsigned int f_spare[4]; }; struct statfs64 { - int f_type; - int f_bsize; - long f_blocks; - long f_bfree; - long f_bavail; - long f_files; - long f_ffree; + unsigned int f_type; + unsigned int f_bsize; + unsigned long f_blocks; + unsigned long f_bfree; + unsigned long f_bavail; + unsigned long f_files; + unsigned long f_ffree; __kernel_fsid_t f_fsid; - int f_namelen; - int f_frsize; - int f_flags; - int f_spare[4]; + unsigned int f_namelen; + unsigned int f_frsize; + unsigned int f_flags; + unsigned int f_spare[4]; }; -struct compat_statfs64 { - __u32 f_type; - __u32 f_bsize; - __u64 f_blocks; - __u64 f_bfree; - __u64 f_bavail; - __u64 f_files; - __u64 f_ffree; - __kernel_fsid_t f_fsid; - __u32 f_namelen; - __u32 f_frsize; - __u32 f_flags; - __u32 f_spare[4]; -}; - -#endif /* __s390x__ */ #endif diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 2ac311e..1386fca 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -14,16 +14,25 @@ endif CFLAGS_smp.o := -Wno-nonnull # +# Disable tailcall optimizations for stack / callchain walking functions +# since this might generate broken code when accessing register 15 and +# passing its content to other functions. 
+# +CFLAGS_stacktrace.o += -fno-optimize-sibling-calls +CFLAGS_dumpstack.o += -fno-optimize-sibling-calls + +# # Pass UTS_MACHINE for user_regset definition # CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w -obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ - processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ - debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ - sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o +obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o +obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o +obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o +obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o +obj-y += dumpstack.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index fface87..7a82f9f 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -35,6 +35,7 @@ int main(void) DEFINE(__TI_task, offsetof(struct thread_info, task)); DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); DEFINE(__TI_flags, offsetof(struct thread_info, flags)); + DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table)); DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 6de049f..c439ac9 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka, /* set extra registers only for synchronous signals */ regs->gprs[4] = regs->int_code & 127; regs->gprs[5] = regs->int_parm_long; + regs->gprs[6] = task_thread_info(current)->last_break; } /* Place signal number on stack to allow backtrace from handler. */ @@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, regs->gprs[2] = map_signal(sig); regs->gprs[3] = (__force __u64) &frame->info; regs->gprs[4] = (__force __u64) &frame->uc; + regs->gprs[5] = task_thread_info(current)->last_break; return 0; give_sigsegv: diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 3ad5e95..7f4a4a8 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code) * insn_to_mnemonic - decode an s390 instruction * @instruction: instruction to decode * @buf: buffer to fill with mnemonic + * @len: length of buffer * * Decode the instruction at @instruction and store the corresponding - * mnemonic into @buf. + * mnemonic into @buf of length @len. * @buf is left unchanged if the instruction could not be decoded. * Returns: * %0 on success, %-ENOENT if the instruction was not found. 
*/ -int insn_to_mnemonic(unsigned char *instruction, char buf[8]) +int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len) { struct insn *insn; @@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8]) if (!insn) return -ENOENT; if (insn->name[0] == '\0') - snprintf(buf, 8, "%s", + snprintf(buf, len, "%s", long_insn_name[(int) insn->name[1]]); else - snprintf(buf, 8, "%.5s", insn->name); + snprintf(buf, len, "%.5s", insn->name); return 0; } EXPORT_SYMBOL_GPL(insn_to_mnemonic); diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c new file mode 100644 index 0000000..2982974 --- /dev/null +++ b/arch/s390/kernel/dumpstack.c @@ -0,0 +1,212 @@ +/* + * Stack dumping functions + * + * Copyright IBM Corp. 1999, 2013 + */ + +#include <linux/kallsyms.h> +#include <linux/hardirq.h> +#include <linux/kprobes.h> +#include <linux/utsname.h> +#include <linux/export.h> +#include <linux/kdebug.h> +#include <linux/ptrace.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <asm/processor.h> +#include <asm/debug.h> +#include <asm/ipl.h> + +#ifndef CONFIG_64BIT +#define LONG "%08lx " +#define FOURLONG "%08lx %08lx %08lx %08lx\n" +static int kstack_depth_to_print = 12; +#else /* CONFIG_64BIT */ +#define LONG "%016lx " +#define FOURLONG "%016lx %016lx %016lx %016lx\n" +static int kstack_depth_to_print = 20; +#endif /* CONFIG_64BIT */ + +/* + * For show_trace we have three different stacks to consider: + * - the panic stack which is used if the kernel stack has overflowed + * - the asynchronous interrupt stack (cpu related) + * - the synchronous kernel stack (process related) + * The stack trace can start at any of the three stacks and can potentially + * touch all of them. The order is: panic stack, async stack, sync stack. + */ +static unsigned long +__show_trace(unsigned long sp, unsigned long low, unsigned long high) +{ + struct stack_frame *sf; + struct pt_regs *regs; + + while (1) { + sp = sp & PSW_ADDR_INSN; + if (sp < low || sp > high - sizeof(*sf)) + return sp; + sf = (struct stack_frame *) sp; + printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); + print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); + /* Follow the backchain. */ + while (1) { + low = sp; + sp = sf->back_chain & PSW_ADDR_INSN; + if (!sp) + break; + if (sp <= low || sp > high - sizeof(*sf)) + return sp; + sf = (struct stack_frame *) sp; + printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); + print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); + } + /* Zero backchain detected, check for interrupt frame. */ + sp = (unsigned long) (sf + 1); + if (sp <= low || sp > high - sizeof(*regs)) + return sp; + regs = (struct pt_regs *) sp; + printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); + print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); + low = sp; + sp = regs->gprs[15]; + } +} + +static void show_trace(struct task_struct *task, unsigned long *stack) +{ + register unsigned long __r15 asm ("15"); + unsigned long sp; + + sp = (unsigned long) stack; + if (!sp) + sp = task ?
task->thread.ksp : __r15; + printk("Call Trace:\n"); +#ifdef CONFIG_CHECK_STACK + sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, + S390_lowcore.panic_stack); +#endif + sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, + S390_lowcore.async_stack); + if (task) + __show_trace(sp, (unsigned long) task_stack_page(task), + (unsigned long) task_stack_page(task) + THREAD_SIZE); + else + __show_trace(sp, S390_lowcore.thread_info, + S390_lowcore.thread_info + THREAD_SIZE); + if (!task) + task = current; + debug_show_held_locks(task); +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + register unsigned long *__r15 asm ("15"); + unsigned long *stack; + int i; + + if (!sp) + stack = task ? (unsigned long *) task->thread.ksp : __r15; + else + stack = sp; + + for (i = 0; i < kstack_depth_to_print; i++) { + if (((addr_t) stack & (THREAD_SIZE-1)) == 0) + break; + if ((i * sizeof(long) % 32) == 0) + printk("%s ", i == 0 ? "" : "\n"); + printk(LONG, *stack++); + } + printk("\n"); + show_trace(task, sp); +} + +static void show_last_breaking_event(struct pt_regs *regs) +{ +#ifdef CONFIG_64BIT + printk("Last Breaking-Event-Address:\n"); + printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); + print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); +#endif +} + +static inline int mask_bits(struct pt_regs *regs, unsigned long bits) +{ + return (regs->psw.mask & bits) / ((~bits + 1) & bits); +} + +void show_registers(struct pt_regs *regs) +{ + char *mode; + + mode = user_mode(regs) ? "User" : "Krnl"; + printk("%s PSW : %p %p", + mode, (void *) regs->psw.mask, + (void *) regs->psw.addr); + print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); + printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " + "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), + mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), + mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), + mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), + mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), + mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); +#ifdef CONFIG_64BIT + printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); +#endif + printk("\n%s GPRS: " FOURLONG, mode, + regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); + printk(" " FOURLONG, + regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); + printk(" " FOURLONG, + regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); + printk(" " FOURLONG, + regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); + show_code(regs); +} + +void show_regs(struct pt_regs *regs) +{ + show_regs_print_info(KERN_DEFAULT); + show_registers(regs); + /* Show stack backtrace if pt_regs is from kernel mode */ + if (!user_mode(regs)) + show_trace(NULL, (unsigned long *) regs->gprs[15]); + show_last_breaking_event(regs); +} + +static DEFINE_SPINLOCK(die_lock); + +void die(struct pt_regs *regs, const char *str) +{ + static int die_counter; + + oops_enter(); + lgr_info_log(); + debug_stop_all(); + console_verbose(); + spin_lock_irq(&die_lock); + bust_spinlocks(1); + printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); +#ifdef CONFIG_PREEMPT + printk("PREEMPT "); +#endif +#ifdef CONFIG_SMP + printk("SMP "); +#endif +#ifdef CONFIG_DEBUG_PAGEALLOC + printk("DEBUG_PAGEALLOC"); +#endif + printk("\n"); + notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); + print_modules(); + show_regs(regs); + bust_spinlocks(0); + add_taint(TAINT_DIE, 
LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irq(&die_lock); + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception: panic_on_oops"); + oops_exit(); + do_exit(SIGSEGV); +} diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 5502285..4d5e6f8 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT +STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE #define BASED(name) name-system_call(%r13) @@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT sra %r14,\shift jnz 1f CHECK_STACK 1<<\shift,\savearea + ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: l %r15,\stack # load target stack -2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm .macro ADD64 high,low,timer @@ -150,7 +151,7 @@ ENTRY(__switch_to) l %r4,__THREAD_info(%r2) # get thread_info of prev l %r5,__THREAD_info(%r3) # get thread_info of next lr %r15,%r5 - ahi %r15,STACK_SIZE # end of kernel stack of next + ahi %r15,STACK_INIT # end of kernel stack of next st %r3,__LC_CURRENT # store task struct of next st %r5,__LC_THREAD_INFO # store thread info of next st %r15,__LC_KERNEL_STACK # store end of kernel stack @@ -178,7 +179,6 @@ sysc_stm: l %r13,__LC_SVC_NEW_PSW+4 sysc_per: l %r15,__LC_KERNEL_STACK - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs sysc_vtime: UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER @@ -188,6 +188,7 @@ sysc_vtime: mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC sysc_do_svc: oi __TI_flags+3(%r12),_TIF_SYSCALL + l %r10,__TI_sysc_table(%r12) # 31 bit system call table lh %r8,__PT_INT_CODE+2(%r11) sla %r8,2 # shift and test for svc0 jnz sysc_nr_ok @@ -198,7 +199,6 @@ sysc_do_svc: lr %r8,%r1 sla %r8,2 sysc_nr_ok: - l %r10,BASED(.Lsys_call_table) # 31 bit system call table xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) st %r2,__PT_ORIG_GPR2(%r11) st %r7,STACK_FRAME_OVERHEAD(%r15) @@ -359,11 +359,11 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+3,0x80 # check for per exception jnz pgm_svcper # -> single stepped svc 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC + ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER l %r15,__LC_KERNEL_STACK -2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) stm %r0,%r7,__PT_R0(%r11) mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC stm %r8,%r9,__PT_PSW(%r11) @@ -485,7 +485,6 @@ io_work: # io_work_user: l %r1,__LC_KERNEL_STACK - ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) @@ -636,7 +635,8 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER mcck_skip: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT - mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA + stm %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 stm %r8,%r9,__PT_PSW(%r11) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) l %r1,BASED(.Ldo_machine_check) @@ -645,7 +645,6 @@ mcck_skip: tm __PT_PSW+1(%r11),0x01 # returning to user ? 
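# If so, the pt_regs are copied onto the kernel stack below; __LC_KERNEL_STACK
# already leaves room for STACK_FRAME_OVERHEAD + __PT_SIZE (see STACK_INIT),
# so no further adjustment of %r1 is needed.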
jno mcck_return l %r1,__LC_KERNEL_STACK # switch to kernel stack - ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r15) @@ -673,6 +672,7 @@ mcck_panic: sra %r14,PAGE_SHIFT jz 0f l %r15,__LC_PANIC_STACK + j mcck_skip 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j mcck_skip @@ -713,12 +713,10 @@ ENTRY(restart_int_handler) */ stack_overflow: l %r15,__LC_PANIC_STACK # change to panic stack - ahi %r15,-__PT_SIZE # create pt_regs - stm %r0,%r7,__PT_R0(%r15) - stm %r8,%r9,__PT_PSW(%r15) + la %r11,STACK_FRAME_OVERHEAD(%r15) + stm %r0,%r7,__PT_R0(%r11) + stm %r8,%r9,__PT_PSW(%r11) mvc __PT_R8(32,%r11),0(%r14) - lr %r15,%r11 - ahi %r15,-STACK_FRAME_OVERHEAD l %r1,BASED(1f) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) lr %r2,%r11 # pass pointer to pt_regs @@ -798,15 +796,14 @@ cleanup_system_call: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER # set up saved register 11 l %r15,__LC_KERNEL_STACK - ahi %r15,-__PT_SIZE - st %r15,12(%r11) # r11 pt_regs pointer + la %r9,STACK_FRAME_OVERHEAD(%r15) + st %r9,12(%r11) # r11 pt_regs pointer # fill pt_regs - mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC - stm %r0,%r7,__PT_R0(%r15) - mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW - mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC + mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC + stm %r0,%r7,__PT_R0(%r9) + mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW + mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC # setup saved register 15 - ahi %r15,-STACK_FRAME_OVERHEAD st %r15,28(%r11) # r15 stack pointer # set new psw address and exit l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 @@ -909,7 +906,6 @@ cleanup_idle_wait: .Ltrace_enter: .long do_syscall_trace_enter .Ltrace_exit: .long do_syscall_trace_exit .Lschedule_tail: .long schedule_tail -.Lsys_call_table: .long sys_call_table .Lsysc_per: .long sysc_per + 0x80000000 #ifdef CONFIG_TRACE_IRQFLAGS .Lhardirqs_on: .long trace_hardirqs_on_caller diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index c3a736a..aa0ab02e 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -7,6 +7,7 @@ #include <asm/cputime.h> extern void *restart_stack; +extern unsigned long suspend_zero_pages; void system_call(void); void pgm_check_handler(void); diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 9c837c1..4c17eec 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT +STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING | _TIF_PER_TRAP ) @@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) srag %r14,%r14,\shift jnz 1f CHECK_STACK 1<<\shift,\savearea + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: lg %r15,\stack # load target stack -2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm .macro UPDATE_VTIME scratch,enter_timer @@ -177,7 +178,7 @@ ENTRY(__switch_to) lg %r4,__THREAD_info(%r2) # get thread_info of prev lg %r5,__THREAD_info(%r3) # get thread_info of next lgr %r15,%r5 - aghi %r15,STACK_SIZE # end of kernel stack of next + aghi %r15,STACK_INIT # end of kernel stack of next stg %r3,__LC_CURRENT # store task struct of next stg %r5,__LC_THREAD_INFO # store thread info of next stg 
%r15,__LC_KERNEL_STACK # store end of kernel stack @@ -203,10 +204,8 @@ sysc_stmg: stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call sysc_per: lg %r15,__LC_KERNEL_STACK - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs sysc_vtime: UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER @@ -217,6 +216,7 @@ sysc_vtime: mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC sysc_do_svc: oi __TI_flags+7(%r12),_TIF_SYSCALL + lg %r10,__TI_sysc_table(%r12) # address of system call table llgh %r8,__PT_INT_CODE+2(%r11) slag %r8,%r8,2 # shift and test for svc 0 jnz sysc_nr_ok @@ -227,13 +227,6 @@ sysc_do_svc: sth %r1,__PT_INT_CODE+2(%r11) slag %r8,%r1,2 sysc_nr_ok: - larl %r10,sys_call_table # 64 bit system call table -#ifdef CONFIG_COMPAT - tm __TI_flags+5(%r12),(_TIF_31BIT>>16) - jno sysc_noemu - larl %r10,sys_call_table_emu # 31 bit system call table -sysc_noemu: -#endif xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stg %r2,__PT_ORIG_GPR2(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) @@ -389,6 +382,7 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+3,0x80 # check for per exception jnz pgm_svcper # -> single stepped svc 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER LAST_BREAK %r14 @@ -398,8 +392,7 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+2,0x02 # check for transaction abort jz 2f mvc __THREAD_trap_tdb(256,%r14),0(%r13) -2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -526,7 +519,6 @@ io_work: # io_work_user: lg %r1,__LC_KERNEL_STACK - aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) @@ -678,8 +670,9 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER LAST_BREAK %r14 mcck_skip: - lghi %r14,__LC_GPREGS_SAVE_AREA - mvc __PT_R0(128,%r11),0(%r14) + lghi %r14,__LC_GPREGS_SAVE_AREA+64 + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),0(%r14) stmg %r8,%r9,__PT_PSW(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs @@ -687,7 +680,6 @@ mcck_skip: tm __PT_PSW+1(%r11),0x01 # returning to user ? jno mcck_return lg %r1,__LC_KERNEL_STACK # switch to kernel stack - aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) @@ -754,14 +746,12 @@ ENTRY(restart_int_handler) * Setup a pt_regs so that show_trace can provide a good call trace. 
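 * The handler switches %r15 to the panic stack and builds the pt_regs at
 * STACK_FRAME_OVERHEAD(%r15) before branching to kernel_stack_overflow.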
*/ stack_overflow: - lg %r11,__LC_PANIC_STACK # change to panic stack - aghi %r11,-__PT_SIZE # create pt_regs + lg %r15,__LC_PANIC_STACK # change to panic stack + la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_R8(64,%r11),0(%r14) stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 - lgr %r15,%r11 - aghi %r15,-STACK_FRAME_OVERHEAD xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs jg kernel_stack_overflow @@ -845,15 +835,14 @@ cleanup_system_call: mvc __TI_last_break(8,%r12),16(%r11) 0: # set up saved register r11 lg %r15,__LC_KERNEL_STACK - aghi %r15,-__PT_SIZE - stg %r15,24(%r11) # r11 pt_regs pointer + la %r9,STACK_FRAME_OVERHEAD(%r15) + stg %r9,24(%r11) # r11 pt_regs pointer # fill pt_regs - mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC - stmg %r0,%r7,__PT_R0(%r15) - mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW - mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC + mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC + stmg %r0,%r7,__PT_R0(%r9) + mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW + mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC # setup saved register r15 - aghi %r15,-STACK_FRAME_OVERHEAD stg %r15,56(%r11) # r15 stack pointer # set new psw address and exit larl %r9,sysc_do_svc @@ -1010,6 +999,7 @@ sys_call_table: #ifdef CONFIG_COMPAT #define SYSCALL(esa,esame,emu) .long emu + .globl sys_call_table_emu sys_call_table_emu: #include "syscalls.S" #undef SYSCALL diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1630f43..4f5ef629 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -33,7 +33,7 @@ struct irq_class { }; /* - * The list of "main" irq classes on s390. This is the list of interrrupts + * The list of "main" irq classes on s390. This is the list of interrupts * that appear both in /proc/stat ("intr" line) and /proc/interrupts. * Historically only external and I/O interrupts have been part of /proc/stat. * We can't add the split external and I/O sub classes since the first field diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index b3de277..ac21781 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -13,6 +13,7 @@ #include <linux/reboot.h> #include <linux/ftrace.h> #include <linux/debug_locks.h> +#include <linux/suspend.h> #include <asm/cio.h> #include <asm/setup.h> #include <asm/pgtable.h> @@ -67,6 +68,35 @@ void setup_regs(void) memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); } +/* + * PM notifier callback for kdump + */ +static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action, + void *ptr) +{ + switch (action) { + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + if (crashk_res.start) + crash_map_reserved_pages(); + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + if (crashk_res.start) + crash_unmap_reserved_pages(); + break; + default: + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static int __init machine_kdump_pm_init(void) +{ + pm_notifier(machine_kdump_pm_cb, 0); + return 0; +} +arch_initcall(machine_kdump_pm_init); #endif /* diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 536d645..2bc3edd 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -61,18 +61,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sf->gprs[8]; } -/* - * The idle loop on a S390... 
- */ -static void default_idle(void) +void arch_cpu_idle(void) { - if (cpu_is_offline(smp_processor_id())) - cpu_die(); - local_irq_disable(); - if (need_resched()) { - local_irq_enable(); - return; - } local_mcck_disable(); if (test_thread_flag(TIF_MCCK_PENDING)) { local_mcck_enable(); @@ -83,19 +73,15 @@ static void default_idle(void) vtime_stop_cpu(); } -void cpu_idle(void) +void arch_cpu_idle_exit(void) { - for (;;) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (!need_resched() && !test_thread_flag(TIF_MCCK_PENDING)) - default_idle(); - rcu_idle_exit(); - tick_nohz_idle_exit(); - if (test_thread_flag(TIF_MCCK_PENDING)) - s390_handle_mcck(); - schedule_preempt_disabled(); - } + if (test_thread_flag(TIF_MCCK_PENDING)) + s390_handle_mcck(); +} + +void arch_cpu_idle_dead(void) +{ + cpu_die(); } extern void __kprobes kernel_thread_starter(void); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a5360de..0f419c5 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -377,11 +377,14 @@ static void __init setup_lowcore(void) PSW_MASK_DAT | PSW_MASK_MCHECK; lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; lc->clock_comparator = -1ULL; - lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; + lc->kernel_stack = ((unsigned long) &init_thread_union) + + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->async_stack = (unsigned long) - __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; + __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->panic_stack = (unsigned long) - __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; + __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->current_task = (unsigned long) init_thread_union.thread_info.task; lc->thread_info = (unsigned long) &init_thread_union; lc->machine_flags = S390_lowcore.machine_flags; @@ -571,6 +574,8 @@ static void __init setup_memory_end(void) /* Split remaining virtual space between 1:1 mapping & vmemmap array */ tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); + /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ + tmp = SECTION_ALIGN_UP(tmp); tmp = VMALLOC_START - tmp * sizeof(struct page); tmp &= ~((vmax >> 11) - 1); /* align to page table level */ tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 549c9d1..8074cb4 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) lc = pcpu->lowcore; memcpy(lc, &S390_lowcore, 512); memset((char *) lc + 512, 0, sizeof(*lc) - 512); - lc->async_stack = pcpu->async_stack + ASYNC_SIZE; - lc->panic_stack = pcpu->panic_stack + PAGE_SIZE; + lc->async_stack = pcpu->async_stack + ASYNC_SIZE + - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); + lc->panic_stack = pcpu->panic_stack + PAGE_SIZE + - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->cpu_nr = cpu; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { @@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) struct _lowcore *lc = pcpu->lowcore; struct thread_info *ti = task_thread_info(tsk); - lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; + lc->kernel_stack = (unsigned long) task_stack_page(tsk) + + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->thread_info = (unsigned long) 
task_thread_info(tsk); lc->current_task = (unsigned long) tsk; lc->user_timer = ti->user_timer; @@ -711,8 +714,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid) set_cpu_online(smp_processor_id(), true); inc_irq_stat(CPU_RST); local_irq_enable(); - /* cpu_idle will call schedule for us */ - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } /* Upping and downing of CPUs */ @@ -810,8 +812,10 @@ void __init smp_prepare_boot_cpu(void) pcpu->state = CPU_STATE_CONFIGURED; pcpu->address = boot_cpu_address; pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); - pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; - pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; + pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE + + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); + pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE + + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); S390_lowcore.percpu_offset = __per_cpu_offset[0]; smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); set_cpu_present(0, true); diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index aa1494d..c479d2f 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c @@ -41,6 +41,7 @@ struct page_key_data { static struct page_key_data *page_key_data; static struct page_key_data *page_key_rp, *page_key_wp; static unsigned long page_key_rx, page_key_wx; +unsigned long suspend_zero_pages; /* * For each page in the hibernation image one additional byte is @@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn) return 0; } +/* + * PM notifier callback for suspend + */ +static int suspend_pm_cb(struct notifier_block *nb, unsigned long action, + void *ptr) +{ + switch (action) { + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER); + if (!suspend_zero_pages) + return NOTIFY_BAD; + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + free_pages(suspend_zero_pages, LC_ORDER); + break; + default: + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static int __init suspend_pm_init(void) +{ + pm_notifier(suspend_pm_cb, 0); + return 0; +} +arch_initcall(suspend_pm_init); + void save_processor_state(void) { /* swsusp_arch_suspend() actually saves all cpu register contents. 
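The kdump and suspend hooks above share the same PM notifier pattern: acquire resources on PM_SUSPEND_PREPARE/PM_HIBERNATION_PREPARE and release them on the matching PM_POST_* events. A minimal sketch of that pattern, assuming only the generic <linux/suspend.h> interfaces (the callback, page variable and initcall names below are illustrative, not part of the patch):

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

static unsigned long demo_page;		/* illustrative resource */

static int demo_pm_cb(struct notifier_block *nb, unsigned long action,
		      void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		/* allocate before the image is written / the system sleeps */
		demo_page = get_zeroed_page(GFP_KERNEL);
		return demo_page ? NOTIFY_OK : NOTIFY_BAD;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		/* release once resume (or a failed suspend) is complete */
		free_page(demo_page);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static int __init demo_pm_init(void)
{
	pm_notifier(demo_pm_cb, 0);	/* priority 0, as in the callbacks above */
	return 0;
}
arch_initcall(demo_pm_init);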
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index d4ca4e0..c487be4 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend) /* Store prefix register on stack */ stpx __SF_EMPTY(%r15) - /* Save prefix register contents for lowcore */ - llgf %r4,__SF_EMPTY(%r15) + /* Save prefix register contents for lowcore copy */ + llgf %r10,__SF_EMPTY(%r15) /* Get pointer to save area */ lghi %r1,0x1000 @@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend) xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) spx __SF_EMPTY(%r15) + /* Save absolute zero pages */ + larl %r2,suspend_zero_pages + lg %r2,0(%r2) + lghi %r4,0 + lghi %r3,2*PAGE_SIZE + lghi %r5,2*PAGE_SIZE +1: mvcle %r2,%r4,0 + jo 1b + + /* Copy lowcore to absolute zero lowcore */ lghi %r2,0 + lgr %r4,%r10 lghi %r3,2*PAGE_SIZE lghi %r5,2*PAGE_SIZE 1: mvcle %r2,%r4,0 @@ -248,8 +259,20 @@ restore_registers: /* Load old stack */ lg %r15,0x2f8(%r13) + /* Save prefix register */ + mvc __SF_EMPTY(4,%r15),0x318(%r13) + + /* Restore absolute zero pages */ + lghi %r2,0 + larl %r4,suspend_zero_pages + lg %r4,0(%r4) + lghi %r3,2*PAGE_SIZE + lghi %r5,2*PAGE_SIZE +1: mvcle %r2,%r4,0 + jo 1b + /* Restore prefix register */ - spx 0x318(%r13) + spx __SF_EMPTY(%r15) /* Activate DAT */ stosm __SF_EMPTY(%r15),0x04 diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 13dd63f..c576232 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -12,49 +12,16 @@ * 'Traps.c' handles hardware traps and faults after we have saved some * state in 'asm.s'. */ -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> +#include <linux/kprobes.h> +#include <linux/kdebug.h> +#include <linux/module.h> #include <linux/ptrace.h> -#include <linux/timer.h> +#include <linux/sched.h> #include <linux/mm.h> -#include <linux/smp.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/seq_file.h> -#include <linux/delay.h> -#include <linux/module.h> -#include <linux/kdebug.h> -#include <linux/kallsyms.h> -#include <linux/reboot.h> -#include <linux/kprobes.h> -#include <linux/bug.h> -#include <linux/utsname.h> -#include <asm/uaccess.h> -#include <asm/io.h> -#include <linux/atomic.h> -#include <asm/mathemu.h> -#include <asm/cpcmd.h> -#include <asm/lowcore.h> -#include <asm/debug.h> -#include <asm/ipl.h> #include "entry.h" int show_unhandled_signals = 1; -#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) - -#ifndef CONFIG_64BIT -#define LONG "%08lx " -#define FOURLONG "%08lx %08lx %08lx %08lx\n" -static int kstack_depth_to_print = 12; -#else /* CONFIG_64BIT */ -#define LONG "%016lx " -#define FOURLONG "%016lx %016lx %016lx %016lx\n" -static int kstack_depth_to_print = 20; -#endif /* CONFIG_64BIT */ - static inline void __user *get_trap_ip(struct pt_regs *regs) { #ifdef CONFIG_64BIT @@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs) #endif } -/* - * For show_trace we have tree different stack to consider: - * - the panic stack which is used if the kernel stack has overflown - * - the asynchronous interrupt stack (cpu related) - * - the synchronous kernel stack (process related) - * The stack trace can start at any of the three stack and can potentially - * touch all of them. The order is: panic stack, async stack, sync stack. 
- */ -static unsigned long -__show_trace(unsigned long sp, unsigned long low, unsigned long high) -{ - struct stack_frame *sf; - struct pt_regs *regs; - - while (1) { - sp = sp & PSW_ADDR_INSN; - if (sp < low || sp > high - sizeof(*sf)) - return sp; - sf = (struct stack_frame *) sp; - printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); - print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); - /* Follow the backchain. */ - while (1) { - low = sp; - sp = sf->back_chain & PSW_ADDR_INSN; - if (!sp) - break; - if (sp <= low || sp > high - sizeof(*sf)) - return sp; - sf = (struct stack_frame *) sp; - printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); - print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); - } - /* Zero backchain detected, check for interrupt frame. */ - sp = (unsigned long) (sf + 1); - if (sp <= low || sp > high - sizeof(*regs)) - return sp; - regs = (struct pt_regs *) sp; - printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); - print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); - low = sp; - sp = regs->gprs[15]; - } -} - -static void show_trace(struct task_struct *task, unsigned long *stack) -{ - register unsigned long __r15 asm ("15"); - unsigned long sp; - - sp = (unsigned long) stack; - if (!sp) - sp = task ? task->thread.ksp : __r15; - printk("Call Trace:\n"); -#ifdef CONFIG_CHECK_STACK - sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, - S390_lowcore.panic_stack); -#endif - sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, - S390_lowcore.async_stack); - if (task) - __show_trace(sp, (unsigned long) task_stack_page(task), - (unsigned long) task_stack_page(task) + THREAD_SIZE); - else - __show_trace(sp, S390_lowcore.thread_info, - S390_lowcore.thread_info + THREAD_SIZE); - if (!task) - task = current; - debug_show_held_locks(task); -} - -void show_stack(struct task_struct *task, unsigned long *sp) -{ - register unsigned long * __r15 asm ("15"); - unsigned long *stack; - int i; - - if (!sp) - stack = task ? (unsigned long *) task->thread.ksp : __r15; - else - stack = sp; - - for (i = 0; i < kstack_depth_to_print; i++) { - if (((addr_t) stack & (THREAD_SIZE-1)) == 0) - break; - if ((i * sizeof(long) % 32) == 0) - printk("%s ", i == 0 ? "" : "\n"); - printk(LONG, *stack++); - } - printk("\n"); - show_trace(task, sp); -} - -static void show_last_breaking_event(struct pt_regs *regs) -{ -#ifdef CONFIG_64BIT - printk("Last Breaking-Event-Address:\n"); - printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); - print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); -#endif -} - -/* - * The architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - printk("CPU: %d %s %s %.*s\n", - task_thread_info(current)->cpu, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - printk("Process %s (pid: %d, task: %p, ksp: %p)\n", - current->comm, current->pid, current, - (void *) current->thread.ksp); - show_stack(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); - -static inline int mask_bits(struct pt_regs *regs, unsigned long bits) -{ - return (regs->psw.mask & bits) / ((~bits + 1) & bits); -} - -void show_registers(struct pt_regs *regs) -{ - char *mode; - - mode = user_mode(regs) ? 
"User" : "Krnl"; - printk("%s PSW : %p %p", - mode, (void *) regs->psw.mask, - (void *) regs->psw.addr); - print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); - printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " - "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), - mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), - mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), - mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), - mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), - mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); -#ifdef CONFIG_64BIT - printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); -#endif - printk("\n%s GPRS: " FOURLONG, mode, - regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); - printk(" " FOURLONG, - regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); - printk(" " FOURLONG, - regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); - printk(" " FOURLONG, - regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); - - show_code(regs); -} - -void show_regs(struct pt_regs *regs) -{ - printk("CPU: %d %s %s %.*s\n", - task_thread_info(current)->cpu, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - printk("Process %s (pid: %d, task: %p, ksp: %p)\n", - current->comm, current->pid, current, - (void *) current->thread.ksp); - show_registers(regs); - /* Show stack backtrace if pt_regs is from kernel mode */ - if (!user_mode(regs)) - show_trace(NULL, (unsigned long *) regs->gprs[15]); - show_last_breaking_event(regs); -} - -static DEFINE_SPINLOCK(die_lock); - -void die(struct pt_regs *regs, const char *str) -{ - static int die_counter; - - oops_enter(); - lgr_info_log(); - debug_stop_all(); - console_verbose(); - spin_lock_irq(&die_lock); - bust_spinlocks(1); - printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); -#ifdef CONFIG_PREEMPT - printk("PREEMPT "); -#endif -#ifdef CONFIG_SMP - printk("SMP "); -#endif -#ifdef CONFIG_DEBUG_PAGEALLOC - printk("DEBUG_PAGEALLOC"); -#endif - printk("\n"); - notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); - print_modules(); - show_regs(regs); - bust_spinlocks(0); - add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - spin_unlock_irq(&die_lock); - if (in_interrupt()) - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); - oops_exit(); - do_exit(SIGSEGV); -} - static inline void report_user_fault(struct pt_regs *regs, int signr) { if ((task_pid_nr(current) > 1) && !show_unhandled_signals) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index a0042ac..3fb0935 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -158,8 +158,6 @@ void __kprobes vtime_stop_cpu(void) unsigned long psw_mask; trace_hardirqs_on(); - /* Don't trace preempt off for idle. */ - stop_critical_timings(); /* Wait for external, I/O or machine check interrupt. */ psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | @@ -169,9 +167,6 @@ void __kprobes vtime_stop_cpu(void) /* Call the assembler magic in entry.S */ psw_idle(idle, psw_mask); - /* Reenable preemption tracer. */ - start_critical_timings(); - /* Account time spent with enabled wait psw loaded as idle time. 
*/ idle->sequence++; smp_wmb(); diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h index 2b29e62..c2f582bb1c 100644 --- a/arch/s390/kvm/trace.h +++ b/arch/s390/kvm/trace.h @@ -67,7 +67,7 @@ TRACE_EVENT(kvm_s390_sie_fault, #define sie_intercept_code \ {0x04, "Instruction"}, \ {0x08, "Program interruption"}, \ - {0x0C, "Instruction and program interuption"}, \ + {0x0C, "Instruction and program interruption"}, \ {0x10, "External request"}, \ {0x14, "External interruption"}, \ {0x18, "I/O request"}, \ @@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction, __entry->instruction, insn_to_mnemonic((unsigned char *) &__entry->instruction, - __entry->insn) ? + __entry->insn, sizeof(__entry->insn)) ? "unknown" : __entry->insn) ); diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index 6ab0d0b..20b0e97 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -3,7 +3,6 @@ # lib-y += delay.o string.o uaccess_std.o uaccess_pt.o -obj-y += usercopy.o obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o obj-$(CONFIG_64BIT) += mem64.o lib-$(CONFIG_64BIT) += uaccess_mvcos.o diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dff631d..466fb33 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to, * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address * contains the (negative) exception code. */ -static __always_inline unsigned long follow_table(struct mm_struct *mm, - unsigned long addr, int write) +#ifdef CONFIG_64BIT +static unsigned long follow_table(struct mm_struct *mm, + unsigned long address, int write) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *ptep; + unsigned long *table = (unsigned long *)__pa(mm->pgd); + + switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { + case _ASCE_TYPE_REGION1: + table = table + ((address >> 53) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x39UL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION2: + table = table + ((address >> 42) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3aUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION3: + table = table + ((address >> 31) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3bUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_SEGMENT: + table = table + ((address >> 20) & 0x7ff); + if (unlikely(*table & _SEGMENT_ENTRY_INV)) + return -0x10UL; + if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { + if (write && (*table & _SEGMENT_ENTRY_RO)) + return -0x04UL; + return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + + (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); + } + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + } + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) + return -0x11UL; + if (write && (*table & _PAGE_RO)) + return -0x04UL; + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); +} - pgd = pgd_offset(mm, addr); - if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - return -0x3aUL; +#else /* CONFIG_64BIT */ - pud = pud_offset(pgd, addr); - if (pud_none(*pud) || unlikely(pud_bad(*pud))) - return -0x3bUL; +static unsigned long follow_table(struct mm_struct *mm, + unsigned long address, int write) +{ + unsigned long *table = (unsigned long *)__pa(mm->pgd); - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + table = table + ((address >> 20) & 0x7ff); + 
if (unlikely(*table & _SEGMENT_ENTRY_INV)) return -0x10UL; - if (pmd_large(*pmd)) { - if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) - return -0x04UL; - return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); - } - if (unlikely(pmd_bad(*pmd))) - return -0x10UL; - - ptep = pte_offset_map(pmd, addr); - if (!pte_present(*ptep)) + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) return -0x11UL; - if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) + if (write && (*table & _PAGE_RO)) return -0x04UL; - - return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); } +#endif /* CONFIG_64BIT */ + static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, size_t n, int write_user) { @@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from) static size_t clear_user_pt(size_t n, void __user *to) { - void *zpage = &empty_zero_page; + void *zpage = (void *) empty_zero_page; long done, size, ret; done = 0; diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c deleted file mode 100644 index 14b363f..0000000 --- a/arch/s390/lib/usercopy.c +++ /dev/null @@ -1,8 +0,0 @@ -#include <linux/module.h> -#include <linux/bug.h> - -void copy_from_user_overflow(void) -{ - WARN(1, "Buffer overflow detected!\n"); -} -EXPORT_SYMBOL(copy_from_user_overflow); diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 479e9428..9d84a1f 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -458,12 +458,10 @@ static int __init cmm_init(void) if (rc) goto out_pm; cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); - rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0; - if (rc) - goto out_kthread; - return 0; + if (!IS_ERR(cmm_thread_ptr)) + return 0; -out_kthread: + rc = PTR_ERR(cmm_thread_ptr); unregister_pm_notifier(&cmm_power_notifier); out_pm: unregister_oom_notifier(&cmm_oom_nb); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 2fb9e63..047c3e4 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs) int fault; trans_exc_code = regs->int_parm_long; - /* Protection exception is suppressing, decrement psw address. */ - regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); + /* + * Protection exceptions are suppressing, decrement psw address. + * The exception to this rule are aborted transactions, for these + * the PSW already points to the correct location. + */ + if (!(regs->int_code & 0x200)) + regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); /* * Check for low-address protection. 
This needs to be treated * as a special case because the translation exception code diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 532525e..121089d 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -39,7 +39,7 @@ int arch_prepare_hugepage(struct page *page) if (!ptep) return -ENOMEM; - pte = mk_pte(page, PAGE_RW); + pte_val(pte) = addr; for (i = 0; i < PTRS_PER_PTE; i++) { set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte); pte_val(pte) += PAGE_SIZE; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 49ce6bb2..0b09b23 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -42,11 +42,10 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); unsigned long empty_zero_page, zero_page_mask; EXPORT_SYMBOL(empty_zero_page); -static unsigned long __init setup_zero_pages(void) +static void __init setup_zero_pages(void) { struct cpuid cpu_id; unsigned int order; - unsigned long size; struct page *page; int i; @@ -63,10 +62,18 @@ static unsigned long __init setup_zero_pages(void) break; case 0x2097: /* z10 */ case 0x2098: /* z10 */ - default: + case 0x2817: /* z196 */ + case 0x2818: /* z196 */ order = 2; break; + case 0x2827: /* zEC12 */ + default: + order = 5; + break; } + /* Limit number of empty zero pages for small memory sizes */ + if (order > 2 && totalram_pages <= 16384) + order = 2; empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!empty_zero_page) @@ -75,14 +82,11 @@ static unsigned long __init setup_zero_pages(void) page = virt_to_page((void *) empty_zero_page); split_page(page, order); for (i = 1 << order; i > 0; i--) { - SetPageReserved(page); + mark_page_reserved(page); page++; } - size = PAGE_SIZE << order; - zero_page_mask = (size - 1) & PAGE_MASK; - - return 1UL << order; + zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; } /* @@ -139,7 +143,7 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ totalram_pages += free_all_bootmem(); - totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ + setup_zero_pages(); /* Setup zeroed pages. 
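The page order is chosen per CPU model in setup_zero_pages() and capped at order 2 when totalram_pages <= 16384.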
*/ reservedpages = 0; @@ -158,34 +162,15 @@ void __init mem_init(void) PFN_ALIGN((unsigned long)&_eshared) - 1); } -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr = begin; - - if (begin >= end) - return; - for (; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM, - PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); -} - void free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long)&__init_begin, - (unsigned long)&__init_end); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", start, end); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index d21040e..80adfbf 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -9,31 +9,25 @@ #include <asm/pgtable.h> #include <asm/page.h> +static inline unsigned long sske_frame(unsigned long addr, unsigned char skey) +{ + asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0" + : [addr] "+a" (addr) : [skey] "d" (skey)); + return addr; +} + void storage_key_init_range(unsigned long start, unsigned long end) { - unsigned long boundary, function, size; + unsigned long boundary, size; while (start < end) { - if (MACHINE_HAS_EDAT2) { - /* set storage keys for a 2GB frame */ - function = 0x22000 | PAGE_DEFAULT_KEY; - size = 1UL << 31; - boundary = (start + size) & ~(size - 1); - if (boundary <= end) { - do { - start = pfmf(function, start); - } while (start < boundary); - continue; - } - } if (MACHINE_HAS_EDAT1) { /* set storage keys for a 1MB frame */ - function = 0x21000 | PAGE_DEFAULT_KEY; size = 1UL << 20; boundary = (start + size) & ~(size - 1); if (boundary <= end) { do { - start = pfmf(function, start); + start = sske_frame(start, PAGE_DEFAULT_KEY); } while (start < boundary); continue; } diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index ae44d2a..bd954e9 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -379,75 +379,183 @@ out_unmap: } EXPORT_SYMBOL_GPL(gmap_map_segment); -/* - * this function is assumed to be called with mmap_sem held - */ -unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) +static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap) { - unsigned long *table, vmaddr, segment; - struct mm_struct *mm; - struct gmap_pgtable *mp; - struct gmap_rmap *rmap; - struct vm_area_struct *vma; - struct page *page; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; + unsigned long *table; - current->thread.gmap_addr = address; - mm = gmap->mm; - /* Walk the gmap address space page table */ table = gmap->table + ((address >> 53) & 0x7ff); if (unlikely(*table & _REGION_ENTRY_INV)) - return -EFAULT; + return ERR_PTR(-EFAULT); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = table + ((address >> 42) & 0x7ff); if (unlikely(*table & _REGION_ENTRY_INV)) - return -EFAULT; + return ERR_PTR(-EFAULT); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = table + ((address >> 31) & 0x7ff); if (unlikely(*table & _REGION_ENTRY_INV)) - return -EFAULT; + return ERR_PTR(-EFAULT); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = table + ((address >> 20) & 0x7ff); + return table; 
+} + +/** + * __gmap_translate - translate a guest address to a user space address + * @address: guest address + * @gmap: pointer to guest mapping meta data structure + * + * Returns user space address which corresponds to the guest address or + * -EFAULT if no such mapping exists. + * This function does not establish potentially missing page table entries. + * The mmap_sem of the mm that belongs to the address space must be held + * when this function gets called. + */ +unsigned long __gmap_translate(unsigned long address, struct gmap *gmap) +{ + unsigned long *segment_ptr, vmaddr, segment; + struct gmap_pgtable *mp; + struct page *page; + current->thread.gmap_addr = address; + segment_ptr = gmap_table_walk(address, gmap); + if (IS_ERR(segment_ptr)) + return PTR_ERR(segment_ptr); /* Convert the gmap address to an mm address. */ - segment = *table; - if (likely(!(segment & _SEGMENT_ENTRY_INV))) { + segment = *segment_ptr; + if (!(segment & _SEGMENT_ENTRY_INV)) { page = pfn_to_page(segment >> PAGE_SHIFT); mp = (struct gmap_pgtable *) page->index; return mp->vmaddr | (address & ~PMD_MASK); } else if (segment & _SEGMENT_ENTRY_RO) { vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; - vma = find_vma(mm, vmaddr); - if (!vma || vma->vm_start > vmaddr) - return -EFAULT; - - /* Walk the parent mm page table */ - pgd = pgd_offset(mm, vmaddr); - pud = pud_alloc(mm, pgd, vmaddr); - if (!pud) - return -ENOMEM; - pmd = pmd_alloc(mm, pud, vmaddr); - if (!pmd) - return -ENOMEM; - if (!pmd_present(*pmd) && - __pte_alloc(mm, vma, pmd, vmaddr)) - return -ENOMEM; - /* pmd now points to a valid segment table entry. */ - rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); - if (!rmap) - return -ENOMEM; - /* Link gmap segment table entry location to page table. */ - page = pmd_page(*pmd); - mp = (struct gmap_pgtable *) page->index; - rmap->entry = table; - spin_lock(&mm->page_table_lock); + return vmaddr | (address & ~PMD_MASK); + } + return -EFAULT; +} +EXPORT_SYMBOL_GPL(__gmap_translate); + +/** + * gmap_translate - translate a guest address to a user space address + * @address: guest address + * @gmap: pointer to guest mapping meta data structure + * + * Returns user space address which corresponds to the guest address or + * -EFAULT if no such mapping exists. + * This function does not establish potentially missing page table entries. + */ +unsigned long gmap_translate(unsigned long address, struct gmap *gmap) +{ + unsigned long rc; + + down_read(&gmap->mm->mmap_sem); + rc = __gmap_translate(address, gmap); + up_read(&gmap->mm->mmap_sem); + return rc; +} +EXPORT_SYMBOL_GPL(gmap_translate); + +static int gmap_connect_pgtable(unsigned long segment, + unsigned long *segment_ptr, + struct gmap *gmap) +{ + unsigned long vmaddr; + struct vm_area_struct *vma; + struct gmap_pgtable *mp; + struct gmap_rmap *rmap; + struct mm_struct *mm; + struct page *page; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + mm = gmap->mm; + vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; + vma = find_vma(mm, vmaddr); + if (!vma || vma->vm_start > vmaddr) + return -EFAULT; + /* Walk the parent mm page table */ + pgd = pgd_offset(mm, vmaddr); + pud = pud_alloc(mm, pgd, vmaddr); + if (!pud) + return -ENOMEM; + pmd = pmd_alloc(mm, pud, vmaddr); + if (!pmd) + return -ENOMEM; + if (!pmd_present(*pmd) && + __pte_alloc(mm, vma, pmd, vmaddr)) + return -ENOMEM; + /* pmd now points to a valid segment table entry. 
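An rmap entry is allocated and, under the page table lock, linked to the gmap segment entry provided that entry is still unchanged.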
*/ + rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); + if (!rmap) + return -ENOMEM; + /* Link gmap segment table entry location to page table. */ + page = pmd_page(*pmd); + mp = (struct gmap_pgtable *) page->index; + rmap->entry = segment_ptr; + spin_lock(&mm->page_table_lock); + if (*segment_ptr == segment) { list_add(&rmap->list, &mp->mapper); - spin_unlock(&mm->page_table_lock); /* Set gmap segment table entry to page table. */ - *table = pmd_val(*pmd) & PAGE_MASK; - return vmaddr | (address & ~PMD_MASK); + *segment_ptr = pmd_val(*pmd) & PAGE_MASK; + rmap = NULL; + } + spin_unlock(&mm->page_table_lock); + kfree(rmap); + return 0; +} + +static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table) +{ + struct gmap_rmap *rmap, *next; + struct gmap_pgtable *mp; + struct page *page; + int flush; + + flush = 0; + spin_lock(&mm->page_table_lock); + page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + list_for_each_entry_safe(rmap, next, &mp->mapper, list) { + *rmap->entry = + _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; + list_del(&rmap->list); + kfree(rmap); + flush = 1; + } + spin_unlock(&mm->page_table_lock); + if (flush) + __tlb_flush_global(); +} + +/* + * this function is assumed to be called with mmap_sem held + */ +unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) +{ + unsigned long *segment_ptr, segment; + struct gmap_pgtable *mp; + struct page *page; + int rc; + + current->thread.gmap_addr = address; + segment_ptr = gmap_table_walk(address, gmap); + if (IS_ERR(segment_ptr)) + return -EFAULT; + /* Convert the gmap address to an mm address. */ + while (1) { + segment = *segment_ptr; + if (!(segment & _SEGMENT_ENTRY_INV)) { + /* Page table is present */ + page = pfn_to_page(segment >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + return mp->vmaddr | (address & ~PMD_MASK); + } + if (!(segment & _SEGMENT_ENTRY_RO)) + /* Nothing mapped in the gmap address space. 
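Leave the loop and return -EFAULT to the caller.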
*/ + break; + rc = gmap_connect_pgtable(segment, segment_ptr, gmap); + if (rc) + return rc; } return -EFAULT; } @@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) } EXPORT_SYMBOL_GPL(gmap_discard); -void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) -{ - struct gmap_rmap *rmap, *next; - struct gmap_pgtable *mp; - struct page *page; - int flush; - - flush = 0; - spin_lock(&mm->page_table_lock); - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - mp = (struct gmap_pgtable *) page->index; - list_for_each_entry_safe(rmap, next, &mp->mapper, list) { - *rmap->entry = - _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; - list_del(&rmap->list); - kfree(rmap); - flush = 1; - } - spin_unlock(&mm->page_table_lock); - if (flush) - __tlb_flush_global(); -} - static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, unsigned long vmaddr) { @@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table) { } -static inline void gmap_unmap_notifier(struct mm_struct *mm, - unsigned long *table) +static inline void gmap_disconnect_pgtable(struct mm_struct *mm, + unsigned long *table) { } @@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) unsigned int bit, mask; if (mm_has_pgste(mm)) { - gmap_unmap_notifier(mm, table); + gmap_disconnect_pgtable(mm, table); return page_table_free_pgste(table); } /* Free 1K/2K page table fragment of a 4K page */ @@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) mm = tlb->mm; if (mm_has_pgste(mm)) { - gmap_unmap_notifier(mm, table); + gmap_disconnect_pgtable(mm, table); table = (unsigned long *) (__pa(table) | FRAG_MASK); tlb_remove_table(tlb, table); return; diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index ffab84d..3583705 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -191,19 +191,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size) /* * Add a backed mem_map array to the virtual mem_map array. 
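The populated range is cleared with memset() once the backing exists.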
*/ -int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) { - unsigned long address, start_addr, end_addr; + unsigned long address = start; pgd_t *pg_dir; pud_t *pu_dir; pmd_t *pm_dir; pte_t *pt_dir; int ret = -ENOMEM; - start_addr = (unsigned long) start; - end_addr = (unsigned long) (start + nr); - - for (address = start_addr; address < end_addr;) { + for (address = start; address < end;) { pg_dir = pgd_offset_k(address); if (pgd_none(*pg_dir)) { pu_dir = vmem_pud_alloc(); @@ -262,14 +259,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) } address += PAGE_SIZE; } - memset(start, 0, nr * sizeof(struct page)); + memset((void *)start, 0, end - start); ret = 0; out: - flush_tlb_kernel_range(start_addr, end_addr); + flush_tlb_kernel_range(start, end); return ret; } -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 0972e91..82f165f 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp) if (!bpf_jit_enable) return; - addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL); + addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL); if (addrs == NULL) return; - memset(addrs, 0, fp->len * sizeof(*addrs)); memset(&jit, 0, sizeof(cjit)); memset(&cjit, 0, sizeof(cjit)); diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 584b936..ffeb17c 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -440,6 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) switch (id.machine) { case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; + case 0x2827: ops->cpu_type = "s390/zEC12"; break; default: return -ENODEV; } } diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile index f0f426a..086a2e3 100644 --- a/arch/s390/pci/Makefile +++ b/arch/s390/pci/Makefile @@ -2,5 +2,5 @@ # Makefile for the s390 PCI subsystem. 
# -obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \ - pci_sysfs.o pci_event.o pci_debug.o +obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \ + pci_event.o pci_debug.o pci_insn.o diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 27b4c17..e6f15b5 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -99,9 +99,6 @@ static int __read_mostly aisb_max; static struct kmem_cache *zdev_irq_cache; static struct kmem_cache *zdev_fmb_cache; -debug_info_t *pci_debug_msg_id; -debug_info_t *pci_debug_err_id; - static inline int irq_to_msi_nr(unsigned int irq) { return irq & ZPCI_MSI_MASK; @@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb, fib->aisb = (u64) bucket->aisb + aisb / 8; fib->aisbo = aisb & ZPCI_MSI_MASK; - rc = mpcifc_instr(req, fib); + rc = s390pci_mod_fc(req, fib); pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); free_page((unsigned long) fib); @@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args fib->iota = args->iota; fib->fmb_addr = args->fmb_addr; - rc = mpcifc_instr(req, fib); + rc = s390pci_mod_fc(req, fib); free_page((unsigned long) fib); return rc; } @@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev) if (zdev->fmb) return -EINVAL; - zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL); + zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL); if (!zdev->fmb) return -ENOMEM; - memset(zdev->fmb, 0, sizeof(*zdev->fmb)); WARN_ON((u64) zdev->fmb & 0xf); args.fmb_addr = virt_to_phys(zdev->fmb); @@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len) u64 data; int rc; - rc = pcilg_instr(&data, req, offset); - data = data << ((8 - len) * 8); - data = le64_to_cpu(data); - if (!rc) + rc = s390pci_load(&data, req, offset); + if (!rc) { + data = data << ((8 - len) * 8); + data = le64_to_cpu(data); *val = (u32) data; - else + } else *val = 0xffffffff; return rc; } @@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) data = cpu_to_le64(data); data = data >> ((8 - len) * 8); - rc = pcistg_instr(data, req, offset); + rc = s390pci_store(data, req, offset); return rc; } @@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct zpci_dev *zdev = get_zdev_by_bus(bus); + int ret; if (!zdev || devfn != ZPCI_DEVFN) - return 0; - return zpci_cfg_load(zdev, where, val, size); + ret = -ENODEV; + else + ret = zpci_cfg_load(zdev, where, val, size); + + return ret; } static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct zpci_dev *zdev = get_zdev_by_bus(bus); + int ret; if (!zdev || devfn != ZPCI_DEVFN) - return 0; - return zpci_cfg_store(zdev, where, val, size); + ret = -ENODEV; + else + ret = zpci_cfg_store(zdev, where, val, size); + + return ret; } static struct pci_ops pci_root_ops = { @@ -474,7 +478,7 @@ scan: } /* enable interrupts again */ - sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); + set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); /* check again to not lose initiative */ rmb(); @@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev) } }; -static void zpci_unmap_resources(struct pci_dev *pdev) -{ - resource_size_t len; - int i; - - for (i = 0; i < PCI_BAR_COUNT; i++) { - len = pci_resource_len(pdev, i); - if (!len) - continue; - pci_iounmap(pdev, (void *) pdev->resource[i].start); - } -}; - struct zpci_dev 
*zpci_alloc_device(void) { struct zpci_dev *zdev; @@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev) kfree(zdev); } -/* Called on removal of pci_dev, leaves zpci and bus device */ -static void zpci_remove_device(struct pci_dev *pdev) -{ - struct zpci_dev *zdev = get_zdev(pdev); - - dev_info(&pdev->dev, "Removing device %u\n", zdev->domain); - zdev->state = ZPCI_FN_STATE_CONFIGURED; - zpci_dma_exit_device(zdev); - zpci_fmb_disable_device(zdev); - zpci_sysfs_remove_device(&pdev->dev); - zpci_unmap_resources(pdev); - list_del(&zdev->entry); /* can be called from init */ - zdev->pdev = NULL; -} - -static void zpci_scan_devices(void) -{ - struct zpci_dev *zdev; - - mutex_lock(&zpci_list_lock); - list_for_each_entry(zdev, &zpci_list, entry) - if (zdev->state == ZPCI_FN_STATE_CONFIGURED) - zpci_scan_device(zdev); - mutex_unlock(&zpci_list_lock); -} - /* * Too late for any s390 specific setup, since interrupts must be set up * already which requires DMA setup too and the pci scan will access the @@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) return 0; } -void pcibios_disable_device(struct pci_dev *pdev) -{ - zpci_remove_device(pdev); - pdev->sysdata = NULL; -} - int pcibios_add_platform_entries(struct pci_dev *pdev) { return zpci_sysfs_add_device(&pdev->dev); @@ -789,7 +748,7 @@ static int __init zpci_irq_init(void) spin_lock_init(&bucket->lock); /* set summary to 1 to be called every time for the ISC */ *zpci_irq_si = 1; - sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); + set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); return 0; out_ai: @@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry) spin_unlock(&zpci_iomap_lock); } -static int zpci_create_device_bus(struct zpci_dev *zdev) +int pcibios_add_device(struct pci_dev *pdev) +{ + struct zpci_dev *zdev = get_zdev(pdev); + + zdev->pdev = pdev; + zpci_debug_init_device(zdev); + zpci_fmb_enable_device(zdev); + zpci_map_resources(zdev); + + return 0; +} + +static int zpci_scan_bus(struct zpci_dev *zdev) { struct resource *res; LIST_HEAD(resources); @@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev) pci_add_resource(&resources, res); } - zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, - zdev, &resources); + zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, + zdev, &resources); if (!zdev->bus) return -EIO; @@ -959,6 +930,13 @@ out: } EXPORT_SYMBOL_GPL(zpci_enable_device); +int zpci_disable_device(struct zpci_dev *zdev) +{ + zpci_dma_exit_device(zdev); + return clp_disable_fh(zdev); +} +EXPORT_SYMBOL_GPL(zpci_disable_device); + int zpci_create_device(struct zpci_dev *zdev) { int rc; @@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev) if (rc) goto out; - rc = zpci_create_device_bus(zdev); + if (zdev->state == ZPCI_FN_STATE_CONFIGURED) { + rc = zpci_enable_device(zdev); + if (rc) + goto out_free; + + zdev->state = ZPCI_FN_STATE_ONLINE; + } + rc = zpci_scan_bus(zdev); if (rc) - goto out_bus; + goto out_disable; mutex_lock(&zpci_list_lock); list_add_tail(&zdev->entry, &zpci_list); @@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev) hotplug_ops->create_slot(zdev); mutex_unlock(&zpci_list_lock); - if (zdev->state == ZPCI_FN_STATE_STANDBY) - return 0; - - rc = zpci_enable_device(zdev); - if (rc) - goto out_start; return 0; -out_start: - mutex_lock(&zpci_list_lock); - list_del(&zdev->entry); - if (hotplug_ops) - hotplug_ops->remove_slot(zdev); - mutex_unlock(&zpci_list_lock); -out_bus: +out_disable: 
+ if (zdev->state == ZPCI_FN_STATE_ONLINE) + zpci_disable_device(zdev); +out_free: zpci_free_domain(zdev); out: return rc; @@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev) goto out; } - zpci_debug_init_device(zdev); - zpci_fmb_enable_device(zdev); - zpci_map_resources(zdev); pci_bus_add_devices(zdev->bus); - /* now that pdev was added to the bus mark it as used */ - zdev->state = ZPCI_FN_STATE_ONLINE; return 0; - out: zpci_dma_exit_device(zdev); clp_disable_fh(zdev); @@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void) } EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); -unsigned int s390_pci_probe = 1; +unsigned int s390_pci_probe; EXPORT_SYMBOL_GPL(s390_pci_probe); char * __init pcibios_setup(char *str) { - if (!strcmp(str, "off")) { - s390_pci_probe = 0; + if (!strcmp(str, "on")) { + s390_pci_probe = 1; return NULL; } return str; @@ -1138,7 +1108,6 @@ static int __init pci_base_init(void) if (rc) goto out_find; - zpci_scan_devices(); return 0; out_find: diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index f339fe2f..bd34359 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <linux/delay.h> #include <linux/pci.h> +#include <asm/pci_debug.h> #include <asm/pci_clp.h> /* @@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured) struct zpci_dev *zdev; int rc; + zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured); zdev = zpci_alloc_device(); if (IS_ERR(zdev)) return PTR_ERR(zdev); @@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) *fh = rrb->response.fh; else { - pr_err("Set PCI FN failed with response: %x cc: %d\n", - rrb->response.hdr.rsp, rc); + zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc, + rrb->response.hdr.rsp); rc = -EIO; } clp_free_block(rrb); @@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as) if (!rc) /* Success -> store enabled handle in zdev */ zdev->fh = fh; + + zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); return rc; } @@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev) if (!rc) /* Success -> store disabled handle in zdev */ zdev->fh = fh; - else - dev_err(&zdev->pdev->dev, - "Failed to disable fn handle: 0x%x\n", fh); + + zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); return rc; } diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c index a5d07bc..771b823 100644 --- a/arch/s390/pci/pci_debug.c +++ b/arch/s390/pci/pci_debug.c @@ -11,12 +11,17 @@ #include <linux/kernel.h> #include <linux/seq_file.h> #include <linux/debugfs.h> +#include <linux/export.h> #include <linux/pci.h> #include <asm/debug.h> #include <asm/pci_dma.h> static struct dentry *debugfs_root; +debug_info_t *pci_debug_msg_id; +EXPORT_SYMBOL_GPL(pci_debug_msg_id); +debug_info_t *pci_debug_err_id; +EXPORT_SYMBOL_GPL(pci_debug_err_id); static char *pci_perf_names[] = { /* hardware counters */ @@ -168,7 +173,6 @@ int __init zpci_debug_init(void) return -EINVAL; debug_register_view(pci_debug_msg_id, &debug_sprintf_view); debug_set_level(pci_debug_msg_id, 3); - zpci_dbg("Debug view initialized\n"); /* error log */ pci_debug_err_id = debug_register("pci_error", 2, 1, 16); @@ -176,7 +180,6 @@ int __init zpci_debug_init(void) return -EINVAL; debug_register_view(pci_debug_err_id, &debug_hex_ascii_view); debug_set_level(pci_debug_err_id, 6); - zpci_err("Debug view initialized\n"); debugfs_root = debugfs_create_dir("pci", NULL); return 
0; diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index a547419..f8e69d5 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, * needs to be redone! */ goto no_refresh; - rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr, - nr_pages * PAGE_SIZE); + + rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, + nr_pages * PAGE_SIZE); no_refresh: spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); @@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, int flags = ZPCI_PTE_VALID; dma_addr_t dma_addr; - WARN_ON_ONCE(offset > PAGE_SIZE); - /* This rounds up number of pages based on size and offset */ nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); iommu_page_index = dma_alloc_iommu(zdev, nr_pages); @@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); - return dma_addr + offset; + return dma_addr + (offset & ~PAGE_MASK); } out_free: diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c new file mode 100644 index 0000000..22eeb9d --- /dev/null +++ b/arch/s390/pci/pci_insn.c @@ -0,0 +1,202 @@ +/* + * s390 specific pci instructions + * + * Copyright IBM Corp. 2013 + */ + +#include <linux/export.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <asm/pci_insn.h> +#include <asm/processor.h> + +#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ + +/* Modify PCI Function Controls */ +static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) +{ + u8 cc; + + asm volatile ( + " .insn rxy,0xe300000000d0,%[req],%[fib]\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib) + : : "cc"); + *status = req >> 24 & 0xff; + return cc; +} + +int s390pci_mod_fc(u64 req, struct zpci_fib *fib) +{ + u8 cc, status; + + do { + cc = __mpcifc(req, fib, &status); + if (cc == 2) + msleep(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d\n", + __func__, cc, status); + return (cc) ? -EIO : 0; +} + +/* Refresh PCI Translations */ +static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status) +{ + register u64 __addr asm("2") = addr; + register u64 __range asm("3") = range; + u8 cc; + + asm volatile ( + " .insn rre,0xb9d30000,%[fn],%[addr]\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=d" (cc), [fn] "+d" (fn) + : [addr] "d" (__addr), "d" (__range) + : "cc"); + *status = fn >> 24 & 0xff; + return cc; +} + +int s390pci_refresh_trans(u64 fn, u64 addr, u64 range) +{ + u8 cc, status; + + do { + cc = __rpcit(fn, addr, range, &status); + if (cc == 2) + udelay(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n", + __func__, cc, status, addr, range); + return (cc) ? 
-EIO : 0; +} + +/* Set Interruption Controls */ +void set_irq_ctrl(u16 ctl, char *unused, u8 isc) +{ + asm volatile ( + " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" + : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); +} + +/* PCI Load */ +static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status) +{ + register u64 __req asm("2") = req; + register u64 __offset asm("3") = offset; + int cc = -ENXIO; + u64 __data; + + asm volatile ( + " .insn rre,0xb9d20000,%[data],%[req]\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" + "1:\n" + EX_TABLE(0b, 1b) + : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req) + : "d" (__offset) + : "cc"); + *status = __req >> 24 & 0xff; + if (!cc) + *data = __data; + + return cc; +} + +int s390pci_load(u64 *data, u64 req, u64 offset) +{ + u8 status; + int cc; + + do { + cc = __pcilg(data, req, offset, &status); + if (cc == 2) + udelay(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", + __func__, cc, status, req, offset); + return (cc > 0) ? -EIO : cc; +} +EXPORT_SYMBOL_GPL(s390pci_load); + +/* PCI Store */ +static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) +{ + register u64 __req asm("2") = req; + register u64 __offset asm("3") = offset; + int cc = -ENXIO; + + asm volatile ( + " .insn rre,0xb9d00000,%[data],%[req]\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" + "1:\n" + EX_TABLE(0b, 1b) + : [cc] "+d" (cc), [req] "+d" (__req) + : "d" (__offset), [data] "d" (data) + : "cc"); + *status = __req >> 24 & 0xff; + return cc; +} + +int s390pci_store(u64 data, u64 req, u64 offset) +{ + u8 status; + int cc; + + do { + cc = __pcistg(data, req, offset, &status); + if (cc == 2) + udelay(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", + __func__, cc, status, req, offset); + return (cc > 0) ? -EIO : cc; +} +EXPORT_SYMBOL_GPL(s390pci_store); + +/* PCI Store Block */ +static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) +{ + int cc = -ENXIO; + + asm volatile ( + " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" + "1:\n" + EX_TABLE(0b, 1b) + : [cc] "+d" (cc), [req] "+d" (req) + : [offset] "d" (offset), [data] "Q" (*data) + : "cc"); + *status = req >> 24 & 0xff; + return cc; +} + +int s390pci_store_block(const u64 *data, u64 req, u64 offset) +{ + u8 status; + int cc; + + do { + cc = __pcistb(data, req, offset, &status); + if (cc == 2) + udelay(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", + __func__, cc, status, req, offset); + return (cc > 0) ? 
-EIO : cc; +} +EXPORT_SYMBOL_GPL(s390pci_store_block); diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c index 0297931..b097aed 100644 --- a/arch/s390/pci/pci_msi.c +++ b/arch/s390/pci/pci_msi.c @@ -18,8 +18,9 @@ /* mapping of irq numbers to msi_desc */ static struct hlist_head *msi_hash; -static unsigned int msihash_shift = 6; -#define msi_hashfn(nr) hash_long(nr, msihash_shift) +static const unsigned int msi_hash_bits = 8; +#define MSI_HASH_BUCKETS (1U << msi_hash_bits) +#define msi_hashfn(nr) hash_long(nr, msi_hash_bits) static DEFINE_SPINLOCK(msi_map_lock); @@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi, map->irq = nr; map->msi = msi; zdev->msi_map[nr & ZPCI_MSI_MASK] = map; + INIT_HLIST_NODE(&map->msi_chain); pr_debug("%s hashing irq: %u to bucket nr: %llu\n", __func__, nr, msi_hashfn(nr)); @@ -125,11 +127,11 @@ int __init zpci_msihash_init(void) { unsigned int i; - msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL); + msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL); if (!msi_hash) return -ENOMEM; - for (i = 0; i < (1U << msihash_shift); i++) + for (i = 0; i < MSI_HASH_BUCKETS; i++) INIT_HLIST_HEAD(&msi_hash[i]); return 0; } diff --git a/arch/score/Kconfig b/arch/score/Kconfig index e569aa1..c8def8b 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig @@ -12,7 +12,7 @@ config SCORE select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS select HAVE_MOD_ARCH_SPECIFIC - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select MODULES_USE_ELF_REL select CLONE_BACKWARDS diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index 7956846..f4c6d02 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -41,24 +41,6 @@ void machine_halt(void) {} /* If or when software machine-power-off is implemented, add code here. */ void machine_power_off(void) {} -/* - * The idle thread. 
There's no useful work to be - * done, so just try to conserve power and have a - * low exit latency (ie sit in a loop waiting for - * somebody to say that they'd like to reschedule) - */ -void __noreturn cpu_idle(void) -{ - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - while (!need_resched()) - barrier(); - rcu_idle_exit(); - schedule_preempt_disabled(); - } -} - void ret_from_fork(void); void ret_from_kernel_thread(void); diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c index 0e46fb1..1517a7d 100644 --- a/arch/score/kernel/traps.c +++ b/arch/score/kernel/traps.c @@ -117,6 +117,8 @@ static void show_code(unsigned int *pc) */ void show_regs(struct pt_regs *regs) { + show_regs_print_info(KERN_DEFAULT); + printk("r0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3], regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); @@ -149,16 +151,6 @@ static void show_registers(struct pt_regs *regs) printk(KERN_NOTICE "\n"); } -/* - * The architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - show_stack(current_thread_info()->task, - (long *) get_irq_regs()->regs[0]); -} -EXPORT_SYMBOL(dump_stack); - void __die(const char *str, struct pt_regs *regs, const char *file, const char *func, unsigned long line) { diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c index cee6bce..1592aad 100644 --- a/arch/score/mm/init.c +++ b/arch/score/mm/init.c @@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(empty_zero_page); static struct kcore_list kcore_mem, kcore_vmalloc; -static unsigned long setup_zero_page(void) +static void setup_zero_page(void) { struct page *page; @@ -52,9 +52,7 @@ static unsigned long setup_zero_page(void) panic("Oh boy, that early out of memory?"); page = virt_to_page((void *) empty_zero_page); - SetPageReserved(page); - - return 1UL; + mark_page_reserved(page); } #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -84,7 +82,7 @@ void __init mem_init(void) high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); totalram_pages += free_all_bootmem(); - totalram_pages -= setup_zero_page(); /* Setup zeroed pages. */ + setup_zero_page(); /* Setup zeroed pages. 
*/ reservedpages = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) @@ -109,37 +107,16 @@ void __init mem_init(void) } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ -static void free_init_pages(const char *what, unsigned long begin, unsigned long end) -{ - unsigned long pfn; - - for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) { - struct page *page = pfn_to_page(pfn); - void *addr = phys_to_virt(PFN_PHYS(pfn)); - - ClearPageReserved(page); - init_page_count(page); - memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); - __free_page(page); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); -} - #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", - virt_to_phys((void *) start), - virt_to_phys((void *) end)); + free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd"); } #endif void __init_refok free_initmem(void) { - free_init_pages("unused kernel memory", - __pa(&__init_begin), - __pa(&__init_end)); + free_initmem_default(POISON_FREE_INITMEM); } unsigned long pgd_current; diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5e85963..78d8ace 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -33,6 +33,7 @@ config SUPERH select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select GENERIC_SMP_IDLE_THREAD + select GENERIC_IDLE_POLL_SETUP select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST select GENERIC_STRNCPY_FROM_USER @@ -148,9 +149,6 @@ config ARCH_HAS_ILOG2_U32 config ARCH_HAS_ILOG2_U64 def_bool n -config ARCH_HAS_DEFAULT_IDLE - def_bool y - config NO_IOPORT def_bool !PCI depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \ @@ -624,25 +622,7 @@ config SH_CLK_CPG_LEGACY endmenu menu "CPU Frequency scaling" - source "drivers/cpufreq/Kconfig" - -config SH_CPU_FREQ - tristate "SuperH CPU Frequency driver" - depends on CPU_FREQ - select CPU_FREQ_TABLE - help - This adds the cpufreq driver for SuperH. Any CPU that supports - clock rate rounding through the clock framework can use this - driver. While it will make the kernel slightly larger, this is - harmless for CPUs that don't support rate rounding. The driver - will also generate a notice in the boot log before disabling - itself if the CPU in question is not capable of rate rounding. - - For details, take a look at <file:Documentation/cpu-freq>. - - If unsure, say N. 
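The pci_insn.c wrappers introduced in the s390 hunks above all follow one calling convention: issue the PCI instruction, retry with a short delay while the condition code reports busy (cc == 2), and map any other non-zero condition code to -EIO. A condensed sketch of that retry loop; __demo_pci_insn() is a hypothetical stand-in for __rpcit()/__pcilg()/__pcistg():

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>

#define DEMO_INSN_BUSY_DELAY	1	/* microseconds, like ZPCI_INSN_BUSY_DELAY */

static int __demo_pci_insn(u64 req, u64 offset, u8 *status);	/* hypothetical */

static int demo_pci_insn_retry(u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __demo_pci_insn(req, offset, &status);
		if (cc == 2)			/* condition code 2: busy, try again */
			udelay(DEMO_INSN_BUSY_DELAY);
	} while (cc == 2);

	return cc ? -EIO : 0;		/* any other non-zero cc is a hard error */
}

Only s390pci_mod_fc() deviates slightly, sleeping with msleep() instead of spinning in udelay().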
- endmenu source "arch/sh/drivers/Kconfig" diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index aaff767..764530c 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -254,11 +254,13 @@ static int usbhs_get_id(struct platform_device *pdev) return gpio_get_value(GPIO_PTB3); } -static void usbhs_phy_reset(struct platform_device *pdev) +static int usbhs_phy_reset(struct platform_device *pdev) { /* enable vbus if HOST */ if (!gpio_get_value(GPIO_PTB3)) gpio_set_value(GPIO_PTB5, 1); + + return 0; } static struct renesas_usbhs_platform_info usbhs_info = { diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c index c2c85f6..a162a7f 100644 --- a/arch/sh/drivers/pci/pcie-sh7786.c +++ b/arch/sh/drivers/pci/pcie-sh7786.c @@ -35,7 +35,7 @@ static unsigned int nr_ports; static struct sh7786_pcie_hwops { int (*core_init)(void); - async_func_ptr *port_init_hw; + async_func_t port_init_hw; } *sh7786_pcie_hwops; static struct resource sh7786_pci0_resources[] = { diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h index b3808c7..699255d 100644 --- a/arch/sh/include/asm/hugetlb.h +++ b/arch/sh/include/asm/hugetlb.h @@ -3,6 +3,7 @@ #include <asm/cacheflush.h> #include <asm/page.h> +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index e14567a..70ae0b2 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -14,9 +14,9 @@ struct swsusp_arch_regs { void sh_mobile_call_standby(unsigned long mode); #ifdef CONFIG_CPU_IDLE -void sh_mobile_setup_cpuidle(void); +int sh_mobile_setup_cpuidle(void); #else -static inline void sh_mobile_setup_cpuidle(void) {} +static inline int sh_mobile_setup_cpuidle(void) { return 0; } #endif /* notifier chains for pre/post sleep hooks */ diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 7d5ac4e..45a9366 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -207,8 +207,6 @@ static inline bool test_and_clear_restore_sigmask(void) return true; } -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index f259b37..261c8bf 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -31,7 +31,6 @@ obj-$(CONFIG_VSYSCALL) += vsyscall/ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o obj-$(CONFIG_KGDB) += kgdb.o -obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c index 1ddc876..d306225 100644 --- a/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -51,70 +51,53 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev, return k; } -static struct cpuidle_device cpuidle_dev; static struct cpuidle_driver cpuidle_driver = { - .name = "sh_idle", - .owner = THIS_MODULE, - .en_core_tk_irqen = 1, + .name = "sh_idle", + .owner = THIS_MODULE, + .states = { + { + .exit_latency = 1, + .target_residency = 1 * 2, + .power_usage = 3, + .flags = CPUIDLE_FLAG_TIME_VALID, + .enter = cpuidle_sleep_enter, + .name = "C1", + .desc = 
"SuperH Sleep Mode", + }, + { + .exit_latency = 100, + .target_residency = 1 * 2, + .power_usage = 1, + .flags = CPUIDLE_FLAG_TIME_VALID, + .enter = cpuidle_sleep_enter, + .name = "C2", + .desc = "SuperH Sleep Mode [SF]", + .disabled = true, + }, + { + .exit_latency = 2300, + .target_residency = 1 * 2, + .power_usage = 1, + .flags = CPUIDLE_FLAG_TIME_VALID, + .enter = cpuidle_sleep_enter, + .name = "C3", + .desc = "SuperH Mobile Standby Mode [SF]", + .disabled = true, + }, + }, + .safe_state_index = 0, + .state_count = 3, }; -void sh_mobile_setup_cpuidle(void) +int __init sh_mobile_setup_cpuidle(void) { - struct cpuidle_device *dev = &cpuidle_dev; - struct cpuidle_driver *drv = &cpuidle_driver; - struct cpuidle_state *state; - int i; + int ret; + if (sh_mobile_sleep_supported & SUSP_SH_SF) + cpuidle_driver.states[1].disabled = false; - for (i = 0; i < CPUIDLE_STATE_MAX; i++) { - drv->states[i].name[0] = '\0'; - drv->states[i].desc[0] = '\0'; - } + if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) + cpuidle_driver.states[2].disabled = false; - i = CPUIDLE_DRIVER_STATE_START; - - state = &drv->states[i++]; - snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); - strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); - state->exit_latency = 1; - state->target_residency = 1 * 2; - state->power_usage = 3; - state->flags = 0; - state->flags |= CPUIDLE_FLAG_TIME_VALID; - state->enter = cpuidle_sleep_enter; - - drv->safe_state_index = i-1; - - if (sh_mobile_sleep_supported & SUSP_SH_SF) { - state = &drv->states[i++]; - snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); - strncpy(state->desc, "SuperH Sleep Mode [SF]", - CPUIDLE_DESC_LEN); - state->exit_latency = 100; - state->target_residency = 1 * 2; - state->power_usage = 1; - state->flags = 0; - state->flags |= CPUIDLE_FLAG_TIME_VALID; - state->enter = cpuidle_sleep_enter; - } - - if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { - state = &drv->states[i++]; - snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); - strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", - CPUIDLE_DESC_LEN); - state->exit_latency = 2300; - state->target_residency = 1 * 2; - state->power_usage = 1; - state->flags = 0; - state->flags |= CPUIDLE_FLAG_TIME_VALID; - state->enter = cpuidle_sleep_enter; - } - - drv->state_count = i; - dev->state_count = i; - - cpuidle_register_driver(&cpuidle_driver); - - cpuidle_register_device(dev); + return cpuidle_register(&cpuidle_driver); } diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index 08d27fa..ac37b72 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -150,8 +150,7 @@ static const struct platform_suspend_ops sh_pm_ops = { static int __init sh_pm_init(void) { suspend_set_ops(&sh_pm_ops); - sh_mobile_setup_cpuidle(); - return 0; + return sh_mobile_setup_cpuidle(); } late_initcall(sh_pm_init); diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c deleted file mode 100644 index e68b45b..0000000 --- a/arch/sh/kernel/cpufreq.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * arch/sh/kernel/cpufreq.c - * - * cpufreq driver for the SuperH processors. - * - * Copyright (C) 2002 - 2012 Paul Mundt - * Copyright (C) 2002 M. R. Brown - * - * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c - * - * Copyright (C) 2004-2007 Atmel Corporation - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ -#define pr_fmt(fmt) "cpufreq: " fmt - -#include <linux/types.h> -#include <linux/cpufreq.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/err.h> -#include <linux/cpumask.h> -#include <linux/cpu.h> -#include <linux/smp.h> -#include <linux/sched.h> /* set_cpus_allowed() */ -#include <linux/clk.h> -#include <linux/percpu.h> -#include <linux/sh_clk.h> - -static DEFINE_PER_CPU(struct clk, sh_cpuclk); - -static unsigned int sh_cpufreq_get(unsigned int cpu) -{ - return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; -} - -/* - * Here we notify other drivers of the proposed change and the final change. - */ -static int sh_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int cpu = policy->cpu; - struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - struct device *dev; - long freq; - - if (!cpu_online(cpu)) - return -ENODEV; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - BUG_ON(smp_processor_id() != cpu); - - dev = get_cpu_device(cpu); - - /* Convert target_freq from kHz to Hz */ - freq = clk_round_rate(cpuclk, target_freq * 1000); - - if (freq < (policy->min * 1000) || freq > (policy->max * 1000)) - return -EINVAL; - - dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000); - - freqs.cpu = cpu; - freqs.old = sh_cpufreq_get(cpu); - freqs.new = (freq + 500) / 1000; - freqs.flags = 0; - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - set_cpus_allowed_ptr(current, &cpus_allowed); - clk_set_rate(cpuclk, freq); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - dev_dbg(dev, "set frequency %lu Hz\n", freq); - - return 0; -} - -static int sh_cpufreq_verify(struct cpufreq_policy *policy) -{ - struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); - struct cpufreq_frequency_table *freq_table; - - freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL; - if (freq_table) - return cpufreq_frequency_table_verify(policy, freq_table); - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000; - policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - return 0; -} - -static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); - struct cpufreq_frequency_table *freq_table; - struct device *dev; - - if (!cpu_online(cpu)) - return -ENODEV; - - dev = get_cpu_device(cpu); - - cpuclk = clk_get(dev, "cpu_clk"); - if (IS_ERR(cpuclk)) { - dev_err(dev, "couldn't get CPU clk\n"); - return PTR_ERR(cpuclk); - } - - policy->cur = policy->min = policy->max = sh_cpufreq_get(cpu); - - freq_table = cpuclk->nr_freqs ? 
cpuclk->freq_table : NULL; - if (freq_table) { - int result; - - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!result) - cpufreq_frequency_table_get_attr(freq_table, cpu); - } else { - dev_notice(dev, "no frequency table found, falling back " - "to rate rounding.\n"); - - policy->cpuinfo.min_freq = - (clk_round_rate(cpuclk, 1) + 500) / 1000; - policy->cpuinfo.max_freq = - (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; - } - - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - - dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, " - "Maximum %u.%03u MHz.\n", - policy->min / 1000, policy->min % 1000, - policy->max / 1000, policy->max % 1000); - - return 0; -} - -static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); - - cpufreq_frequency_table_put_attr(cpu); - clk_put(cpuclk); - - return 0; -} - -static struct freq_attr *sh_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver sh_cpufreq_driver = { - .owner = THIS_MODULE, - .name = "sh", - .get = sh_cpufreq_get, - .target = sh_cpufreq_target, - .verify = sh_cpufreq_verify, - .init = sh_cpufreq_cpu_init, - .exit = sh_cpufreq_cpu_exit, - .attr = sh_freq_attr, -}; - -static int __init sh_cpufreq_module_init(void) -{ - pr_notice("SuperH CPU frequency driver.\n"); - return cpufreq_register_driver(&sh_cpufreq_driver); -} - -static void __exit sh_cpufreq_module_exit(void) -{ - cpufreq_unregister_driver(&sh_cpufreq_driver); -} - -module_init(sh_cpufreq_module_init); -module_exit(sh_cpufreq_module_exit); - -MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); -MODULE_DESCRIPTION("cpufreq driver for SuperH"); -MODULE_LICENSE("GPL"); diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 7617dc4..b959f55 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -158,9 +158,3 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) (unsigned long)task_stack_page(tsk)); show_trace(tsk, sp, NULL); } - -void dump_stack(void) -{ - show_stack(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 3d5a1b3..2ea4483 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c @@ -24,98 +24,24 @@ static void (*sh_idle)(void); -static int hlt_counter; - -static int __init nohlt_setup(char *__unused) -{ - hlt_counter = 1; - return 1; -} -__setup("nohlt", nohlt_setup); - -static int __init hlt_setup(char *__unused) -{ - hlt_counter = 0; - return 1; -} -__setup("hlt", hlt_setup); - -static inline int hlt_works(void) -{ - return !hlt_counter; -} - -/* - * On SMP it's slightly faster (but much more power-consuming!) - * to poll the ->work.need_resched flag instead of waiting for the - * cross-CPU IPI to arrive. Use this option with caution. - */ -static void poll_idle(void) +void default_idle(void) { + set_bl_bit(); local_irq_enable(); - while (!need_resched()) - cpu_relax(); + /* Isn't this racy ? */ + cpu_sleep(); + clear_bl_bit(); } -void default_idle(void) +void arch_cpu_idle_dead(void) { - if (hlt_works()) { - clear_thread_flag(TIF_POLLING_NRFLAG); - smp_mb__after_clear_bit(); - - set_bl_bit(); - if (!need_resched()) { - local_irq_enable(); - cpu_sleep(); - } else - local_irq_enable(); - - set_thread_flag(TIF_POLLING_NRFLAG); - clear_bl_bit(); - } else - poll_idle(); + play_dead(); } -/* - * The idle thread. 
There's no useful work to be done, so just try to conserve - * power and have a low exit latency (ie sit in a loop waiting for somebody to - * say that they'd like to reschedule) - */ -void cpu_idle(void) +void arch_cpu_idle(void) { - unsigned int cpu = smp_processor_id(); - - set_thread_flag(TIF_POLLING_NRFLAG); - - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - - while (!need_resched()) { - check_pgt_cache(); - rmb(); - - if (cpu_is_offline(cpu)) - play_dead(); - - local_irq_disable(); - /* Don't trace irqs off for idle */ - stop_critical_timings(); - if (cpuidle_idle_call()) - sh_idle(); - /* - * Sanity check to ensure that sh_idle() returns - * with IRQs enabled - */ - WARN_ON(irqs_disabled()); - start_critical_timings(); - } - - rcu_idle_exit(); - tick_nohz_idle_exit(); - schedule_preempt_disabled(); - } + if (cpuidle_idle_call()) + sh_idle(); } void __init select_idle_routine(void) @@ -123,13 +49,8 @@ void __init select_idle_routine(void) /* * If a platform has set its own idle routine, leave it alone. */ - if (sh_idle) - return; - - if (hlt_works()) + if (!sh_idle) sh_idle = default_idle; - else - sh_idle = poll_idle; } void stop_this_cpu(void *unused) diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 73eb66f..ebd3933 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -32,11 +32,7 @@ void show_regs(struct pt_regs * regs) { printk("\n"); - printk("Pid : %d, Comm: \t\t%s\n", task_pid_nr(current), current->comm); - printk("CPU : %d \t\t%s (%s %.*s)\n\n", - smp_processor_id(), print_tainted(), init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); + show_regs_print_info(KERN_DEFAULT); print_symbol("PC is at %s\n", instruction_pointer(regs)); print_symbol("PR is at %s\n", regs->pr); diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index e611c85..174d124b 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -40,6 +40,7 @@ void show_regs(struct pt_regs *regs) unsigned long long ah, al, bh, bl, ch, cl; printk("\n"); + show_regs_print_info(KERN_DEFAULT); ah = (regs->pc) >> 32; al = (regs->pc) & 0xffffffff; diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c index 47475cc..fe584e5 100644 --- a/arch/sh/kernel/sh_bios.c +++ b/arch/sh/kernel/sh_bios.c @@ -104,6 +104,7 @@ void sh_bios_vbr_reload(void) ); } +#ifdef CONFIG_EARLY_PRINTK /* * Print a string through the BIOS */ @@ -144,8 +145,6 @@ static struct console bios_console = { .index = -1, }; -static struct console *early_console; - static int __init setup_early_printk(char *buf) { int keep_early = 0; @@ -170,3 +169,4 @@ static int __init setup_early_printk(char *buf) return 0; } early_param("earlyprintk", setup_early_printk); +#endif diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 2062aa8..4569645 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -203,7 +203,7 @@ asmlinkage void __cpuinit start_secondary(void) set_cpu_online(cpu, true); per_cpu(cpu_state, cpu) = CPU_ONLINE; - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } extern struct { diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 1057940..20f9ead 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -417,15 +417,13 @@ void __init mem_init(void) for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); - unsigned long node_pages = 0; void *node_high_memory; num_physpages += pgdat->node_present_pages; if 
(pgdat->node_spanned_pages) - node_pages = free_all_bootmem_node(pgdat); + totalram_pages += free_all_bootmem_node(pgdat); - totalram_pages += node_pages; node_high_memory = (void *)__va((pgdat->node_start_pfn + pgdat->node_spanned_pages) << @@ -501,31 +499,13 @@ void __init mem_init(void) void free_initmem(void) { - unsigned long addr; - - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - free_page(addr); - totalram_pages++; - } - printk("Freeing unused kernel memory: %ldk freed\n", - ((unsigned long)&__init_end - - (unsigned long)&__init_begin) >> 10); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - unsigned long p; - for (p = start; p < end; p += PAGE_SIZE) { - ClearPageReserved(virt_to_page(p)); - init_page_count(virt_to_page(p)); - free_page(p); - totalram_pages++; - } - printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 67388cd..f5041d7 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -83,12 +83,6 @@ config ARCH_DEFCONFIG default "arch/sparc/configs/sparc32_defconfig" if SPARC32 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 -# CONFIG_BITS can be used at source level to get 32/64 bits -config BITS - int - default 32 if SPARC32 - default 64 if SPARC64 - config IOMMU_HELPER bool default y if SPARC64 @@ -196,7 +190,7 @@ config RWSEM_XCHGADD_ALGORITHM config GENERIC_HWEIGHT bool - default y if !ULTRA_HAS_POPULATION_COUNT + default y config GENERIC_CALIBRATE_DELAY bool @@ -259,29 +253,6 @@ config HOTPLUG_CPU if SPARC64 source "drivers/cpufreq/Kconfig" - -config US3_FREQ - tristate "UltraSPARC-III CPU Frequency driver" - depends on CPU_FREQ - select CPU_FREQ_TABLE - help - This adds the CPUFreq driver for UltraSPARC-III processors. - - For details, take a look at <file:Documentation/cpu-freq>. - - If in doubt, say N. - -config US2E_FREQ - tristate "UltraSPARC-IIe CPU Frequency driver" - depends on CPU_FREQ - select CPU_FREQ_TABLE - help - This adds the CPUFreq driver for UltraSPARC-IIe processors. - - For details, take a look at <file:Documentation/cpu-freq>. - - If in doubt, say N. - endif config US3_MC @@ -412,6 +383,8 @@ config SERIAL_CONSOLE config SPARC_LEON bool "Sparc Leon processor family" depends on SPARC32 + select USB_EHCI_BIG_ENDIAN_MMIO + select USB_EHCI_BIG_ENDIAN_DESC ---help--- If you say Y here if you are running on a SPARC-LEON processor. 
The LEON processor is a synthesizable VHDL model of the diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index e26d430..ff18e3c 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -2,11 +2,16 @@ generic-y += clkdev.h +generic-y += cputime.h generic-y += div64.h +generic-y += emergency-restart.h generic-y += exec.h generic-y += local64.h +generic-y += mutex.h generic-y += irq_regs.h generic-y += local.h generic-y += module.h +generic-y += serial.h generic-y += trace_clock.h +generic-y += types.h generic-y += word-at-a-time.h diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h deleted file mode 100644 index 1a642b8..0000000 --- a/arch/sparc/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_CPUTIME_H -#define __SPARC_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __SPARC_CPUTIME_H */ diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c4..0000000 --- a/arch/sparc/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 7eb57d2..e4cab46 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -2,6 +2,7 @@ #define _ASM_SPARC64_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h deleted file mode 100644 index 458c1f7..0000000 --- a/arch/sparc/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) - */ - -#include <asm-generic/mutex-dec.h> diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 08fcce90..7619f2f 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); } +#include <asm/tlbflush.h> #include <asm-generic/pgtable.h> /* We provide our own get_unmapped_area to cope with VA holes and diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h deleted file mode 100644 index f90d61c..0000000 --- a/arch/sparc/include/asm/serial.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_SERIAL_H -#define __SPARC_SERIAL_H - -#define BASE_BAUD ( 1843200 / 16 ) - -#endif /* __SPARC_SERIAL_H */ diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index b73da3c..3c8917f 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); void cpu_panic(void); -extern void smp4m_irq_rotate(int cpu); /* * General functions that each host system must provide. 
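The Kbuild hunk above removes several sparc asm headers that merely redirected to asm-generic (cputime.h, emergency-restart.h, mutex.h, and so on) and lists them under generic-y instead. The build system then emits an equivalent wrapper automatically; roughly, for each such header it generates a file under arch/sparc/include/generated/asm/ whose whole content is:

/* generated by "generic-y += cputime.h" in arch/sparc/include/asm/Kbuild */
#include <asm-generic/cputime.h>

so the hand-maintained stubs deleted above carry no information of their own and can go.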
@@ -46,7 +45,6 @@ void sun4m_init_smp(void); void sun4d_init_smp(void); void smp_callin(void); -void smp_boot_cpus(void); void smp_store_cpu_info(int); void smp_resched_interrupt(void); @@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void); #define raw_smp_processor_id() (current_thread_info()->cpu) -#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier -#define prof_counter(__cpu) cpu_data(__cpu).counter - void smp_setup_cpu_possible_map(void); #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index d06a2660..6b67e50 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h @@ -45,6 +45,7 @@ #define SUN4V_CHIP_NIAGARA3 0x03 #define SUN4V_CHIP_NIAGARA4 0x04 #define SUN4V_CHIP_NIAGARA5 0x05 +#define SUN4V_CHIP_SPARC64X 0x8a #define SUN4V_CHIP_UNKNOWN 0xff #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index cad36f5..c7de332 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -18,8 +18,7 @@ do { \ * and 2 stores in this critical code path. -DaveM */ #define switch_to(prev, next, last) \ -do { flush_tlb_pending(); \ - save_and_clear_fpu(); \ +do { save_and_clear_fpu(); \ /* If you are tempted to conditionalize the following */ \ /* so that ASI is only written if it changes, think again. */ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index 25849ae..dd38075 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -132,8 +132,6 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \ _TIF_SIGPENDING) -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #endif /* __KERNEL__ */ #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 269bd92..d5e5042 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -256,8 +256,6 @@ static inline bool test_and_clear_restore_sigmask(void) return true; } -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) #define test_thread_64bit_stack(__SP) \ ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index 2ef4634..f0d6a97 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -11,24 +11,40 @@ struct tlb_batch { struct mm_struct *mm; unsigned long tlb_nr; + unsigned long active; unsigned long vaddrs[TLB_BATCH_NR]; }; extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); extern void flush_tsb_user(struct tlb_batch *tb); +extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); /* TLB flush operations. 
*/ -extern void flush_tlb_pending(void); +static inline void flush_tlb_mm(struct mm_struct *mm) +{ +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE -#define flush_tlb_range(vma,start,end) \ - do { (void)(start); flush_tlb_pending(); } while (0) -#define flush_tlb_page(vma,addr) flush_tlb_pending() -#define flush_tlb_mm(mm) flush_tlb_pending() +extern void flush_tlb_pending(void); +extern void arch_enter_lazy_mmu_mode(void); +extern void arch_leave_lazy_mmu_mode(void); +#define arch_flush_lazy_mmu_mode() do {} while (0) /* Local cpu only. */ extern void __flush_tlb_all(void); - +extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifndef CONFIG_SMP @@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \ __flush_tlb_kernel_range(start,end); \ } while (0) +static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); +} + #else /* CONFIG_SMP */ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); #define flush_tlb_kernel_range(start, end) \ do { flush_tsb_kernel_range(start,end); \ smp_flush_tlb_kernel_range(start, end); \ } while (0) +#define global_flush_tlb_page(mm, vaddr) \ + smp_flush_tlb_page(mm, vaddr) + #endif /* ! CONFIG_SMP */ #endif /* _SPARC64_TLBFLUSH_H */ diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index ce175af..b5843ee 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild @@ -44,7 +44,6 @@ header-y += swab.h header-y += termbits.h header-y += termios.h header-y += traps.h -header-y += types.h header-y += uctx.h header-y += unistd.h header-y += utrap.h diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h deleted file mode 100644 index 383d156..0000000 --- a/arch/sparc/include/uapi/asm/types.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _SPARC_TYPES_H -#define _SPARC_TYPES_H -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. 
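The tlbflush_64.h rework above makes flush_tlb_mm(), flush_tlb_page() and flush_tlb_range() empty and instead advertises __HAVE_ARCH_ENTER_LAZY_MMU_MODE: page-table updates are expected to happen inside a lazy-MMU section, with the per-cpu tlb_batch drained when the section ends, while single out-of-band flushes go through global_flush_tlb_page(). A rough sketch of the batching contract, assuming only the declarations in the hunk above (real call sites live in the generic mm code):

#include <asm/tlbflush.h>

static void demo_batched_pte_updates(void)
{
	arch_enter_lazy_mmu_mode();
	/* ... set_pte_at()/pte_clear() updates; their TLB flushes are queued
	 *     in the per-cpu tlb_batch rather than issued one by one ... */
	arch_leave_lazy_mmu_mode();	/* drains the batch (flush_tlb_pending()) */
}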
- */ - -#if defined(__sparc__) - -#include <asm-generic/int-ll64.h> - -#endif /* defined(__sparc__) */ - -#endif /* defined(_SPARC_TYPES_H) */ diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 6cf591b..5276fd4 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -102,9 +102,6 @@ obj-$(CONFIG_PCI_MSI) += pci_msi.o obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o -# sparc64 cpufreq -obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o -obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o obj-$(CONFIG_US3_MC) += chmc.o obj-$(CONFIG_KPROBES) += kprobes.o diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index a6c94a2..5c51258 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void) sparc_pmu_type = "niagara5"; break; + case SUN4V_CHIP_SPARC64X: + sparc_cpu_type = "SPARC64-X"; + sparc_fpu_type = "SPARC64-X integrated FPU"; + sparc_pmu_type = "sparc64-x"; + break; + default: printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", prom_cpu_compatible); diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 2feb15c..26b706a 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -134,6 +134,8 @@ prom_niagara_prefix: .asciz "SUNW,UltraSPARC-T" prom_sparc_prefix: .asciz "SPARC-" +prom_sparc64x_prefix: + .asciz "SPARC64-X" .align 4 prom_root_compatible: .skip 64 @@ -412,7 +414,7 @@ sun4v_chip_type: cmp %g2, 'T' be,pt %xcc, 70f cmp %g2, 'M' - bne,pn %xcc, 4f + bne,pn %xcc, 49f nop 70: ldub [%g1 + 7], %g2 @@ -425,7 +427,7 @@ sun4v_chip_type: cmp %g2, '5' be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA5, %g4 - ba,pt %xcc, 4f + ba,pt %xcc, 49f nop 91: sethi %hi(prom_cpu_compatible), %g1 @@ -439,6 +441,25 @@ sun4v_chip_type: mov SUN4V_CHIP_NIAGARA2, %g4 4: + /* Athena */ + sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + sethi %hi(prom_sparc64x_prefix), %g7 + or %g7, %lo(prom_sparc64x_prefix), %g7 + mov 9, %g3 +41: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 + bne,pn %icc, 49f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 41b + add %g1, 1, %g1 + mov SUN4V_CHIP_SPARC64X, %g4 + ba,pt %xcc, 5f + nop + +49: mov SUN4V_CHIP_UNKNOWN, %g4 5: sethi %hi(sun4v_chip_type), %g2 or %g2, %lo(sun4v_chip_type), %g2 diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S index 9365432..605c960 100644 --- a/arch/sparc/kernel/hvtramp.S +++ b/arch/sparc/kernel/hvtramp.S @@ -128,8 +128,7 @@ hv_cpu_startup: call smp_callin nop - call cpu_idle - mov 0, %o0 + call cpu_panic nop diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index fc43208..4d14871 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -186,6 +186,8 @@ struct grpci2_cap_first { #define CAP9_IOMAP_OFS 0x20 #define CAP9_BARSIZE_OFS 0x24 +#define TGT 256 + struct grpci2_priv { struct leon_pci_info info; /* must be on top of this structure */ struct grpci2_regs *regs; @@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus, if (where & 0x3) return -EINVAL; - if (bus == 0 && PCI_SLOT(devfn) != 0) - devfn += (0x8 * 6); + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } /* Select bus */ spin_lock_irqsave(&grpci2_dev_lock, flags); @@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus, if (where & 0x3) 
return -EINVAL; - if (bus == 0 && PCI_SLOT(devfn) != 0) - devfn += (0x8 * 6); + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } /* Select bus */ spin_lock_irqsave(&grpci2_dev_lock, flags); @@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn, unsigned int busno = bus->number; int ret; - if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { + if (PCI_SLOT(devfn) > 15 || busno > 255) { *val = ~0; return 0; } @@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn, struct grpci2_priv *priv = grpci2priv; unsigned int busno = bus->number; - if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) + if (PCI_SLOT(devfn) > 15 || busno > 255) return 0; #ifdef GRPCI2_DEBUG_CFGACCESS @@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv) REGSTORE(regs->ahbmst_map[i], priv->pci_area); /* Get the GRPCI2 Host PCI ID */ - grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); + grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid); /* Get address to first (always defined) capability structure */ - grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); + grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr); /* Enable/Disable Byte twisting */ - grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); + grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map); io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map); /* Setup the Host's PCI Target BARs for other peripherals to access, * and do DMA to the host's memory. The target BARs can be sized and @@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv) pciadr = 0; } } - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); - grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4, + bar_sz); + grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", i, pciadr, ahbadr); } /* set as bus master and enable pci memory responses */ - grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); + grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); - grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); + grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); /* Enable Error respone (CPU-TRAP) on illegal memory access. */ REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 62eede1..fdd819d 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -64,23 +64,12 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *); struct task_struct *last_task_used_math = NULL; struct thread_info *current_set[NR_CPUS]; -/* - * the idle loop on a Sparc... ;) - */ -void cpu_idle(void) +/* Idle loop support. 
*/ +void arch_cpu_idle(void) { - set_thread_flag(TIF_POLLING_NRFLAG); - - /* endless idle loop with no priority at all */ - for (;;) { - while (!need_resched()) { - if (sparc_idle) - (*sparc_idle)(); - else - cpu_relax(); - } - schedule_preempt_disabled(); - } + if (sparc_idle) + (*sparc_idle)(); + local_irq_enable(); } /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ @@ -123,6 +112,8 @@ void show_regs(struct pt_regs *r) { struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14]; + show_regs_print_info(KERN_DEFAULT); + printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", r->psr, r->pc, r->npc, r->y, print_tainted()); printk("PC: <%pS>\n", (void *) r->pc); @@ -153,11 +144,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) struct reg_window32 *rw; int count = 0; - if (tsk != NULL) - task_base = (unsigned long) task_stack_page(tsk); - else - task_base = (unsigned long) current_thread_info(); + if (!tsk) + tsk = current; + if (tsk == current && !_ksp) + __asm__ __volatile__("mov %%fp, %0" : "=r" (_ksp)); + + task_base = (unsigned long) task_stack_page(tsk); fp = (unsigned long) _ksp; do { /* Bogus frame pointer? */ @@ -173,17 +166,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) printk("\n"); } -void dump_stack(void) -{ - unsigned long *ksp; - - __asm__ __volatile__("mov %%fp, %0" - : "=r" (ksp)); - show_stack(current, ksp); -} - -EXPORT_SYMBOL(dump_stack); - /* * Note: sparc64 has a pretty intricated thread_saved_pc, check it out. */ diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index cdb80b2..baebab2 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -52,20 +52,17 @@ #include "kstack.h" -static void sparc64_yield(int cpu) +/* Idle loop support on sparc64. */ +void arch_cpu_idle(void) { if (tlb_type != hypervisor) { touch_nmi_watchdog(); - return; - } - - clear_thread_flag(TIF_POLLING_NRFLAG); - smp_mb__after_clear_bit(); - - while (!need_resched() && !cpu_is_offline(cpu)) { + } else { unsigned long pstate; - /* Disable interrupts. */ + /* The sun4v sleeping code requires that we have PSTATE.IE cleared over + * the cpu sleep hypervisor call. + */ __asm__ __volatile__( "rdpr %%pstate, %0\n\t" "andn %0, %1, %0\n\t" @@ -73,7 +70,7 @@ static void sparc64_yield(int cpu) : "=&r" (pstate) : "i" (PSTATE_IE)); - if (!need_resched() && !cpu_is_offline(cpu)) + if (!need_resched() && !cpu_is_offline(smp_processor_id())) sun4v_cpu_yield(); /* Re-enable interrupts. */ @@ -84,36 +81,16 @@ static void sparc64_yield(int cpu) : "=&r" (pstate) : "i" (PSTATE_IE)); } - - set_thread_flag(TIF_POLLING_NRFLAG); + local_irq_enable(); } -/* The idle loop on sparc64. 
*/ -void cpu_idle(void) -{ - int cpu = smp_processor_id(); - - set_thread_flag(TIF_POLLING_NRFLAG); - - while(1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - - while (!need_resched() && !cpu_is_offline(cpu)) - sparc64_yield(cpu); - - rcu_idle_exit(); - tick_nohz_idle_exit(); - #ifdef CONFIG_HOTPLUG_CPU - if (cpu_is_offline(cpu)) { - sched_preempt_enable_no_resched(); - cpu_play_dead(); - } -#endif - schedule_preempt_disabled(); - } +void arch_cpu_idle_dead() +{ + sched_preempt_enable_no_resched(); + cpu_play_dead(); } +#endif #ifdef CONFIG_COMPAT static void show_regwindow32(struct pt_regs *regs) @@ -186,6 +163,8 @@ static void show_regwindow(struct pt_regs *regs) void show_regs(struct pt_regs *regs) { + show_regs_print_info(KERN_DEFAULT); + printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, regs->tpc, regs->tnpc, regs->y, print_tainted()); printk("TPC: <%pS>\n", (void *) regs->tpc); @@ -315,7 +294,7 @@ static void sysrq_handle_globreg(int key) static struct sysrq_key_op sparc_globalreg_op = { .handler = sysrq_handle_globreg, - .help_msg = "global-regs(Y)", + .help_msg = "global-regs(y)", .action_msg = "Show Global CPU Regs", }; @@ -385,7 +364,7 @@ static void sysrq_handle_globpmu(int key) static struct sysrq_key_op sparc_globalpmu_op = { .handler = sysrq_handle_globpmu, - .help_msg = "global-pmu(X)", + .help_msg = "global-pmu(x)", .action_msg = "Show Global PMU Regs", }; diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 9e7e6d7..e3f2b81 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -369,7 +369,7 @@ void __cpuinit sparc_start_secondary(void *arg) local_irq_enable(); wmb(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); /* We should never reach here! */ BUG(); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 537eb66..77539ed 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -127,6 +127,8 @@ void __cpuinit smp_callin(void) /* idle thread is expected to have preempt disabled */ preempt_disable(); + + cpu_startup_entry(CPUHP_ONLINE); } void cpu_panic(void) @@ -849,7 +851,7 @@ void smp_tsb_sync(struct mm_struct *mm) } extern unsigned long xcall_flush_tlb_mm; -extern unsigned long xcall_flush_tlb_pending; +extern unsigned long xcall_flush_tlb_page; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_fetch_glob_regs; extern unsigned long xcall_fetch_glob_pmu; @@ -1074,23 +1076,56 @@ local_flush_and_out: put_cpu(); } +struct tlb_pending_info { + unsigned long ctx; + unsigned long nr; + unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ + struct tlb_pending_info *t = info; + + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} + void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) { u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; int cpu = get_cpu(); + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); else - smp_cross_call_masked(&xcall_flush_tlb_pending, - ctx, nr, (unsigned long) vaddrs, - mm_cpumask(mm)); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); put_cpu(); } +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long context = CTX_HWBITS(mm->context); + int cpu = get_cpu(); + + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) + 
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); + else + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); + + put_cpu(); +} + void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= PAGE_MASK; diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S index da1b781..2e973a2 100644 --- a/arch/sparc/kernel/trampoline_64.S +++ b/arch/sparc/kernel/trampoline_64.S @@ -407,8 +407,7 @@ after_lock_tlb: call smp_callin nop - call cpu_idle - mov 0, %o0 + call cpu_panic nop 1: b,a,pt %xcc, 1b diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 8d38ca9..b3f833a 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2350,13 +2350,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) } while (++count < 16); } -void dump_stack(void) -{ - show_stack(current, NULL); -} - -EXPORT_SYMBOL(dump_stack); - static inline struct reg_window *kernel_stack_up(struct reg_window *rw) { unsigned long fp = rw->ins[6]; diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c deleted file mode 100644 index 489fc15..0000000 --- a/arch/sparc/kernel/us2e_cpufreq.c +++ /dev/null @@ -1,413 +0,0 @@ -/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support - * - * Copyright (C) 2003 David S. Miller (davem@redhat.com) - * - * Many thanks to Dominik Brodowski for fixing up the cpufreq - * infrastructure in order to make this driver easier to implement. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/cpufreq.h> -#include <linux/threads.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/init.h> - -#include <asm/asi.h> -#include <asm/timer.h> - -static struct cpufreq_driver *cpufreq_us2e_driver; - -struct us2e_freq_percpu_info { - struct cpufreq_frequency_table table[6]; -}; - -/* Indexed by cpu number. */ -static struct us2e_freq_percpu_info *us2e_freq_table; - -#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL -#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL - -/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled - * in the ESTAR mode control register. - */ -#define ESTAR_MODE_DIV_1 0x0000000000000000UL -#define ESTAR_MODE_DIV_2 0x0000000000000001UL -#define ESTAR_MODE_DIV_4 0x0000000000000003UL -#define ESTAR_MODE_DIV_6 0x0000000000000002UL -#define ESTAR_MODE_DIV_8 0x0000000000000004UL -#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL - -#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL -#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL -#define MCTRL0_REFR_COUNT_SHIFT 8 -#define MCTRL0_REFR_INTERVAL 7800 -#define MCTRL0_REFR_CLKS_P_CNT 64 - -static unsigned long read_hbreg(unsigned long addr) -{ - unsigned long ret; - - __asm__ __volatile__("ldxa [%1] %2, %0" - : "=&r" (ret) - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); - return ret; -} - -static void write_hbreg(unsigned long addr, unsigned long val) -{ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) - : "memory"); - if (addr == HBIRD_ESTAR_MODE_ADDR) { - /* Need to wait 16 clock cycles for the PLL to lock. 
*/ - udelay(1); - } -} - -static void self_refresh_ctl(int enable) -{ - unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); - - if (enable) - mctrl |= MCTRL0_SREFRESH_ENAB; - else - mctrl &= ~MCTRL0_SREFRESH_ENAB; - write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); - (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR); -} - -static void frob_mem_refresh(int cpu_slowing_down, - unsigned long clock_tick, - unsigned long old_divisor, unsigned long divisor) -{ - unsigned long old_refr_count, refr_count, mctrl; - - refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); - refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); - - mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); - old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK) - >> MCTRL0_REFR_COUNT_SHIFT; - - mctrl &= ~MCTRL0_REFR_COUNT_MASK; - mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT; - write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); - mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); - - if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) { - unsigned long usecs; - - /* We have to wait for both refresh counts (old - * and new) to go to zero. - */ - usecs = (MCTRL0_REFR_CLKS_P_CNT * - (refr_count + old_refr_count) * - 1000000UL * - old_divisor) / clock_tick; - udelay(usecs + 1UL); - } -} - -static void us2e_transition(unsigned long estar, unsigned long new_bits, - unsigned long clock_tick, - unsigned long old_divisor, unsigned long divisor) -{ - unsigned long flags; - - local_irq_save(flags); - - estar &= ~ESTAR_MODE_DIV_MASK; - - /* This is based upon the state transition diagram in the IIe manual. */ - if (old_divisor == 2 && divisor == 1) { - self_refresh_ctl(0); - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - frob_mem_refresh(0, clock_tick, old_divisor, divisor); - } else if (old_divisor == 1 && divisor == 2) { - frob_mem_refresh(1, clock_tick, old_divisor, divisor); - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - self_refresh_ctl(1); - } else if (old_divisor == 1 && divisor > 2) { - us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, - 1, 2); - us2e_transition(estar, new_bits, clock_tick, - 2, divisor); - } else if (old_divisor > 2 && divisor == 1) { - us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, - old_divisor, 2); - us2e_transition(estar, new_bits, clock_tick, - 2, divisor); - } else if (old_divisor < divisor) { - frob_mem_refresh(0, clock_tick, old_divisor, divisor); - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - } else if (old_divisor > divisor) { - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - frob_mem_refresh(1, clock_tick, old_divisor, divisor); - } else { - BUG(); - } - - local_irq_restore(flags); -} - -static unsigned long index_to_estar_mode(unsigned int index) -{ - switch (index) { - case 0: - return ESTAR_MODE_DIV_1; - - case 1: - return ESTAR_MODE_DIV_2; - - case 2: - return ESTAR_MODE_DIV_4; - - case 3: - return ESTAR_MODE_DIV_6; - - case 4: - return ESTAR_MODE_DIV_8; - - default: - BUG(); - } -} - -static unsigned long index_to_divisor(unsigned int index) -{ - switch (index) { - case 0: - return 1; - - case 1: - return 2; - - case 2: - return 4; - - case 3: - return 6; - - case 4: - return 8; - - default: - BUG(); - } -} - -static unsigned long estar_to_divisor(unsigned long estar) -{ - unsigned long ret; - - switch (estar & ESTAR_MODE_DIV_MASK) { - case ESTAR_MODE_DIV_1: - ret = 1; - break; - case ESTAR_MODE_DIV_2: - ret = 2; - break; - case ESTAR_MODE_DIV_4: - ret = 4; - break; - case ESTAR_MODE_DIV_6: - ret = 6; - break; - case ESTAR_MODE_DIV_8: - ret = 8; - break; - default: - BUG(); - } - 
- return ret; -} - -static unsigned int us2e_freq_get(unsigned int cpu) -{ - cpumask_t cpus_allowed; - unsigned long clock_tick, estar; - - if (!cpu_online(cpu)) - return 0; - - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - clock_tick = sparc64_get_clock_tick(cpu) / 1000; - estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); - - set_cpus_allowed_ptr(current, &cpus_allowed); - - return clock_tick / estar_to_divisor(estar); -} - -static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) -{ - unsigned long new_bits, new_freq; - unsigned long clock_tick, divisor, old_divisor, estar; - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - - if (!cpu_online(cpu)) - return; - - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; - new_bits = index_to_estar_mode(index); - divisor = index_to_divisor(index); - new_freq /= divisor; - - estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); - - old_divisor = estar_to_divisor(estar); - - freqs.old = clock_tick / old_divisor; - freqs.new = new_freq; - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - if (old_divisor != divisor) - us2e_transition(estar, new_bits, clock_tick * 1000, - old_divisor, divisor); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - set_cpus_allowed_ptr(current, &cpus_allowed); -} - -static int us2e_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int new_index = 0; - - if (cpufreq_frequency_table_target(policy, - &us2e_freq_table[policy->cpu].table[0], - target_freq, relation, &new_index)) - return -EINVAL; - - us2e_set_cpu_divider_index(policy->cpu, new_index); - - return 0; -} - -static int us2e_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &us2e_freq_table[policy->cpu].table[0]); -} - -static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; - struct cpufreq_frequency_table *table = - &us2e_freq_table[cpu].table[0]; - - table[0].index = 0; - table[0].frequency = clock_tick / 1; - table[1].index = 1; - table[1].frequency = clock_tick / 2; - table[2].index = 2; - table[2].frequency = clock_tick / 4; - table[2].index = 3; - table[2].frequency = clock_tick / 6; - table[2].index = 4; - table[2].frequency = clock_tick / 8; - table[2].index = 5; - table[3].frequency = CPUFREQ_TABLE_END; - - policy->cpuinfo.transition_latency = 0; - policy->cur = clock_tick; - - return cpufreq_frequency_table_cpuinfo(policy, table); -} - -static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) -{ - if (cpufreq_us2e_driver) - us2e_set_cpu_divider_index(policy->cpu, 0); - - return 0; -} - -static int __init us2e_freq_init(void) -{ - unsigned long manuf, impl, ver; - int ret; - - if (tlb_type != spitfire) - return -ENODEV; - - __asm__("rdpr %%ver, %0" : "=r" (ver)); - manuf = ((ver >> 48) & 0xffff); - impl = ((ver >> 32) & 0xffff); - - if (manuf == 0x17 && impl == 0x13) { - struct cpufreq_driver *driver; - - ret = -ENOMEM; - driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); - if (!driver) - goto err_out; - - us2e_freq_table = kzalloc( - (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), - GFP_KERNEL); - if (!us2e_freq_table) - goto err_out; - - driver->init = us2e_freq_cpu_init; - driver->verify = 
us2e_freq_verify; - driver->target = us2e_freq_target; - driver->get = us2e_freq_get; - driver->exit = us2e_freq_cpu_exit; - driver->owner = THIS_MODULE, - strcpy(driver->name, "UltraSPARC-IIe"); - - cpufreq_us2e_driver = driver; - ret = cpufreq_register_driver(driver); - if (ret) - goto err_out; - - return 0; - -err_out: - if (driver) { - kfree(driver); - cpufreq_us2e_driver = NULL; - } - kfree(us2e_freq_table); - us2e_freq_table = NULL; - return ret; - } - - return -ENODEV; -} - -static void __exit us2e_freq_exit(void) -{ - if (cpufreq_us2e_driver) { - cpufreq_unregister_driver(cpufreq_us2e_driver); - kfree(cpufreq_us2e_driver); - cpufreq_us2e_driver = NULL; - kfree(us2e_freq_table); - us2e_freq_table = NULL; - } -} - -MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); -MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe"); -MODULE_LICENSE("GPL"); - -module_init(us2e_freq_init); -module_exit(us2e_freq_exit); diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c deleted file mode 100644 index eb1624b..0000000 --- a/arch/sparc/kernel/us3_cpufreq.c +++ /dev/null @@ -1,274 +0,0 @@ -/* us3_cpufreq.c: UltraSPARC-III cpu frequency support - * - * Copyright (C) 2003 David S. Miller (davem@redhat.com) - * - * Many thanks to Dominik Brodowski for fixing up the cpufreq - * infrastructure in order to make this driver easier to implement. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/cpufreq.h> -#include <linux/threads.h> -#include <linux/slab.h> -#include <linux/init.h> - -#include <asm/head.h> -#include <asm/timer.h> - -static struct cpufreq_driver *cpufreq_us3_driver; - -struct us3_freq_percpu_info { - struct cpufreq_frequency_table table[4]; -}; - -/* Indexed by cpu number. */ -static struct us3_freq_percpu_info *us3_freq_table; - -/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled - * in the Safari config register. 
- */ -#define SAFARI_CFG_DIV_1 0x0000000000000000UL -#define SAFARI_CFG_DIV_2 0x0000000040000000UL -#define SAFARI_CFG_DIV_32 0x0000000080000000UL -#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL - -static unsigned long read_safari_cfg(void) -{ - unsigned long ret; - - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=&r" (ret) - : "i" (ASI_SAFARI_CONFIG)); - return ret; -} - -static void write_safari_cfg(unsigned long val) -{ - __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (val), "i" (ASI_SAFARI_CONFIG) - : "memory"); -} - -static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg) -{ - unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; - unsigned long ret; - - switch (safari_cfg & SAFARI_CFG_DIV_MASK) { - case SAFARI_CFG_DIV_1: - ret = clock_tick / 1; - break; - case SAFARI_CFG_DIV_2: - ret = clock_tick / 2; - break; - case SAFARI_CFG_DIV_32: - ret = clock_tick / 32; - break; - default: - BUG(); - } - - return ret; -} - -static unsigned int us3_freq_get(unsigned int cpu) -{ - cpumask_t cpus_allowed; - unsigned long reg; - unsigned int ret; - - if (!cpu_online(cpu)) - return 0; - - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - reg = read_safari_cfg(); - ret = get_current_freq(cpu, reg); - - set_cpus_allowed_ptr(current, &cpus_allowed); - - return ret; -} - -static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) -{ - unsigned long new_bits, new_freq, reg; - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - - if (!cpu_online(cpu)) - return; - - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - new_freq = sparc64_get_clock_tick(cpu) / 1000; - switch (index) { - case 0: - new_bits = SAFARI_CFG_DIV_1; - new_freq /= 1; - break; - case 1: - new_bits = SAFARI_CFG_DIV_2; - new_freq /= 2; - break; - case 2: - new_bits = SAFARI_CFG_DIV_32; - new_freq /= 32; - break; - - default: - BUG(); - } - - reg = read_safari_cfg(); - - freqs.old = get_current_freq(cpu, reg); - freqs.new = new_freq; - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - reg &= ~SAFARI_CFG_DIV_MASK; - reg |= new_bits; - write_safari_cfg(reg); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - set_cpus_allowed_ptr(current, &cpus_allowed); -} - -static int us3_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int new_index = 0; - - if (cpufreq_frequency_table_target(policy, - &us3_freq_table[policy->cpu].table[0], - target_freq, - relation, - &new_index)) - return -EINVAL; - - us3_set_cpu_divider_index(policy->cpu, new_index); - - return 0; -} - -static int us3_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &us3_freq_table[policy->cpu].table[0]); -} - -static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; - struct cpufreq_frequency_table *table = - &us3_freq_table[cpu].table[0]; - - table[0].index = 0; - table[0].frequency = clock_tick / 1; - table[1].index = 1; - table[1].frequency = clock_tick / 2; - table[2].index = 2; - table[2].frequency = clock_tick / 32; - table[3].index = 0; - table[3].frequency = CPUFREQ_TABLE_END; - - policy->cpuinfo.transition_latency = 0; - policy->cur = clock_tick; - - return cpufreq_frequency_table_cpuinfo(policy, 
table); -} - -static int us3_freq_cpu_exit(struct cpufreq_policy *policy) -{ - if (cpufreq_us3_driver) - us3_set_cpu_divider_index(policy->cpu, 0); - - return 0; -} - -static int __init us3_freq_init(void) -{ - unsigned long manuf, impl, ver; - int ret; - - if (tlb_type != cheetah && tlb_type != cheetah_plus) - return -ENODEV; - - __asm__("rdpr %%ver, %0" : "=r" (ver)); - manuf = ((ver >> 48) & 0xffff); - impl = ((ver >> 32) & 0xffff); - - if (manuf == CHEETAH_MANUF && - (impl == CHEETAH_IMPL || - impl == CHEETAH_PLUS_IMPL || - impl == JAGUAR_IMPL || - impl == PANTHER_IMPL)) { - struct cpufreq_driver *driver; - - ret = -ENOMEM; - driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); - if (!driver) - goto err_out; - - us3_freq_table = kzalloc( - (NR_CPUS * sizeof(struct us3_freq_percpu_info)), - GFP_KERNEL); - if (!us3_freq_table) - goto err_out; - - driver->init = us3_freq_cpu_init; - driver->verify = us3_freq_verify; - driver->target = us3_freq_target; - driver->get = us3_freq_get; - driver->exit = us3_freq_cpu_exit; - driver->owner = THIS_MODULE, - strcpy(driver->name, "UltraSPARC-III"); - - cpufreq_us3_driver = driver; - ret = cpufreq_register_driver(driver); - if (ret) - goto err_out; - - return 0; - -err_out: - if (driver) { - kfree(driver); - cpufreq_us3_driver = NULL; - } - kfree(us3_freq_table); - us3_freq_table = NULL; - return ret; - } - - return -ENODEV; -} - -static void __exit us3_freq_exit(void) -{ - if (cpufreq_us3_driver) { - cpufreq_unregister_driver(cpufreq_us3_driver); - kfree(cpufreq_us3_driver); - cpufreq_us3_driver = NULL; - kfree(us3_freq_table); - us3_freq_table = NULL; - } -} - -MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); -MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III"); -MODULE_LICENSE("GPL"); - -module_init(us3_freq_init); -module_exit(us3_freq_exit); diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 8410065f2..dbe119b 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -45,4 +45,3 @@ obj-y += iomap.o obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o obj-y += ksyms.o obj-$(CONFIG_SPARC64) += PeeCeeI.o -obj-y += usercopy.o diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 48d00e7..8ec4e9c 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c @@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len) void bit_map_init(struct bit_map *t, unsigned long *map, int size) { - - if ((size & 07) != 0) - BUG(); - memset(map, 0, size>>3); - + bitmap_zero(map, size); memset(t, 0, sizeof *t); spin_lock_init(&t->lock); t->map = map; diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c deleted file mode 100644 index 5c4284c..0000000 --- a/arch/sparc/lib/usercopy.c +++ /dev/null @@ -1,9 +0,0 @@ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/bug.h> - -void copy_from_user_overflow(void) -{ - WARN(1, "Buffer overflow detected!\n"); -} -EXPORT_SYMBOL(copy_from_user_overflow); diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 48e0c03..4490c39 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -282,14 +282,8 @@ static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); #endif - for (tmp = start_pfn; tmp < end_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; - } + for (tmp = start_pfn; tmp < end_pfn; tmp++) + 
free_highmem_page(pfn_to_page(tmp)); } void __init mem_init(void) @@ -347,8 +341,6 @@ void __init mem_init(void) map_high_region(start_pfn, end_pfn); } - totalram_pages += totalhigh_pages; - codepages = (((unsigned long) &_etext) - ((unsigned long)&_start)); codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext)); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 1588d33..6ac99d6 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2181,10 +2181,9 @@ unsigned long vmemmap_table[VMEMMAP_SIZE]; static long __meminitdata addr_start, addr_end; static int __meminitdata node_start; -int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) +int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, + int node) { - unsigned long vstart = (unsigned long) start; - unsigned long vend = (unsigned long) (start + nr); unsigned long phys_start = (vstart - VMEMMAP_BASE); unsigned long phys_end = (vend - VMEMMAP_BASE); unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; @@ -2236,7 +2235,7 @@ void __meminit vmemmap_populate_print_last(void) } } -void vmemmap_free(struct page *memmap, unsigned long nr_pages) +void vmemmap_free(unsigned long start, unsigned long end) { } diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 0f4f719..28f96f2 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -34,7 +34,7 @@ #define IOMMU_RNGE IOMMU_RNGE_256MB #define IOMMU_START 0xF0000000 #define IOMMU_WINSIZE (256*1024*1024U) -#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */ +#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */ #define IOMMU_ORDER 6 /* 4096 * (1<<6) */ /* srmmu.c */ diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c38bb72..036c279 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void) SRMMU_NOCACHE_ALIGN_MAX, 0UL); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); - srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); + srmmu_nocache_bitmap = + __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long), + SMP_CACHE_BYTES, 0UL); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index ba6ae7f..83d89bc 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); + struct mm_struct *mm = tb->mm; - if (tb->tlb_nr) { - flush_tsb_user(tb); + if (!tb->tlb_nr) + goto out; - if (CTX_VALID(tb->mm->context)) { + flush_tsb_user(tb); + + if (CTX_VALID(mm->context)) { + if (tb->tlb_nr == 1) { + global_flush_tlb_page(mm, tb->vaddrs[0]); + } else { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); @@ -37,12 +43,30 @@ void flush_tlb_pending(void) tb->tlb_nr, &tb->vaddrs[0]); #endif } - tb->tlb_nr = 0; } + tb->tlb_nr = 0; + +out: put_cpu_var(tlb_batch); } +void arch_enter_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + tb->active = 1; +} + +void arch_leave_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + if (tb->tlb_nr) + flush_tlb_pending(); + tb->active = 0; +} + static void tlb_batch_add_one(struct mm_struct *mm, unsigned long 
vaddr, bool exec) { @@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, nr = 0; } + if (!tb->active) { + global_flush_tlb_page(mm, vaddr); + flush_tsb_user_page(mm, vaddr); + goto out; + } + if (nr == 0) tb->mm = mm; @@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, if (nr >= TLB_BATCH_NR) flush_tlb_pending(); +out: put_cpu_var(tlb_batch); } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 428982b..2cc3bce 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -7,11 +7,10 @@ #include <linux/preempt.h> #include <linux/slab.h> #include <asm/page.h> -#include <asm/tlbflush.h> -#include <asm/tlb.h> -#include <asm/mmu_context.h> #include <asm/pgtable.h> +#include <asm/mmu_context.h> #include <asm/tsb.h> +#include <asm/tlb.h> #include <asm/oplib.h> extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) } } -static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, - unsigned long tsb, unsigned long nentries) +static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, + unsigned long hash_shift, + unsigned long nentries) { - unsigned long i; + unsigned long tag, ent, hash; - for (i = 0; i < tb->tlb_nr; i++) { - unsigned long v = tb->vaddrs[i]; - unsigned long tag, ent, hash; + v &= ~0x1UL; + hash = tsb_hash(v, hash_shift, nentries); + ent = tsb + (hash * sizeof(struct tsb)); + tag = (v >> 22UL); - v &= ~0x1UL; + tsb_flush(ent, tag); +} - hash = tsb_hash(v, hash_shift, nentries); - ent = tsb + (hash * sizeof(struct tsb)); - tag = (v >> 22UL); +static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, + unsigned long tsb, unsigned long nentries) +{ + unsigned long i; - tsb_flush(ent, tag); - } + for (i = 0; i < tb->tlb_nr; i++) + __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); } void flush_tsb_user(struct tlb_batch *tb) @@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb) spin_unlock_irqrestore(&mm->context.lock, flags); } +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long nentries, base, flags; + + spin_lock_irqsave(&mm->context.lock, flags); + + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; + nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); + } +#endif + spin_unlock_irqrestore(&mm->context.lock, flags); +} + #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index f8e13d4..432aa0c 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */ nop .align 32 + .globl __flush_tlb_page +__flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, %pstate + mov SECONDARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + andn 
%o1, 1, %o3 + be,pn %icc, 1f + or %o3, 0x10, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + retl + wrpr %g7, 0x0, %pstate + nop + nop + nop + nop + + .align 32 .globl __flush_tlb_pending __flush_tlb_pending: /* 26 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ @@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */ retl wrpr %g7, 0x0, %pstate +__cheetah_flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, 0x0, %pstate + wrpr %g0, 1, %tl + mov PRIMARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 + sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 + or %o0, %o3, %o0 /* Preserve nucleus page size fields */ + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + be,pn %icc, 1f + andn %o1, 1, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + wrpr %g0, 0, %tl + retl + wrpr %g7, 0x0, %pstate + __cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 @@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */ retl nop +__hypervisor_flush_tlb_page: /* 11 insns */ + /* %o0 = context, %o1 = vaddr */ + mov %o0, %g2 + mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ + mov %g2, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 + retl + nop + __hypervisor_flush_tlb_pending: /* 16 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 @@ -339,6 +405,13 @@ cheetah_patch_cachetlbops: call tlb_patch_one mov 19, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__cheetah_flush_tlb_page), %o1 + or %o1, %lo(__cheetah_flush_tlb_page), %o1 + call tlb_patch_one + mov 22, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 @@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */ nop nop - .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=context, %g1=nr, %g7=vaddrs[] */ - sllx %g1, 3, %g1 + .globl xcall_flush_tlb_page +xcall_flush_tlb_page: /* 17 insns */ + /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */ or %g5, %g4, %g5 mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %g5 - andcc %g5, 0x1, %g0 + andcc %g1, 0x1, %g0 be,pn %icc, 2f - - andn %g5, 0x1, %g5 + andn %g1, 0x1, %g5 stxa %g0, [%g5] ASI_IMMU_DEMAP 2: stxa %g0, [%g5] ASI_DMMU_DEMAP membar #Sync - brnz,pt %g1, 1b - nop stxa %g2, [%g4] ASI_DMMU retry nop + nop .globl xcall_flush_tlb_kernel_range xcall_flush_tlb_kernel_range: /* 25 insns */ @@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ membar #Sync retry - .globl __hypervisor_xcall_flush_tlb_pending -__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ - sllx %g1, 3, %g1 + .globl __hypervisor_xcall_flush_tlb_page +__hypervisor_xcall_flush_tlb_page: /* 17 insns */ + /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %o0 /* 
ARG0: virtual address */ + mov %g1, %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 @@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,a,pn %o0, __hypervisor_tlb_xcall_error mov %o0, %g5 - brnz,pt %g1, 1b - nop mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 @@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 10, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__hypervisor_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_flush_tlb_page), %o1 + call tlb_patch_one + mov 11, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 @@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 21, %o2 - sethi %hi(xcall_flush_tlb_pending), %o0 - or %o0, %lo(xcall_flush_tlb_pending), %o0 - sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 - or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 + sethi %hi(xcall_flush_tlb_page), %o0 + or %o0, %lo(xcall_flush_tlb_page), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one - mov 21, %o2 + mov 17, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 95bd2ef..0e53439 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -16,8 +16,9 @@ config TILE select GENERIC_PENDING_IRQ if SMP select GENERIC_IRQ_SHOW select HAVE_DEBUG_BUGVERBOSE - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select SYS_HYPERVISOR + select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA @@ -113,13 +114,6 @@ config STRICT_DEVMEM config SMP def_bool y -# Allow checking for compile-time determined overflow errors in -# copy_from_user(). There are still unprovable places in the -# generic code as of 2.6.34, so this option is not really compatible -# with -Werror, which is more useful in general. 
-config DEBUG_COPY_FROM_USER - def_bool n - config HVC_TILE depends on TTY select HVC_DRIVER diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 8c5eff6..4768481 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig @@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m -CONFIG_MULTICORE_RAID456=y CONFIG_MD_FAULTY=m CONFIG_BLK_DEV_DM=m CONFIG_DM_DEBUG=y diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index e7a3dfc..dd2b8f0 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig @@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m -CONFIG_MULTICORE_RAID456=y CONFIG_MD_FAULTY=m CONFIG_BLK_DEV_DM=m CONFIG_DM_DEBUG=y diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index 001d418..78f1f2d 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h @@ -288,6 +288,9 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags, long compat_sys_fallocate(int fd, int mode, u32 offset_lo, u32 offset_hi, u32 len_lo, u32 len_hi); +long compat_sys_llseek(unsigned int fd, unsigned int offset_high, + unsigned int offset_low, loff_t __user * result, + unsigned int origin); /* Assembly trampoline to avoid clobbering r0. */ long _compat_sys_rt_sigreturn(void); diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h index 0f885af..3257733 100644 --- a/arch/tile/include/asm/hugetlb.h +++ b/arch/tile/include/asm/hugetlb.h @@ -16,6 +16,7 @@ #define _ASM_TILE_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 241c0bb..c96f9bb 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h @@ -40,7 +40,15 @@ #include <asm/percpu.h> #include <arch/spr_def.h> -/* Set and clear kernel interrupt masks. */ +/* + * Set and clear kernel interrupt masks. + * + * NOTE: __insn_mtspr() is a compiler builtin marked as a memory + * clobber. We rely on it being equivalent to a compiler barrier in + * this code since arch_local_irq_save() and friends must act as + * compiler barriers. This compiler semantic is baked into enough + * places that the compiler will maintain it going forward. 
+ */ #if CHIP_HAS_SPLIT_INTR_MASK() #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 # error Fix assumptions about which word various interrupts are in diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index e9c670d..ccc8ef3 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -153,8 +153,6 @@ extern void _cpu_idle(void); #define TS_POLLING 0x0004 /* in idle loop but not sleeping */ #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ -#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) - #ifndef __ASSEMBLY__ #define HAVE_SET_RESTORE_SIGMASK 1 static inline void set_restore_sigmask(void) diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index 9ab078a..8a082bc 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h @@ -395,7 +395,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n) return n; } -#ifdef CONFIG_DEBUG_COPY_FROM_USER +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS +/* + * There are still unprovable places in the generic code as of 2.6.34, so this + * option is not really compatible with -Werror, which is more useful in + * general. + */ extern void copy_from_user_overflow(void) __compiletime_warning("copy_from_user() size is not provably correct"); diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c index c262a02..ed37841 100644 --- a/arch/tile/kernel/compat.c +++ b/arch/tile/kernel/compat.c @@ -32,45 +32,59 @@ * adapt the usual convention. */ -long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high) +COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy, + u32, low, u32, high) { return sys_truncate(filename, ((loff_t)high << 32) | low); } -long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high) +COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy, + u32, low, u32, high) { return sys_ftruncate(fd, ((loff_t)high << 32) | low); } -long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count, - u32 dummy, u32 low, u32 high) +COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf, + size_t, count, u32, dummy, u32, low, u32, high) { return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low); } -long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count, - u32 dummy, u32 low, u32 high) +COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf, + size_t, count, u32, dummy, u32, low, u32, high) { return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low); } -long compat_sys_sync_file_range2(int fd, unsigned int flags, - u32 offset_lo, u32 offset_hi, - u32 nbytes_lo, u32 nbytes_hi) +COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags, + u32, offset_lo, u32, offset_hi, + u32, nbytes_lo, u32, nbytes_hi) { return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo, ((loff_t)nbytes_hi << 32) | nbytes_lo, flags); } -long compat_sys_fallocate(int fd, int mode, - u32 offset_lo, u32 offset_hi, - u32 len_lo, u32 len_hi) +COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, + u32, offset_lo, u32, offset_hi, + u32, len_lo, u32, len_hi) { return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo, ((loff_t)len_hi << 32) | len_lo); } +/* + * Avoid bug in generic sys_llseek() that specifies offset_high and + * offset_low as "unsigned long", thus making it possible to pass + * a sign-extended high 32 
bits in offset_low. + */ +COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high, + unsigned int, offset_low, loff_t __user *, result, + unsigned int, origin) +{ + return sys_llseek(fd, offset_high, offset_low, result, origin); +} + /* Provide the compat syscall number to call mapping. */ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), @@ -78,6 +92,7 @@ long compat_sys_fallocate(int fd, int mode, /* See comments in sys.c */ #define compat_sys_fadvise64_64 sys32_fadvise64_64 #define compat_sys_readahead sys32_readahead +#define sys_llseek compat_sys_llseek /* Call the assembly trampolines where necessary. */ #define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c index afb9c9a..34d72a1 100644 --- a/arch/tile/kernel/early_printk.c +++ b/arch/tile/kernel/early_printk.c @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/string.h> #include <linux/irqflags.h> +#include <linux/printk.h> #include <asm/setup.h> #include <hv/hypervisor.h> @@ -33,25 +34,8 @@ static struct console early_hv_console = { }; /* Direct interface for emergencies */ -static struct console *early_console = &early_hv_console; -static int early_console_initialized; static int early_console_complete; -static void early_vprintk(const char *fmt, va_list ap) -{ - char buf[512]; - int n = vscnprintf(buf, sizeof(buf), fmt, ap); - early_console->write(early_console, buf, n); -} - -void early_printk(const char *fmt, ...) -{ - va_list ap; - va_start(ap, fmt); - early_vprintk(fmt, ap); - va_end(ap); -} - void early_panic(const char *fmt, ...) { va_list ap; @@ -69,14 +53,13 @@ static int __initdata keep_early; static int __init setup_early_printk(char *str) { - if (early_console_initialized) + if (early_console) return 1; if (str != NULL && strncmp(str, "keep", 4) == 0) keep_early = 1; early_console = &early_hv_console; - early_console_initialized = 1; register_console(early_console); return 0; @@ -85,12 +68,12 @@ static int __init setup_early_printk(char *str) void __init disable_early_printk(void) { early_console_complete = 1; - if (!early_console_initialized || !early_console) + if (!early_console) return; if (!keep_early) { early_printk("disabling early console\n"); unregister_console(early_console); - early_console_initialized = 0; + early_console = NULL; } else { early_printk("keeping early console\n"); } @@ -98,7 +81,7 @@ void __init disable_early_printk(void) void warn_early_printk(void) { - if (early_console_complete || early_console_initialized) + if (early_console_complete || early_console) return; early_printk("\ Machine shutting down before console output is fully initialized.\n\ diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index caf93ae..8ac3044 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -40,13 +40,11 @@ #include <arch/abi.h> #include <arch/sim_def.h> - /* * Use the (x86) "idle=poll" option to prefer low latency when leaving the * idle loop over low power while in the idle loop, e.g. if we have * one thread per core and we want to get threads out of futex waits fast. 
*/ -static int no_idle_nap; static int __init idle_setup(char *str) { if (!str) @@ -54,64 +52,19 @@ static int __init idle_setup(char *str) if (!strcmp(str, "poll")) { pr_info("using polling idle threads.\n"); - no_idle_nap = 1; - } else if (!strcmp(str, "halt")) - no_idle_nap = 0; - else - return -1; - - return 0; + cpu_idle_poll_ctrl(true); + return 0; + } else if (!strcmp(str, "halt")) { + return 0; + } + return -1; } early_param("idle", idle_setup); -/* - * The idle thread. There's no useful work to be - * done, so just try to conserve power and have a - * low exit latency (ie sit in a loop waiting for - * somebody to say that they'd like to reschedule) - */ -void cpu_idle(void) +void arch_cpu_idle(void) { - int cpu = smp_processor_id(); - - - current_thread_info()->status |= TS_POLLING; - - if (no_idle_nap) { - while (1) { - while (!need_resched()) - cpu_relax(); - schedule(); - } - } - - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (!need_resched()) { - if (cpu_is_offline(cpu)) - BUG(); /* no HOTPLUG_CPU */ - - local_irq_disable(); - __get_cpu_var(irq_stat).idle_timestamp = jiffies; - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we - * test NEED_RESCHED: - */ - smp_mb(); - - if (!need_resched()) - _cpu_idle(); - else - local_irq_enable(); - current_thread_info()->status |= TS_POLLING; - } - rcu_idle_exit(); - tick_nohz_idle_exit(); - schedule_preempt_disabled(); - } + __get_cpu_var(irq_stat).idle_timestamp = jiffies; + _cpu_idle(); } /* @@ -620,8 +573,7 @@ void show_regs(struct pt_regs *regs) int i; pr_err("\n"); - pr_err(" Pid: %d, comm: %20s, CPU: %d\n", - tsk->pid, tsk->comm, smp_processor_id()); + show_regs_print_info(KERN_ERR); #ifdef __tilegx__ for (i = 0; i < 51; i += 3) pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n", diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index d1e15f7..7a5aa1a 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot) #ifdef CONFIG_BLK_DEV_INITRD -/* - * Note that the kernel can potentially support other compression - * techniques than gz, though we don't do so by default. If we ever - * decide to do so we can either look for other filename extensions, - * or just allow a file with this name to be compressed with an - * arbitrary compressor (somewhat counterintuitively). - */ static int __initdata set_initramfs_file; -static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; +static char __initdata initramfs_file[128] = "initramfs"; static int __init setup_initramfs_file(char *str) { @@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str) early_param("initramfs_file", setup_initramfs_file); /* - * We look for an "initramfs.cpio.gz" file in the hvfs. - * If there is one, we allocate some memory for it and it will be - * unpacked to the initramfs. + * We look for a file called "initramfs" in the hvfs. If there is one, we + * allocate some memory for it and it will be unpacked to the initramfs. + * If it's compressed, the initd code will uncompress it first. 
*/ static void __init load_hv_initrd(void) { @@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void) fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); if (fd == HV_ENOENT) { - if (set_initramfs_file) + if (set_initramfs_file) { pr_warning("No such hvfs initramfs file '%s'\n", initramfs_file); - return; + return; + } else { + /* Try old backwards-compatible name. */ + fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); + if (fd == HV_ENOENT) + return; + } } BUG_ON(fd < 0); stat = hv_fs_fstat(fd); diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index e686c5a..44bab29 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c @@ -207,9 +207,7 @@ void __cpuinit online_secondary(void) /* Set up tile-timer clock-event device on this cpu */ setup_tile_timer(); - preempt_enable(); - - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) diff --git a/arch/tile/lib/uaccess.c b/arch/tile/lib/uaccess.c index f8d398c..030abe3 100644 --- a/arch/tile/lib/uaccess.c +++ b/arch/tile/lib/uaccess.c @@ -22,11 +22,3 @@ int __range_ok(unsigned long addr, unsigned long size) is_arch_mappable_range(addr, size)); } EXPORT_SYMBOL(__range_ok); - -#ifdef CONFIG_DEBUG_COPY_FROM_USER -void copy_from_user_overflow(void) -{ - WARN(1, "Buffer overflow detected!\n"); -} -EXPORT_SYMBOL(copy_from_user_overflow); -#endif diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index b3b4972..dfd63ce 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in) in parallel. Reuse of the virtual address is prevented by leaving it in the global lists until we're done with it. cpa takes care of the direct mappings. 
*/ - read_lock(&vmlist_lock); - for (p = vmlist; p; p = p->next) { - if (p->addr == addr) - break; - } - read_unlock(&vmlist_lock); + p = find_vm_area((void *)addr); if (!p) { pr_err("iounmap: bad address %p\n", addr); diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h index 78f1b89..c512b03 100644 --- a/arch/um/drivers/chan.h +++ b/arch/um/drivers/chan.h @@ -37,7 +37,7 @@ extern int console_write_chan(struct chan *chan, const char *buf, extern int console_open_chan(struct line *line, struct console *co); extern void deactivate_chan(struct chan *chan, int irq); extern void reactivate_chan(struct chan *chan, int irq); -extern void chan_enable_winch(struct chan *chan, struct tty_struct *tty); +extern void chan_enable_winch(struct chan *chan, struct tty_port *port); extern int enable_chan(struct line *line); extern void close_chan(struct line *line); extern int chan_window_size(struct line *line, diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c index 15c553c..acbe6c6 100644 --- a/arch/um/drivers/chan_kern.c +++ b/arch/um/drivers/chan_kern.c @@ -122,10 +122,10 @@ static int open_chan(struct list_head *chans) return err; } -void chan_enable_winch(struct chan *chan, struct tty_struct *tty) +void chan_enable_winch(struct chan *chan, struct tty_port *port) { if (chan && chan->primary && chan->ops->winch) - register_winch(chan->fd, tty); + register_winch(chan->fd, port); } static void line_timer_cb(struct work_struct *work) @@ -568,11 +568,7 @@ void chan_interrupt(struct line *line, int irq) reactivate_fd(chan->fd, irq); if (err == -EIO) { if (chan->primary) { - struct tty_struct *tty = tty_port_tty_get(&line->port); - if (tty != NULL) { - tty_hangup(tty); - tty_kref_put(tty); - } + tty_port_tty_hangup(&line->port, false); if (line->chan_out != chan) close_one_chan(line->chan_out, 1); } diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c index 9be670a..3fd7c3e 100644 --- a/arch/um/drivers/chan_user.c +++ b/arch/um/drivers/chan_user.c @@ -216,7 +216,7 @@ static int winch_thread(void *arg) } } -static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out, +static int winch_tramp(int fd, struct tty_port *port, int *fd_out, unsigned long *stack_out) { struct winch_data data; @@ -271,7 +271,7 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out, return err; } -void register_winch(int fd, struct tty_struct *tty) +void register_winch(int fd, struct tty_port *port) { unsigned long stack; int pid, thread, count, thread_fd = -1; @@ -281,17 +281,17 @@ void register_winch(int fd, struct tty_struct *tty) return; pid = tcgetpgrp(fd); - if (is_skas_winch(pid, fd, tty)) { - register_winch_irq(-1, fd, -1, tty, 0); + if (is_skas_winch(pid, fd, port)) { + register_winch_irq(-1, fd, -1, port, 0); return; } if (pid == -1) { - thread = winch_tramp(fd, tty, &thread_fd, &stack); + thread = winch_tramp(fd, port, &thread_fd, &stack); if (thread < 0) return; - register_winch_irq(thread_fd, fd, thread, tty, stack); + register_winch_irq(thread_fd, fd, thread, port, stack); count = write(thread_fd, &c, sizeof(c)); if (count != sizeof(c)) diff --git a/arch/um/drivers/chan_user.h b/arch/um/drivers/chan_user.h index dc693298..03f1b56 100644 --- a/arch/um/drivers/chan_user.h +++ b/arch/um/drivers/chan_user.h @@ -38,10 +38,10 @@ extern int generic_window_size(int fd, void *unused, unsigned short *rows_out, unsigned short *cols_out); extern void generic_free(void *data); -struct tty_struct; -extern void register_winch(int fd, struct tty_struct *tty); +struct 
tty_port; +extern void register_winch(int fd, struct tty_port *port); extern void register_winch_irq(int fd, int tty_fd, int pid, - struct tty_struct *tty, unsigned long stack); + struct tty_port *port, unsigned long stack); #define __channel_help(fn, prefix) \ __uml_help(fn, prefix "[0-9]*=<channel description>\n" \ diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index f1b3857..8035145 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -248,7 +248,6 @@ static irqreturn_t line_write_interrupt(int irq, void *data) { struct chan *chan = data; struct line *line = chan->line; - struct tty_struct *tty; int err; /* @@ -267,12 +266,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data) } spin_unlock(&line->lock); - tty = tty_port_tty_get(&line->port); - if (tty == NULL) - return IRQ_NONE; - - tty_wakeup(tty); - tty_kref_put(tty); + tty_port_tty_wakeup(&line->port); return IRQ_HANDLED; } @@ -305,7 +299,7 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty) return ret; if (!line->sigio) { - chan_enable_winch(line->chan_out, tty); + chan_enable_winch(line->chan_out, port); line->sigio = 1; } @@ -315,8 +309,22 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty) return 0; } +static void unregister_winch(struct tty_struct *tty); + +static void line_destruct(struct tty_port *port) +{ + struct tty_struct *tty = tty_port_tty_get(port); + struct line *line = tty->driver_data; + + if (line->sigio) { + unregister_winch(tty); + line->sigio = 0; + } +} + static const struct tty_port_operations line_port_ops = { .activate = line_activate, + .destruct = line_destruct, }; int line_open(struct tty_struct *tty, struct file *filp) @@ -340,18 +348,6 @@ int line_install(struct tty_driver *driver, struct tty_struct *tty, return 0; } -static void unregister_winch(struct tty_struct *tty); - -void line_cleanup(struct tty_struct *tty) -{ - struct line *line = tty->driver_data; - - if (line->sigio) { - unregister_winch(tty); - line->sigio = 0; - } -} - void line_close(struct tty_struct *tty, struct file * filp) { struct line *line = tty->driver_data; @@ -601,7 +597,7 @@ struct winch { int fd; int tty_fd; int pid; - struct tty_struct *tty; + struct tty_port *port; unsigned long stack; struct work_struct work; }; @@ -655,7 +651,7 @@ static irqreturn_t winch_interrupt(int irq, void *data) goto out; } } - tty = winch->tty; + tty = tty_port_tty_get(winch->port); if (tty != NULL) { line = tty->driver_data; if (line != NULL) { @@ -663,6 +659,7 @@ static irqreturn_t winch_interrupt(int irq, void *data) &tty->winsize.ws_col); kill_pgrp(tty->pgrp, SIGWINCH, 1); } + tty_kref_put(tty); } out: if (winch->fd != -1) @@ -670,7 +667,7 @@ static irqreturn_t winch_interrupt(int irq, void *data) return IRQ_HANDLED; } -void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, +void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port, unsigned long stack) { struct winch *winch; @@ -685,7 +682,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, .fd = fd, .tty_fd = tty_fd, .pid = pid, - .tty = tty, + .port = port, .stack = stack }); if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, @@ -714,15 +711,18 @@ static void unregister_winch(struct tty_struct *tty) { struct list_head *ele, *next; struct winch *winch; + struct tty_struct *wtty; spin_lock(&winch_handler_lock); list_for_each_safe(ele, next, &winch_handlers) { winch = list_entry(ele, struct winch, list); - if (winch->tty == tty) { + 
wtty = tty_port_tty_get(winch->port); + if (wtty == tty) { free_winch(winch); break; } + tty_kref_put(wtty); } spin_unlock(&winch_handler_lock); } diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index d8926c3..39f1862 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -218,6 +218,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); len = (*lp->write)(lp->fd, skb, lp); + skb_tx_timestamp(skb); if (len == skb->len) { dev->stats.tx_packets++; @@ -281,6 +282,7 @@ static void uml_net_get_drvinfo(struct net_device *dev, static const struct ethtool_ops uml_net_ethtool_ops = { .get_drvinfo = uml_net_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; static void uml_net_user_timer_expire(unsigned long _conn) diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c index 16fdd0a..b8d14fa 100644 --- a/arch/um/drivers/ssl.c +++ b/arch/um/drivers/ssl.c @@ -105,7 +105,6 @@ static const struct tty_operations ssl_ops = { .throttle = line_throttle, .unthrottle = line_unthrottle, .install = ssl_install, - .cleanup = line_cleanup, .hangup = line_hangup, }; diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c index 827777a..7b361f3 100644 --- a/arch/um/drivers/stdio_console.c +++ b/arch/um/drivers/stdio_console.c @@ -110,7 +110,6 @@ static const struct tty_operations console_ops = { .set_termios = line_set_termios, .throttle = line_throttle, .unthrottle = line_unthrottle, - .cleanup = line_cleanup, .hangup = line_hangup, }; diff --git a/arch/um/kernel/early_printk.c b/arch/um/kernel/early_printk.c index 49480f0..4a0800b 100644 --- a/arch/um/kernel/early_printk.c +++ b/arch/um/kernel/early_printk.c @@ -16,7 +16,7 @@ static void early_console_write(struct console *con, const char *s, unsigned int um_early_printk(s, n); } -static struct console early_console = { +static struct console early_console_dev = { .name = "earlycon", .write = early_console_write, .flags = CON_BOOT, @@ -25,8 +25,10 @@ static struct console early_console = { static int __init setup_early_printk(char *buf) { - register_console(&early_console); - + if (!early_console) { + early_console = &early_console_dev; + register_console(&early_console_dev); + } return 0; } diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 5abcbfb..9df292b 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -42,17 +42,12 @@ static unsigned long brk_end; static void setup_highmem(unsigned long highmem_start, unsigned long highmem_len) { - struct page *page; unsigned long highmem_pfn; int i; highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT; - for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) { - page = &mem_map[highmem_pfn + i]; - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - } + for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) + free_highmem_page(&mem_map[highmem_pfn + i]); } #endif @@ -73,18 +68,13 @@ void __init mem_init(void) totalram_pages = free_all_bootmem(); max_low_pfn = totalram_pages; #ifdef CONFIG_HIGHMEM - totalhigh_pages = highmem >> PAGE_SHIFT; - totalram_pages += totalhigh_pages; + setup_highmem(end_iomem, highmem); #endif num_physpages = totalram_pages; max_pfn = totalram_pages; printk(KERN_INFO "Memory: %luk available\n", nr_free_pages() << (PAGE_SHIFT-10)); kmalloc_ok = 1; - -#ifdef CONFIG_HIGHMEM - setup_highmem(end_iomem, highmem); -#endif } /* @@ -254,15 +244,7 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void 
free_initrd_mem(unsigned long start, unsigned long end) { - if (start < end) - printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", - (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_reserved_area(start, end, 0, "initrd"); } #endif diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index b462b13..bbcef52 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -210,33 +210,14 @@ void initial_thread_cb(void (*proc)(void *), void *arg) kmalloc_ok = save_kmalloc_ok; } -void default_idle(void) +void arch_cpu_idle(void) { unsigned long long nsecs; - while (1) { - /* endless idle loop with no priority at all */ - - /* - * although we are an idle CPU, we do not want to - * get into the scheduler unnecessarily. - */ - if (need_resched()) - schedule(); - - tick_nohz_idle_enter(); - rcu_idle_enter(); - nsecs = disable_timer(); - idle_sleep(nsecs); - rcu_idle_exit(); - tick_nohz_idle_exit(); - } -} - -void cpu_idle(void) -{ cpu_tasks[current_thread_info()->cpu].pid = os_getpid(); - default_idle(); + nsecs = disable_timer(); + idle_sleep(nsecs); + local_irq_enable(); } int __cant_sleep(void) { diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index e562ff8..7d101a2 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -35,18 +35,6 @@ void show_trace(struct task_struct *task, unsigned long * stack) } #endif -/* - * stack dumps generator - this is used by arch-independent code. - * And this is identical to i386 currently. - */ -void dump_stack(void) -{ - unsigned long stack; - - show_trace(current, &stack); -} -EXPORT_SYMBOL(dump_stack); - /*Stolen from arch/i386/kernel/traps.c */ static const int kstack_depth_to_print = 24; diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index b1469fe..9d9f1b4 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c @@ -15,7 +15,7 @@ #include <sysdep/mcontext.h> #include "internal.h" -void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = { +void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = { [SIGTRAP] = relay_signal, [SIGFPE] = relay_signal, [SIGILL] = relay_signal, diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index da4b9e9..337518c 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -15,6 +15,8 @@ #include <sys/mman.h> #include <sys/stat.h> #include <sys/wait.h> +#include <sys/time.h> +#include <sys/resource.h> #include <asm/unistd.h> #include <init.h> #include <os.h> diff --git a/arch/um/sys-ppc/sysrq.c b/arch/um/sys-ppc/sysrq.c index f889449..1ff1ad7 100644 --- a/arch/um/sys-ppc/sysrq.c +++ b/arch/um/sys-ppc/sysrq.c @@ -11,6 +11,8 @@ void show_regs(struct pt_regs_subarch *regs) { printk("\n"); + show_regs_print_info(KERN_DEFAULT); + printk("show_regs(): insert regs here.\n"); #if 0 printk("\n"); diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index dc50b15..2943e3a 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -9,7 +9,7 @@ config UNICORE32 select GENERIC_ATOMIC64 select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_HAVE_CUSTOM_GPIO_H select GENERIC_FIND_FIRST_BIT select GENERIC_IRQ_PROBE diff --git a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile index fa497e0..607a72f 100644 --- a/arch/unicore32/kernel/Makefile +++ 
b/arch/unicore32/kernel/Makefile @@ -9,7 +9,6 @@ obj-y += setup.o signal.o sys.o stacktrace.o traps.o obj-$(CONFIG_MODULES) += ksyms.o module.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_CPU_FREQ) += cpu-ucv2.o obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o # obj-y for architecture PKUnity v3 diff --git a/arch/unicore32/kernel/cpu-ucv2.c b/arch/unicore32/kernel/cpu-ucv2.c deleted file mode 100644 index 4a99f62..0000000 --- a/arch/unicore32/kernel/cpu-ucv2.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * linux/arch/unicore32/kernel/cpu-ucv2.c: clock scaling for the UniCore-II - * - * Code specific to PKUnity SoC and UniCore ISA - * - * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> - * Copyright (C) 2001-2010 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/init.h> -#include <linux/clk.h> -#include <linux/cpufreq.h> - -#include <mach/hardware.h> - -static struct cpufreq_driver ucv2_driver; - -/* make sure that only the "userspace" governor is run - * -- anything else wouldn't make sense on this platform, anyway. - */ -int ucv2_verify_speed(struct cpufreq_policy *policy) -{ - if (policy->cpu) - return -EINVAL; - - cpufreq_verify_within_limits(policy, - policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); - - return 0; -} - -static unsigned int ucv2_getspeed(unsigned int cpu) -{ - struct clk *mclk = clk_get(NULL, "MAIN_CLK"); - - if (cpu) - return 0; - return clk_get_rate(mclk)/1000; -} - -static int ucv2_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int cur = ucv2_getspeed(0); - struct cpufreq_freqs freqs; - struct clk *mclk = clk_get(NULL, "MAIN_CLK"); - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - if (!clk_set_rate(mclk, target_freq * 1000)) { - freqs.old = cur; - freqs.new = target_freq; - freqs.cpu = 0; - } - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static int __init ucv2_cpu_init(struct cpufreq_policy *policy) -{ - if (policy->cpu != 0) - return -EINVAL; - policy->cur = ucv2_getspeed(0); - policy->min = policy->cpuinfo.min_freq = 250000; - policy->max = policy->cpuinfo.max_freq = 1000000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - return 0; -} - -static struct cpufreq_driver ucv2_driver = { - .flags = CPUFREQ_STICKY, - .verify = ucv2_verify_speed, - .target = ucv2_target, - .get = ucv2_getspeed, - .init = ucv2_cpu_init, - .name = "UniCore-II", -}; - -static int __init ucv2_cpufreq_init(void) -{ - return cpufreq_register_driver(&ucv2_driver); -} - -arch_initcall(ucv2_cpufreq_init); diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c index 3922255..9be0d5d 100644 --- a/arch/unicore32/kernel/early_printk.c +++ b/arch/unicore32/kernel/early_printk.c @@ -33,21 +33,17 @@ static struct console early_ocd_console = { .index = -1, }; -/* Direct interface for emergencies */ -static struct console *early_console = &early_ocd_console; - -static int __initdata keep_early; - static int __init setup_early_printk(char *buf) { - if (!buf) + int keep_early; + + if (!buf || early_console) return 0; if (strstr(buf, "keep")) keep_early = 1; - if (!strncmp(buf, "ocd", 3)) - early_console = &early_ocd_console; + early_console = &early_ocd_console; if (keep_early) early_console->flags &= ~CON_BOOT; diff --git 
a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index 872d7e2..c944769 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -45,25 +45,10 @@ static const char * const processor_modes[] = { "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" }; -void cpu_idle(void) +void arch_cpu_idle(void) { - /* endless idle loop with no priority at all */ - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (!need_resched()) { - local_irq_disable(); - stop_critical_timings(); - cpu_do_idle(); - local_irq_enable(); - start_critical_timings(); - } - rcu_idle_exit(); - tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - } + cpu_do_idle(); + local_irq_enable(); } static char reboot_mode = 'h'; @@ -159,11 +144,7 @@ void __show_regs(struct pt_regs *regs) unsigned long flags; char buf[64]; - printk(KERN_DEFAULT "CPU: %d %s (%s %.*s)\n", - raw_smp_processor_id(), print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); + show_regs_print_info(KERN_DEFAULT); print_symbol("PC is at %s\n", instruction_pointer(regs)); print_symbol("LR is at %s\n", regs->UCreg_lr); printk(KERN_DEFAULT "pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index 0870b68..c54e324 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -170,12 +170,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) c_backtrace(fp, mode); } -void dump_stack(void) -{ - dump_backtrace(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); - void show_stack(struct task_struct *tsk, unsigned long *sp) { dump_backtrace(NULL, tsk); diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c index de186bd..63df12d 100644 --- a/arch/unicore32/mm/init.c +++ b/arch/unicore32/mm/init.c @@ -66,6 +66,9 @@ void show_mem(unsigned int filter) printk(KERN_DEFAULT "Mem-info:\n"); show_free_areas(filter); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; + for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; unsigned int pfn1, pfn2; @@ -313,24 +316,6 @@ void __init bootmem_init(void) max_pfn = max_high - PHYS_PFN_OFFSET; } -static inline int free_area(unsigned long pfn, unsigned long end, char *s) -{ - unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); - - for (; pfn < end; pfn++) { - struct page *page = pfn_to_page(pfn); - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - pages++; - } - - if (size && s) - printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); - - return pages; -} - static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { @@ -404,9 +389,9 @@ void __init mem_init(void) max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; - /* this will put all unused low memory onto the freelists */ free_unused_memmap(&meminfo); + /* this will put all unused low memory onto the freelists */ totalram_pages += free_all_bootmem(); reserved_pages = free_pages = 0; @@ -491,9 +476,7 @@ void __init mem_init(void) void free_initmem(void) { - totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), - __phys_to_pfn(__pa(__init_end)), - "init"); + free_initmem_default(0); } #ifdef CONFIG_BLK_DEV_INITRD @@ -503,9 +486,7 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) - totalram_pages += free_area(__phys_to_pfn(__pa(start)), - __phys_to_pfn(__pa(end)), - 
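
Both idle-loop conversions above (um's process.c and unicore32's) target the same contract: the generic idle loop now owns need_resched(), tick and RCU bookkeeping, and arch_cpu_idle() is only asked to wait for an interrupt and return with interrupts enabled, which is why each version ends in local_irq_enable(). A minimal sketch of that shape, with my_arch_wait_for_interrupt() as a made-up stand-in for whatever low-power wait the architecture provides (cpu_do_idle(), idle_sleep(), a halt instruction):

#include <linux/irqflags.h>

extern void my_arch_wait_for_interrupt(void);	/* hypothetical arch primitive */

/* Hypothetical skeleton; only the enable-on-return rule matters here. */
void arch_cpu_idle(void)
{
	/* entered by the generic idle loop with interrupts disabled */
	my_arch_wait_for_interrupt();
	local_irq_enable();	/* the loop expects IRQs back on when this returns */
}
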
"initrd"); + free_reserved_area(start, end, 0, "initrd"); } static int __init keepinitrd_setup(char *__unused) diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c index b7a6055..13068ee 100644 --- a/arch/unicore32/mm/ioremap.c +++ b/arch/unicore32/mm/ioremap.c @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached); void __uc32_iounmap(volatile void __iomem *io_addr) { void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); - struct vm_struct **p, *tmp; + struct vm_struct *vm; /* * If this is a section based mapping we need to handle it @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr) * all the mappings before the area can be reclaimed * by someone else. */ - write_lock(&vmlist_lock); - for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { - if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) { - if (tmp->flags & VM_UNICORE_SECTION_MAPPING) { - unmap_area_sections((unsigned long)tmp->addr, - tmp->size); - } - break; - } - } - write_unlock(&vmlist_lock); + vm = find_vm_area(addr); + if (vm && (vm->flags & VM_IOREMAP) && + (vm->flags & VM_UNICORE_SECTION_MAPPING)) + unmap_area_sections((unsigned long)vm->addr, vm->size); vunmap(addr); } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a4f24f5..5db2117 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -20,6 +20,7 @@ config X86_64 ### Arch settings config X86 def_bool y + select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS select HAVE_AOUT if X86_32 select HAVE_UNSTABLE_SCHED_CLOCK select ARCH_SUPPORTS_NUMA_BALANCING @@ -112,7 +113,7 @@ config X86 select GENERIC_STRNLEN_USER select HAVE_CONTEXT_TRACKING if X86_64 select HAVE_IRQ_TIME_ACCOUNTING - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select MODULES_USE_ELF_REL if X86_32 select MODULES_USE_ELF_RELA if X86_64 select CLONE_BACKWARDS if X86_32 @@ -120,6 +121,7 @@ config X86 select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION select OLD_SIGACTION if X86_32 select COMPAT_OLD_SIGACTION if IA32_EMULATION + select RTC_LIB config INSTRUCTION_DECODER def_bool y @@ -188,9 +190,6 @@ config GENERIC_CALIBRATE_DELAY config ARCH_HAS_CPU_RELAX def_bool y -config ARCH_HAS_DEFAULT_IDLE - def_bool y - config ARCH_HAS_CACHE_LINE_SIZE def_bool y @@ -389,7 +388,7 @@ config X86_NUMACHIP config X86_VSMP bool "ScaleMP vSMP" - select PARAVIRT_GUEST + select HYPERVISOR_GUEST select PARAVIRT depends on X86_64 && PCI depends on X86_EXTENDED_PLATFORM @@ -596,44 +595,17 @@ config SCHED_OMIT_FRAME_POINTER If in doubt, say "Y". -menuconfig PARAVIRT_GUEST - bool "Paravirtualized guest support" - ---help--- - Say Y here to get to see options related to running Linux under - various hypervisors. This option alone does not add any kernel code. - - If you say N, all options in this submenu will be skipped and disabled. - -if PARAVIRT_GUEST - -config PARAVIRT_TIME_ACCOUNTING - bool "Paravirtual steal time accounting" - select PARAVIRT - default n +menuconfig HYPERVISOR_GUEST + bool "Linux guest support" ---help--- - Select this option to enable fine granularity task steal time - accounting. Time spent executing other tasks in parallel with - the current vCPU is discounted from the vCPU power. To account for - that, there can be a small performance impact. - - If in doubt, say N here. - -source "arch/x86/xen/Kconfig" + Say Y here to enable options for running Linux under various hyper- + visors. This option enables basic hypervisor detection and platform + setup. 
-config KVM_GUEST - bool "KVM Guest support (including kvmclock)" - select PARAVIRT - select PARAVIRT - select PARAVIRT_CLOCK - default y if PARAVIRT_GUEST - ---help--- - This option enables various optimizations for running under the KVM - hypervisor. It includes a paravirtualized clock, so that instead - of relying on a PIT (or probably other) emulation by the - underlying device model, the host provides the guest with - timing infrastructure such as time of day, and system time + If you say N, all options in this submenu will be skipped and + disabled, and Linux guest support won't be built in. -source "arch/x86/lguest/Kconfig" +if HYPERVISOR_GUEST config PARAVIRT bool "Enable paravirtualization code" @@ -643,6 +615,13 @@ config PARAVIRT over full virtualization. However, when run without a hypervisor the kernel is theoretically slower and slightly larger. +config PARAVIRT_DEBUG + bool "paravirt-ops debugging" + depends on PARAVIRT && DEBUG_KERNEL + ---help--- + Enable to debug paravirt_ops internals. Specifically, BUG if + a paravirt_op is missing when it is called. + config PARAVIRT_SPINLOCKS bool "Paravirtualization layer for spinlocks" depends on PARAVIRT && SMP @@ -656,17 +635,38 @@ config PARAVIRT_SPINLOCKS If you are unsure how to answer this question, answer N. -config PARAVIRT_CLOCK - bool +source "arch/x86/xen/Kconfig" -endif +config KVM_GUEST + bool "KVM Guest support (including kvmclock)" + depends on PARAVIRT + select PARAVIRT_CLOCK + default y + ---help--- + This option enables various optimizations for running under the KVM + hypervisor. It includes a paravirtualized clock, so that instead + of relying on a PIT (or probably other) emulation by the + underlying device model, the host provides the guest with + timing infrastructure such as time of day, and system time -config PARAVIRT_DEBUG - bool "paravirt-ops debugging" - depends on PARAVIRT && DEBUG_KERNEL +source "arch/x86/lguest/Kconfig" + +config PARAVIRT_TIME_ACCOUNTING + bool "Paravirtual steal time accounting" + depends on PARAVIRT + default n ---help--- - Enable to debug paravirt_ops internals. Specifically, BUG if - a paravirt_op is missing when it is called. + Select this option to enable fine granularity task steal time + accounting. Time spent executing other tasks in parallel with + the current vCPU is discounted from the vCPU power. To account for + that, there can be a small performance impact. + + If in doubt, say N here. + +config PARAVIRT_CLOCK + bool + +endif #HYPERVISOR_GUEST config NO_BOOTMEM def_bool y @@ -1549,6 +1549,7 @@ config X86_SMAP config EFI bool "EFI runtime service support" depends on ACPI + select UCS2_STRING ---help--- This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index b322f12..c198b7e 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -131,7 +131,7 @@ config DOUBLEFAULT config DEBUG_TLBFLUSH bool "Set upper limit of TLB entries to flush one-by-one" - depends on DEBUG_KERNEL && (X86_64 || X86_INVLPG) + depends on DEBUG_KERNEL ---help--- X86-only for now. @@ -292,20 +292,6 @@ config OPTIMIZE_INLINING If unsure, say N. -config DEBUG_STRICT_USER_COPY_CHECKS - bool "Strict copy size checks" - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING - ---help--- - Enabling this option turns a certain set of sanity checks for user - copy operations into compile time failures. 
- - The copy_from_user() etc checks are there to help test if there - are sufficient security checks on the length argument of - the copy operation, by having gcc prove that the argument is - within bounds. - - If unsure, or if you run an older (pre 4.4) gcc, say N. - config DEBUG_NMI_SELFTEST bool "NMI Selftest" depends on DEBUG_KERNEL && X86_LOCAL_APIC diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 8a84501..5ef205c 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/piggy.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone -$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone ifeq ($(CONFIG_EFI_STUB), y) VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o @@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -targets += vmlinux.bin.all vmlinux.relocs +targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs CMD_RELOCS = arch/x86/tools/relocs quiet_cmd_relocs = RELOCS $@ diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c205035..35ee62f 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) *size = len; } +static efi_status_t setup_efi_vars(struct boot_params *params) +{ + struct setup_data *data; + struct efi_var_bootdata *efidata; + u64 store_size, remaining_size, var_size; + efi_status_t status; + + if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION) + return EFI_UNSUPPORTED; + + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + status = efi_call_phys4((void *)sys_table->runtime->query_variable_info, + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, &store_size, + &remaining_size, &var_size); + + if (status != EFI_SUCCESS) + return status; + + status = efi_call_phys3(sys_table->boottime->allocate_pool, + EFI_LOADER_DATA, sizeof(*efidata), &efidata); + + if (status != EFI_SUCCESS) + return status; + + efidata->data.type = SETUP_EFI_VARS; + efidata->data.len = sizeof(struct efi_var_bootdata) - + sizeof(struct setup_data); + efidata->data.next = 0; + efidata->store_size = store_size; + efidata->remaining_size = remaining_size; + efidata->max_var_size = var_size; + + if (data) + data->next = (unsigned long)efidata; + else + params->hdr.setup_data = (unsigned long)efidata; + +} + static efi_status_t setup_efi_pci(struct boot_params *params) { efi_pci_io_protocol *pci; @@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table, setup_graphics(boot_params); + setup_efi_vars(boot_params); + setup_efi_pci(boot_params); status = 
efi_call_phys3(sys_table->boottime->allocate_pool, diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index c1d383d..16f24e6 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -52,7 +52,7 @@ ENTRY(startup_32) jnz 1f cli - movl $(__KERNEL_DS), %eax + movl $(__BOOT_DS), %eax movl %eax, %ds movl %eax, %es movl %eax, %ss diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 03abf9b..81e94d9 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -162,7 +162,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; - current->flags |= PF_DUMPCORE; strncpy(dump.u_comm, current->comm, sizeof(current->comm)); dump.u_ar0 = offsetof(struct user32, regs); dump.signal = signr; diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 5b5e9cb..653668d 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -14,13 +14,29 @@ * analysis of kexec-tools; if other broken bootloaders initialize a * different set of fields we will need to figure out how to disambiguate. * + * Note: efi_info is commonly left uninitialized, but that field has a + * private magic, so it is better to leave it unchanged. */ static void sanitize_boot_params(struct boot_params *boot_params) { + /* + * IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear + * this field. The purpose of this field is to guarantee + * compliance with the x86 boot spec located in + * Documentation/x86/boot.txt . That spec says that the + * *whole* structure should be cleared, after which only the + * portion defined by struct setup_header (boot_params->hdr) + * should be copied in. + * + * If you're having an issue because the sentinel is set, you + * need to change the whole structure to be cleared, not this + * (or any other) individual field, or you will soon have + * problems again. + */ if (boot_params->sentinel) { - /*fields in boot_params are not valid, clear them */ + /* fields in boot_params are left uninitialized, clear them */ memset(&boot_params->olpc_ofw_header, 0, - (char *)&boot_params->alt_mem_k - + (char *)&boot_params->efi_info - (char *)&boot_params->olpc_ofw_header); memset(&boot_params->kbd_status, 0, (char *)&boot_params->hdr - diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 11e1152..2f03ff0 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -37,7 +37,4 @@ do { \ #include <asm-generic/bug.h> - -extern void show_regs_common(void); - #endif /* _ASM_X86_BUG_H */ diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index 8d871ea..d47786a 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h @@ -35,7 +35,7 @@ extern void __add_wrong_size(void) /* * An exchange-type operation, which takes a value and a pointer, and - * returns a the old value. + * returns the old value. 
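
The bootparam_utils.h comment above addresses bootloader authors directly; the sequence it asks for (per Documentation/x86/boot.txt) is to clear the whole zero page and then copy in only the setup_header taken from the kernel image. A hedged loader-side sketch, where prepare_boot_params() and hdr_from_image are made-up names used purely for illustration:

#include <string.h>
#include <asm/bootparam.h>	/* struct boot_params, struct setup_header */

/* Hypothetical bootloader code, not part of this diff. */
static void prepare_boot_params(struct boot_params *bp,
				const struct setup_header *hdr_from_image)
{
	/* 1. clear the *whole* structure so no stale field (sentinel included)
	 *    survives from a previous load */
	memset(bp, 0, sizeof(*bp));

	/* 2. copy in only the setup_header read from the kernel image */
	memcpy(&bp->hdr, hdr_from_image, sizeof(bp->hdr));

	/* the loader then fills in cmd_line_ptr, the ramdisk fields, e820, ... */
}
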
*/ #define __xchg_op(ptr, arg, op, lock) \ ({ \ diff --git a/arch/x86/include/asm/context_tracking.h b/arch/x86/include/asm/context_tracking.h index 1616562..1fe4970 100644 --- a/arch/x86/include/asm/context_tracking.h +++ b/arch/x86/include/asm/context_tracking.h @@ -1,31 +1,10 @@ #ifndef _ASM_X86_CONTEXT_TRACKING_H #define _ASM_X86_CONTEXT_TRACKING_H -#ifndef __ASSEMBLY__ -#include <linux/context_tracking.h> -#include <asm/ptrace.h> - -static inline void exception_enter(struct pt_regs *regs) -{ - user_exit(); -} - -static inline void exception_exit(struct pt_regs *regs) -{ -#ifdef CONFIG_CONTEXT_TRACKING - if (user_mode(regs)) - user_enter(); -#endif -} - -#else /* __ASSEMBLY__ */ - #ifdef CONFIG_CONTEXT_TRACKING # define SCHEDULE_USER call schedule_user #else # define SCHEDULE_USER call schedule #endif -#endif /* !__ASSEMBLY__ */ - #endif diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 93fe929..8010ebc 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -9,6 +9,7 @@ #endif #define NCAPINTS 10 /* N 32-bit words worth of info */ +#define NBUGINTS 1 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used @@ -100,6 +101,7 @@ #define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ #define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ #define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */ +#define X86_FEATURE_NONSTOP_TSC_S3 (3*32+30) /* TSC doesn't stop in S3 state */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ @@ -168,6 +170,7 @@ #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ #define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */ +#define X86_FEATURE_PERFCTR_L2 (6*32+28) /* L2 performance counter extensions */ /* * Auxiliary flags: Linux defined - For features scattered in various @@ -182,6 +185,7 @@ #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ #define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */ #define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */ +#define X86_FEATURE_PROC_FEEDBACK (7*32+ 9) /* AMD ProcFeedbackInterface */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ @@ -216,6 +220,17 @@ #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ +/* + * BUG word(s) + */ +#define X86_BUG(x) (NCAPINTS*32 + (x)) + +#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ +#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ +#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ +#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* AMD Erratum 383 */ +#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* AMD Erratum 400 */ + #if defined(__KERNEL__) && !defined(__ASSEMBLY__) #include <asm/asm.h> @@ -311,6 +326,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) #define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) +#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2) #define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) #define cpu_has_eager_fpu 
boot_cpu_has(X86_FEATURE_EAGER_FPU) @@ -401,6 +417,13 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) #define static_cpu_has(bit) boot_cpu_has(bit) #endif +#define cpu_has_bug(c, bit) cpu_has(c, (bit)) +#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) +#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)); + +#define static_cpu_has_bug(bit) static_cpu_has((bit)) +#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) + #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ #endif /* _ASM_X86_CPUFEATURE_H */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 60c89f3..2fb5d58 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void); extern void efi_unmap_memmap(void); extern void efi_memory_uc(u64 addr, unsigned long size); +struct efi_var_bootdata { + struct setup_data data; + u64 store_size; + u64 remaining_size; + u64 max_var_size; +}; + #ifdef CONFIG_EFI static inline bool efi_is_native(void) diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index a09c285..0dc7d9e 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -104,12 +104,7 @@ enum fixed_addresses { FIX_LI_PCIA, /* Lithium PCI Bridge A */ FIX_LI_PCIB, /* Lithium PCI Bridge B */ #endif -#ifdef CONFIG_X86_F00F_BUG - FIX_F00F_IDT, /* Virtual mapping for IDT */ -#endif -#ifdef CONFIG_X86_CYCLONE_TIMER - FIX_CYCLONE_TIMER, /*cyclone timer register*/ -#endif + FIX_RO_IDT, /* Virtual mapping for read-only IDT */ #ifdef CONFIG_X86_32 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h index bdd35db..a809121 100644 --- a/arch/x86/include/asm/hugetlb.h +++ b/arch/x86/include/asm/hugetlb.h @@ -2,6 +2,7 @@ #define _ASM_X86_HUGETLB_H #include <asm/page.h> +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 86095ed..2d4b5e6 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -20,13 +20,11 @@ #ifndef _ASM_X86_HYPERVISOR_H #define _ASM_X86_HYPERVISOR_H +#ifdef CONFIG_HYPERVISOR_GUEST + #include <asm/kvm_para.h> #include <asm/xen/hypervisor.h> -extern void init_hypervisor(struct cpuinfo_x86 *c); -extern void init_hypervisor_platform(void); -extern bool hypervisor_x2apic_available(void); - /* * x86 hypervisor information */ @@ -55,4 +53,12 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv; extern const struct hypervisor_x86 x86_hyper_xen_hvm; extern const struct hypervisor_x86 x86_hyper_kvm; -#endif +extern void init_hypervisor(struct cpuinfo_x86 *c); +extern void init_hypervisor_platform(void); +extern bool hypervisor_x2apic_available(void); +#else +static inline void init_hypervisor(struct cpuinfo_x86 *c) { } +static inline void init_hypervisor_platform(void) { } +static inline bool hypervisor_x2apic_available(void) { return false; } +#endif /* CONFIG_HYPERVISOR_GUEST */ +#endif /* _ASM_X86_HYPERVISOR_H */ diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index d3ddd17..5a6d287 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -77,6 +77,7 @@ struct arch_specific_insn { * a post_handler or break_handler). 
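
The X86_BUG_* bits above sit past the feature words (indices NCAPINTS .. NCAPINTS+NBUGINTS-1 of x86_capability[]), so they ride on the existing capability machinery; the helpers defined here are thin wrappers around cpu_has(), set_cpu_cap() and static_cpu_has(). A condensed example of how the later hunks (bugs.c, amd.c, cyrix.c) use them; the condition and printouts below are placeholders, not code from this diff:

#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

/* Placeholder detection/use pattern for the new bug bits. */
static void example_bug_bits(struct cpuinfo_x86 *c, bool fdiv_detected)
{
	if (fdiv_detected)
		set_cpu_bug(c, X86_BUG_FDIV);		/* record the erratum on this CPU */

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E))	/* per-CPU query */
		pr_info("APIC C1E erratum present\n");

	if (static_cpu_has_bug(X86_BUG_F00F))		/* boot-CPU fast-path query */
		pr_info("F00F erratum present\n");
}
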
*/ int boostable; + bool if_modifier; }; struct arch_optimized_insn { diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 635a74d..4979778 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -414,8 +414,8 @@ struct kvm_vcpu_arch { gpa_t time; struct pvclock_vcpu_time_info hv_clock; unsigned int hw_tsc_khz; - unsigned int time_offset; - struct page *time_page; + struct gfn_to_hva_cache pv_time; + bool pv_time_enabled; /* set guest stopped flag in pvclock flags field */ bool pvclock_set_guest_stopped_request; diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index f4076af..fa5f71e 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -146,13 +146,13 @@ DECLARE_PER_CPU(struct device *, mce_device); void mce_intel_feature_init(struct cpuinfo_x86 *c); void cmci_clear(void); void cmci_reenable(void); -void cmci_rediscover(int dying); +void cmci_rediscover(void); void cmci_recheck(void); #else static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } static inline void cmci_clear(void) {} static inline void cmci_reenable(void) {} -static inline void cmci_rediscover(int dying) {} +static inline void cmci_rediscover(void) {} static inline void cmci_recheck(void) {} #endif diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 9264802..cb75028 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -137,11 +137,11 @@ static inline unsigned long long native_read_pmc(int counter) * pointer indirection), this allows gcc to optimize better */ -#define rdmsr(msr, val1, val2) \ +#define rdmsr(msr, low, high) \ do { \ u64 __val = native_read_msr((msr)); \ - (void)((val1) = (u32)__val); \ - (void)((val2) = (u32)(__val >> 32)); \ + (void)((low) = (u32)__val); \ + (void)((high) = (u32)(__val >> 32)); \ } while (0) static inline void wrmsr(unsigned msr, unsigned low, unsigned high) @@ -162,12 +162,12 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) } /* rdmsr with exception handling */ -#define rdmsr_safe(msr, p1, p2) \ +#define rdmsr_safe(msr, low, high) \ ({ \ int __err; \ u64 __val = native_read_msr_safe((msr), &__err); \ - (*p1) = (u32)__val; \ - (*p2) = (u32)(__val >> 32); \ + (*low) = (u32)__val; \ + (*high) = (u32)(__val >> 32); \ __err; \ }) @@ -208,7 +208,7 @@ do { \ #define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ (u32)((val) >> 32)) -#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2)) +#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high)) #define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0) diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 8b491e6..6c896fb 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -48,6 +48,5 @@ * arch/x86/kernel/head_64.S), and it is mapped here: */ #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) -#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) #endif /* _ASM_X86_PAGE_64_DEFS_H */ diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5edd174..cfdc9ee 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -262,10 +262,6 @@ static inline void set_ldt(const void *addr, unsigned entries) { PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); } -static inline void store_gdt(struct desc_ptr *dtr) -{ - PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr); -} static inline void store_idt(struct desc_ptr *dtr) { 
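
The msr.h hunk above only renames macro parameters (val1/val2 and p1/p2 become low/high), but the new names state the calling convention: rdmsr() stores into two u32 lvalues, while rdmsr_safe() stores through two u32 pointers and returns non-zero if the access faulted. A small usage sketch; the MSR choices are arbitrary (MSR_PLATFORM_INFO is the constant this series adds to msr-index.h):

#include <linux/kernel.h>
#include <asm/msr.h>

static void example_msr_read(void)
{
	u32 low, high;

	rdmsr(MSR_IA32_TSC, low, high);			/* plain read, faults unhandled */

	if (rdmsr_safe(MSR_PLATFORM_INFO, &low, &high))	/* returns 0 on success */
		pr_warn("MSR 0x%x not readable on this CPU\n", MSR_PLATFORM_INFO);
	else
		pr_info("PLATFORM_INFO: %08x%08x\n", high, low);
}
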
PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); @@ -703,7 +699,10 @@ static inline void arch_leave_lazy_mmu_mode(void) PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); } -void arch_flush_lazy_mmu_mode(void); +static inline void arch_flush_lazy_mmu_mode(void) +{ + PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); +} static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 142236e..0db1fca 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -91,6 +91,7 @@ struct pv_lazy_ops { /* Set deferred update mode, used for batching operations. */ void (*enter)(void); void (*leave)(void); + void (*flush)(void); }; struct pv_time_ops { @@ -122,7 +123,7 @@ struct pv_cpu_ops { void (*load_tr_desc)(void); void (*load_gdt)(const struct desc_ptr *); void (*load_idt)(const struct desc_ptr *); - void (*store_gdt)(struct desc_ptr *); + /* store_gdt has been removed. */ void (*store_idt)(struct desc_ptr *); void (*set_ldt)(const void *desc, unsigned entries); unsigned long (*store_tr)(void); @@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next); void paravirt_enter_lazy_mmu(void); void paravirt_leave_lazy_mmu(void); +void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); u32 _paravirt_ident_32(u32); diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index 4f7e67e..85e13cc 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h @@ -24,45 +24,45 @@ #define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) #define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1)) -#define P4_ESCR_EVENT_MASK 0x7e000000U +#define P4_ESCR_EVENT_MASK 0x7e000000ULL #define P4_ESCR_EVENT_SHIFT 25 -#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U +#define P4_ESCR_EVENTMASK_MASK 0x01fffe00ULL #define P4_ESCR_EVENTMASK_SHIFT 9 -#define P4_ESCR_TAG_MASK 0x000001e0U +#define P4_ESCR_TAG_MASK 0x000001e0ULL #define P4_ESCR_TAG_SHIFT 5 -#define P4_ESCR_TAG_ENABLE 0x00000010U -#define P4_ESCR_T0_OS 0x00000008U -#define P4_ESCR_T0_USR 0x00000004U -#define P4_ESCR_T1_OS 0x00000002U -#define P4_ESCR_T1_USR 0x00000001U +#define P4_ESCR_TAG_ENABLE 0x00000010ULL +#define P4_ESCR_T0_OS 0x00000008ULL +#define P4_ESCR_T0_USR 0x00000004ULL +#define P4_ESCR_T1_OS 0x00000002ULL +#define P4_ESCR_T1_USR 0x00000001ULL #define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT) #define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT) #define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT) -#define P4_CCCR_OVF 0x80000000U -#define P4_CCCR_CASCADE 0x40000000U -#define P4_CCCR_OVF_PMI_T0 0x04000000U -#define P4_CCCR_OVF_PMI_T1 0x08000000U -#define P4_CCCR_FORCE_OVF 0x02000000U -#define P4_CCCR_EDGE 0x01000000U -#define P4_CCCR_THRESHOLD_MASK 0x00f00000U +#define P4_CCCR_OVF 0x80000000ULL +#define P4_CCCR_CASCADE 0x40000000ULL +#define P4_CCCR_OVF_PMI_T0 0x04000000ULL +#define P4_CCCR_OVF_PMI_T1 0x08000000ULL +#define P4_CCCR_FORCE_OVF 0x02000000ULL +#define P4_CCCR_EDGE 0x01000000ULL +#define P4_CCCR_THRESHOLD_MASK 0x00f00000ULL #define P4_CCCR_THRESHOLD_SHIFT 20 -#define P4_CCCR_COMPLEMENT 0x00080000U -#define P4_CCCR_COMPARE 0x00040000U -#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U +#define P4_CCCR_COMPLEMENT 0x00080000ULL +#define P4_CCCR_COMPARE 0x00040000ULL +#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000ULL #define P4_CCCR_ESCR_SELECT_SHIFT 13 -#define P4_CCCR_ENABLE 0x00001000U -#define 
P4_CCCR_THREAD_SINGLE 0x00010000U -#define P4_CCCR_THREAD_BOTH 0x00020000U -#define P4_CCCR_THREAD_ANY 0x00030000U -#define P4_CCCR_RESERVED 0x00000fffU +#define P4_CCCR_ENABLE 0x00001000ULL +#define P4_CCCR_THREAD_SINGLE 0x00010000ULL +#define P4_CCCR_THREAD_BOTH 0x00020000ULL +#define P4_CCCR_THREAD_ANY 0x00030000ULL +#define P4_CCCR_RESERVED 0x00000fffULL #define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT) #define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT) #define P4_GEN_ESCR_EMASK(class, name, bit) \ - class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) + class##__##name = ((1ULL << bit) << P4_ESCR_EVENTMASK_SHIFT) #define P4_ESCR_EMASK_BIT(class, name) class##__##name /* @@ -107,7 +107,7 @@ * P4_PEBS_CONFIG_MASK and related bits on * modification.) */ -#define P4_CONFIG_ALIASABLE (1 << 9) +#define P4_CONFIG_ALIASABLE (1ULL << 9) /* * The bits we allow to pass for RAW events @@ -784,17 +784,17 @@ enum P4_ESCR_EMASKS { * Note we have UOP and PEBS bits reserved for now * just in case if we will need them once */ -#define P4_PEBS_CONFIG_ENABLE (1 << 7) -#define P4_PEBS_CONFIG_UOP_TAG (1 << 8) -#define P4_PEBS_CONFIG_METRIC_MASK 0x3f -#define P4_PEBS_CONFIG_MASK 0xff +#define P4_PEBS_CONFIG_ENABLE (1ULL << 7) +#define P4_PEBS_CONFIG_UOP_TAG (1ULL << 8) +#define P4_PEBS_CONFIG_METRIC_MASK 0x3FLL +#define P4_PEBS_CONFIG_MASK 0xFFLL /* * mem: Only counters MSR_IQ_COUNTER4 (16) and * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling */ -#define P4_PEBS_ENABLE 0x02000000U -#define P4_PEBS_ENABLE_UOP_TAG 0x01000000U +#define P4_PEBS_ENABLE 0x02000000ULL +#define P4_PEBS_ENABLE_UOP_TAG 0x01000000ULL #define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK) #define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK) diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 567b5d0..e642300 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -351,7 +351,6 @@ static inline void update_page_count(int level, unsigned long pages) { } * as a pte too. */ extern pte_t *lookup_address(unsigned long address, unsigned int *level); -extern int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase); extern phys_addr_t slow_virt_to_phys(void *__address); #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 3270116..22224b3 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -91,9 +91,6 @@ struct cpuinfo_x86 { /* Problems on some 486Dx4's and old 386's: */ char hard_math; char rfu; - char fdiv_bug; - char f00f_bug; - char coma_bug; char pad0; #else /* Number of 4K pages in DTLB/ITLB combined(in pages): */ @@ -107,7 +104,7 @@ struct cpuinfo_x86 { __u32 extended_cpuid_level; /* Maximum supported CPUID level, -1=no CPUID: */ int cpuid_level; - __u32 x86_capability[NCAPINTS]; + __u32 x86_capability[NCAPINTS + NBUGINTS]; char x86_vendor_id[16]; char x86_model_id[64]; /* in KB - valid for CPUS which support this call: */ @@ -973,26 +970,6 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old, return ratio; } -/* - * AMD errata checking - */ -#ifdef CONFIG_CPU_SUP_AMD -extern const int amd_erratum_383[]; -extern const int amd_erratum_400[]; -extern bool cpu_has_amd_erratum(const int *); - -#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } -#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } -#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ - ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) -#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) -#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) -#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) - -#else -#define cpu_has_amd_erratum(x) (false) -#endif /* CONFIG_CPU_SUP_AMD */ - extern unsigned long arch_align_stack(unsigned long sp); extern void free_init_pages(char *what, unsigned long begin, unsigned long end); diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index 487055c..f6064b7 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h @@ -15,7 +15,6 @@ struct saved_context { unsigned long cr0, cr2, cr3, cr4; u64 misc_enable; bool misc_enable_saved; - struct desc_ptr gdt; struct desc_ptr idt; u16 ldt; u16 tss; diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h index 09b0bf1..97b84e0 100644 --- a/arch/x86/include/asm/suspend_64.h +++ b/arch/x86/include/asm/suspend_64.h @@ -25,9 +25,6 @@ struct saved_context { u64 misc_enable; bool misc_enable_saved; unsigned long efer; - u16 gdt_pad; - u16 gdt_limit; - unsigned long gdt_base; u16 idt_pad; u16 idt_limit; unsigned long idt_base; diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 1ace47b..2e188d6 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[]; */ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return regs->orig_ax & __SYSCALL_MASK; + return regs->orig_ax; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - regs->ax = regs->orig_ax & __SYSCALL_MASK; + regs->ax = regs->orig_ax; } static inline long syscall_get_error(struct task_struct *task, diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 2cd056e..a1df6e8 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -241,8 +241,6 @@ static inline struct thread_info *current_thread_info(void) skip sending interrupt */ #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ -#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) - #ifndef __ASSEMBLY__ #define HAVE_SET_RESTORE_SIGMASK 1 static inline void set_restore_sigmask(void) diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 4fef207..c779730 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -7,7 +7,7 @@ #define tlb_flush(tlb) \ { \ - if (tlb->fullmm == 0) \ + if (!tlb->fullmm && !tlb->need_flush_all) \ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ else \ flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 8ff8be7..6e51979 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -55,4 +55,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); +extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs 
*regs); #endif /* _ASM_UPROBES_H */ diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index c20d1ce..e709884 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str) return _hypercall3(int, console_io, cmd, count, str); } -extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); +extern int __must_check xen_physdev_op_compat(int, void *); static inline int HYPERVISOR_physdev_op(int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); if (unlikely(rc == -ENOSYS)) - rc = HYPERVISOR_physdev_op_compat(cmd, arg); + rc = xen_physdev_op_compat(cmd, arg); return rc; } diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index c15ddaf..0874424 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -6,6 +6,7 @@ #define SETUP_E820_EXT 1 #define SETUP_DTB 2 #define SETUP_PCI 3 +#define SETUP_EFI_VARS 4 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index 892ce40..b575788 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -44,6 +44,7 @@ #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) +#define MSR_PLATFORM_INFO 0x000000ce #define MSR_MTRRcap 0x000000fe #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e @@ -71,6 +72,7 @@ #define MSR_IA32_PEBS_ENABLE 0x000003f1 #define MSR_IA32_DS_AREA 0x00000600 #define MSR_IA32_PERF_CAPABILITIES 0x00000345 +#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 #define MSR_MTRRfix64K_00000 0x00000250 #define MSR_MTRRfix16K_80000 0x00000258 @@ -194,6 +196,10 @@ #define MSR_AMD64_IBSBRTARGET 0xc001103b #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ +/* Fam 16h MSRs */ +#define MSR_F16H_L2I_PERF_CTL 0xc0010230 +#define MSR_F16H_L2I_PERF_CTR 0xc0010231 + /* Fam 15h MSRs */ #define MSR_F15H_PERF_CTL 0xc0010200 #define MSR_F15H_PERF_CTR 0xc0010201 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 0532f5d..b44577b 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -46,7 +46,7 @@ int acpi_suspend_lowlevel(void) header->pmode_behavior = 0; #ifndef CONFIG_64BIT - store_gdt((struct desc_ptr *)&header->pmode_gdt); + native_store_gdt((struct desc_ptr *)&header->pmode_gdt); if (!rdmsr_safe(MSR_EFER, &header->pmode_efer_low, diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index 13ab7205..d1daa66 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -1,4 +1,4 @@ - .section .text..page_aligned + .text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> @@ -18,7 +18,6 @@ wakeup_pmode_return: movw %ax, %gs # reload the gdt, as we need the full 32 bit address - lgdt saved_gdt lidt saved_idt lldt saved_ldt ljmp $(__KERNEL_CS), $1f @@ -44,7 +43,6 @@ bogus_magic: save_registers: - sgdt saved_gdt sidt saved_idt sldt saved_ldt str saved_tss @@ -93,7 +91,6 @@ ENTRY(saved_magic) .long 0 ENTRY(saved_eip) .long 0 # saved registers -saved_gdt: .long 0,0 saved_idt: .long 0,0 saved_ldt: .long 0 saved_tss: .long 0 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index ef5ccca..c15cf9a 100644 --- a/arch/x86/kernel/alternative.c +++ 
b/arch/x86/kernel/alternative.c @@ -271,7 +271,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start, replacement = (u8 *)&a->repl_offset + a->repl_offset; BUG_ON(a->replacementlen > a->instrlen); BUG_ON(a->instrlen > sizeof(insnbuf)); - BUG_ON(a->cpuid >= NCAPINTS*32); + BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); if (!boot_cpu_has(a->cpuid)) continue; diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index aadf335..3048ded 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -20,12 +20,14 @@ const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, {} }; EXPORT_SYMBOL(amd_nb_misc_ids); -static struct pci_device_id amd_nb_link_ids[] = { +static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, {} }; @@ -81,7 +83,6 @@ int amd_cache_northbridges(void) next_northbridge(link, amd_nb_link_ids); } - /* some CPU families (e.g. family 0x11) do not support GART */ if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x15) amd_northbridges.flags |= AMD_NB_GART; diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index d5fd66f..fd972a3 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -87,7 +87,7 @@ static u32 __init allocate_aperture(void) */ addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, aper_size, aper_size); - if (!addr || addr + aper_size > GART_MAX_ADDR) { + if (!addr) { printk(KERN_ERR "Cannot allocate aperture memory hole (%lx,%uK)\n", addr, aper_size>>10); diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 66b5faf..53a4e27 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -373,7 +373,6 @@ static int apm_cpu_idle(struct cpuidle_device *dev, static struct cpuidle_driver apm_idle_driver = { .name = "apm_idle", .owner = THIS_MODULE, - .en_core_tk_irqen = 1, .states = { { /* entry 0 is for polling */ }, { /* entry 1 is for APM idle */ diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index a0e067d..b0684e4 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -14,7 +14,6 @@ CFLAGS_common.o := $(nostackp) obj-y := intel_cacheinfo.o scattered.o topology.o obj-y += proc.o capflags.o powerflags.o common.o -obj-y += vmware.o hypervisor.o mshyperv.o obj-y += rdrand.o obj-y += match.o @@ -31,7 +30,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o ifdef CONFIG_PERF_EVENTS -obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o +obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o perf_event_amd_uncore.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o @@ -42,11 +41,13 @@ obj-$(CONFIG_MTRR) += mtrr/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o +obj-$(CONFIG_HYPERVISOR_GUEST) += vmware.o hypervisor.o mshyperv.o + quiet_cmd_mkcapflags = MKCAP $@ - cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ + cmd_mkcapflags = $(CONFIG_SHELL) 
$(srctree)/$(src)/mkcapflags.sh $< $@ cpufeature = $(src)/../../include/asm/cpufeature.h targets += capflags.c -$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE +$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE $(call if_changed,mkcapflags) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index fa96eb0..5013a48 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -20,11 +20,11 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) { - struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); u32 gprs[8] = { 0 }; int err; - WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__); + WARN_ONCE((boot_cpu_data.x86 != 0xf), + "%s should only be used on K8!\n", __func__); gprs[1] = msr; gprs[7] = 0x9c5a203a; @@ -38,10 +38,10 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) { - struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); u32 gprs[8] = { 0 }; - WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__); + WARN_ONCE((boot_cpu_data.x86 != 0xf), + "%s should only be used on K8!\n", __func__); gprs[0] = (u32)val; gprs[1] = msr; @@ -192,11 +192,11 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) /* Athlon 660/661 is valid. */ if ((c->x86_model == 6) && ((c->x86_mask == 0) || (c->x86_mask == 1))) - goto valid_k7; + return; /* Duron 670 is valid */ if ((c->x86_model == 7) && (c->x86_mask == 0)) - goto valid_k7; + return; /* * Athlon 662, Duron 671, and Athlon >model 7 have capability @@ -209,7 +209,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) ((c->x86_model == 7) && (c->x86_mask >= 1)) || (c->x86_model > 7)) if (cpu_has_mp) - goto valid_k7; + return; /* If we get here, not a certified SMP capable AMD system. */ @@ -220,9 +220,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) WARN_ONCE(1, "WARNING: This combination of AMD" " processors is not suitable for SMP.\n"); add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); - -valid_k7: - ; } static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) @@ -513,6 +510,10 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) #endif } +static const int amd_erratum_383[]; +static const int amd_erratum_400[]; +static bool cpu_has_amd_erratum(const int *erratum); + static void __cpuinit init_amd(struct cpuinfo_x86 *c) { u32 dummy; @@ -727,8 +728,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) rdmsrl_safe(MSR_AMD64_BU_CFG2, &value); value &= ~(1ULL << 24); wrmsrl_safe(MSR_AMD64_BU_CFG2, value); + + if (cpu_has_amd_erratum(amd_erratum_383)) + set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); } + if (cpu_has_amd_erratum(amd_erratum_400)) + set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); + rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); } @@ -847,8 +854,7 @@ cpu_dev_register(amd_cpu_dev); * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that * have an OSVW id assigned, which it takes as first argument. Both take a * variable number of family-specific model-stepping ranges created by - * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const - * int[] in arch/x86/include/asm/processor.h. + * AMD_MODEL_RANGE(). * * Example: * @@ -858,16 +864,22 @@ cpu_dev_register(amd_cpu_dev); * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); */ -const int amd_erratum_400[] = +#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } +#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } +#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ + ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) +#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) +#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) +#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) + +static const int amd_erratum_400[] = AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -EXPORT_SYMBOL_GPL(amd_erratum_400); -const int amd_erratum_383[] = +static const int amd_erratum_383[] = AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -EXPORT_SYMBOL_GPL(amd_erratum_383); -bool cpu_has_amd_erratum(const int *erratum) +static bool cpu_has_amd_erratum(const int *erratum) { struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); int osvw_id = *erratum++; @@ -908,5 +920,3 @@ bool cpu_has_amd_erratum(const int *erratum) return false; } - -EXPORT_SYMBOL_GPL(cpu_has_amd_erratum); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index af6455e..4112be9 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -59,7 +59,7 @@ static void __init check_fpu(void) * trap_init() enabled FXSR and company _before_ testing for FP * problems here. * - * Test for the divl bug.. + * Test for the divl bug: http://en.wikipedia.org/wiki/Fdiv_bug */ __asm__("fninit\n\t" "fldl %1\n\t" @@ -75,26 +75,12 @@ static void __init check_fpu(void) kernel_fpu_end(); - boot_cpu_data.fdiv_bug = fdiv_bug; - if (boot_cpu_data.fdiv_bug) + if (fdiv_bug) { + set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV); pr_warn("Hmm, FPU with FDIV bug\n"); + } } -/* - * Check whether we are able to run this kernel safely on SMP. - * - * - i386 is no longer supported. - * - In order to run on anything without a TSC, we need to be - * compiled for a i486. - */ - -static void __init check_config(void) -{ - if (boot_cpu_data.x86 < 4) - panic("Kernel requires i486+ for 'invlpg' and other features"); -} - - void __init check_bugs(void) { identify_boot_cpu(); @@ -102,7 +88,17 @@ void __init check_bugs(void) pr_info("CPU: "); print_cpu_info(&boot_cpu_data); #endif - check_config(); + + /* + * Check whether we are able to run this kernel safely on SMP. + * + * - i386 is no longer supported. + * - In order to run on anything without a TSC, we need to be + * compiled for a i486. + */ + if (boot_cpu_data.x86 < 4) + panic("Kernel requires i486+ for 'invlpg' and other features"); + init_utsname()->machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); alternative_instructions(); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index d814772..22018f7 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -920,6 +920,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) /* AND the already accumulated flags with these */ for (i = 0; i < NCAPINTS; i++) boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; + + /* OR, i.e. replicate the bug flags */ + for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++) + c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; } /* Init Machine Check Exception if available. */ diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 4fbd384..d048d5c 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -249,7 +249,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) /* Emulate MTRRs using Cyrix's ARRs. 
*/ set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); /* 6x86's contain this bug */ - c->coma_bug = 1; + set_cpu_bug(c, X86_BUG_COMA); break; case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */ @@ -317,7 +317,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) /* Enable MMX extensions (App note 108) */ setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); } else { - c->coma_bug = 1; /* 6x86MX, it has the bug. */ + /* A 6x86MX - it has the bug. */ + set_cpu_bug(c, X86_BUG_COMA); } tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 1905ce9..9b0c441 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -96,6 +96,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) sched_clock_stable = 1; } + /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ + if (c->x86 == 6) { + switch (c->x86_model) { + case 0x27: /* Penwell */ + case 0x35: /* Cloverview */ + set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); + break; + default: + break; + } + } + /* * There is a known erratum on Pentium III and Core Solo * and Core Duo CPUs. @@ -164,20 +176,6 @@ int __cpuinit ppro_with_ram_bug(void) return 0; } -#ifdef CONFIG_X86_F00F_BUG -static void __cpuinit trap_init_f00f_bug(void) -{ - __set_fixmap(FIX_F00F_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); - - /* - * Update the IDT descriptor and reload the IDT so that - * it uses the read-only mapped virtual address. - */ - idt_descr.address = fix_to_virt(FIX_F00F_IDT); - load_idt(&idt_descr); -} -#endif - static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) { /* calling is from identify_secondary_cpu() ? */ @@ -206,16 +204,14 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) /* * All current models of Pentium and Pentium with MMX technology CPUs * have the F0 0F bug, which lets nonprivileged users lock up the - * system. - * Note that the workaround only should be initialized once... + * system. Announce that the fault handler will be checking for it. 
*/ - c->f00f_bug = 0; + clear_cpu_bug(c, X86_BUG_F00F); if (!paravirt_enabled() && c->x86 == 5) { static int f00f_workaround_enabled; - c->f00f_bug = 1; + set_cpu_bug(c, X86_BUG_F00F); if (!f00f_workaround_enabled) { - trap_init_f00f_bug(); printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); f00f_workaround_enabled = 1; } diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 7bc1263..9239504 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -2358,7 +2358,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) if (action == CPU_POST_DEAD) { /* intentionally ignoring frozen here */ - cmci_rediscover(cpu); + cmci_rediscover(); } return NOTIFY_OK; diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 1ac581f..9cb5276 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -33,7 +33,6 @@ #include <asm/mce.h> #include <asm/msr.h> -#define NR_BANKS 6 #define NR_BLOCKS 9 #define THRESHOLD_MAX 0xFFF #define INT_TYPE_APIC 0x00020000 @@ -57,12 +56,7 @@ static const char * const th_names[] = { "execution_unit", }; -static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks); - -static unsigned char shared_bank[NR_BANKS] = { - 0, 0, 0, 0, 1 -}; - +static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks); static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ static void amd_threshold_interrupt(void); @@ -79,6 +73,12 @@ struct thresh_restart { u16 old_limit; }; +static inline bool is_shared_bank(int bank) +{ + /* Bank 4 is for northbridge reporting and is thus shared */ + return (bank == 4); +} + static const char * const bank4_names(struct threshold_block *b) { switch (b->address) { @@ -214,7 +214,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) unsigned int bank, block; int offset = -1; - for (bank = 0; bank < NR_BANKS; ++bank) { + for (bank = 0; bank < mca_cfg.banks; ++bank) { for (block = 0; block < NR_BLOCKS; ++block) { if (block == 0) address = MSR_IA32_MC0_MISC + bank * 4; @@ -276,7 +276,7 @@ static void amd_threshold_interrupt(void) mce_setup(&m); /* assume first bank caused it */ - for (bank = 0; bank < NR_BANKS; ++bank) { + for (bank = 0; bank < mca_cfg.banks; ++bank) { if (!(per_cpu(bank_map, m.cpu) & (1 << bank))) continue; for (block = 0; block < NR_BLOCKS; ++block) { @@ -467,7 +467,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, u32 low, high; int err; - if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) + if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) return 0; if (rdmsr_safe_on_cpu(cpu, address, &low, &high)) @@ -575,7 +575,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) const char *name = th_names[bank]; int err = 0; - if (shared_bank[bank]) { + if (is_shared_bank(bank)) { nb = node_to_amd_nb(amd_get_nb_id(cpu)); /* threshold descriptor already initialized on this node? 
*/ @@ -609,7 +609,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) per_cpu(threshold_banks, cpu)[bank] = b; - if (shared_bank[bank]) { + if (is_shared_bank(bank)) { atomic_set(&b->cpus, 1); /* nb is already initialized, see above */ @@ -635,9 +635,17 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) static __cpuinit int threshold_create_device(unsigned int cpu) { unsigned int bank; + struct threshold_bank **bp; int err = 0; - for (bank = 0; bank < NR_BANKS; ++bank) { + bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks, + GFP_KERNEL); + if (!bp) + return -ENOMEM; + + per_cpu(threshold_banks, cpu) = bp; + + for (bank = 0; bank < mca_cfg.banks; ++bank) { if (!(per_cpu(bank_map, cpu) & (1 << bank))) continue; err = threshold_create_bank(cpu, bank); @@ -691,7 +699,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) if (!b->blocks) goto free_out; - if (shared_bank[bank]) { + if (is_shared_bank(bank)) { if (!atomic_dec_and_test(&b->cpus)) { __threshold_remove_blocks(b); per_cpu(threshold_banks, cpu)[bank] = NULL; @@ -719,11 +727,12 @@ static void threshold_remove_device(unsigned int cpu) { unsigned int bank; - for (bank = 0; bank < NR_BANKS; ++bank) { + for (bank = 0; bank < mca_cfg.banks; ++bank) { if (!(per_cpu(bank_map, cpu) & (1 << bank))) continue; threshold_remove_bank(cpu, bank); } + kfree(per_cpu(threshold_banks, cpu)); } /* get notified when a cpu comes on/off */ diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 402c454..ae1697c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -285,39 +285,24 @@ void cmci_clear(void) raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); } -static long cmci_rediscover_work_func(void *arg) +static void cmci_rediscover_work_func(void *arg) { int banks; /* Recheck banks in case CPUs don't all have the same */ if (cmci_supported(&banks)) cmci_discover(banks); - - return 0; } -/* - * After a CPU went down cycle through all the others and rediscover - * Must run in process context. 
- */ -void cmci_rediscover(int dying) +/* After a CPU went down cycle through all the others and rediscover */ +void cmci_rediscover(void) { - int cpu, banks; + int banks; if (!cmci_supported(&banks)) return; - for_each_online_cpu(cpu) { - if (cpu == dying) - continue; - - if (cpu == smp_processor_id()) { - cmci_rediscover_work_func(NULL); - continue; - } - - work_on_cpu(cpu, cmci_rediscover_work_func, NULL); - } + on_each_cpu(cmci_rediscover_work_func, NULL, 1); } /* diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl deleted file mode 100644 index 091972e..0000000 --- a/arch/x86/kernel/cpu/mkcapflags.pl +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/perl -w -# -# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h -# - -($in, $out) = @ARGV; - -open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n"; -open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n"; - -print OUT "#ifndef _ASM_X86_CPUFEATURE_H\n"; -print OUT "#include <asm/cpufeature.h>\n"; -print OUT "#endif\n"; -print OUT "\n"; -print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n"; - -%features = (); -$err = 0; - -while (defined($line = <IN>)) { - if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) { - $macro = $1; - $feature = "\L$2"; - $tail = $3; - if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) { - $feature = "\L$1"; - } - - next if ($feature eq ''); - - if ($features{$feature}++) { - print STDERR "$in: duplicate feature name: $feature\n"; - $err++; - } - printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature; - } -} -print OUT "};\n"; - -close(IN); -close(OUT); - -if ($err) { - unlink($out); - exit(1); -} - -exit(0); diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh new file mode 100644 index 0000000..2bf6165 --- /dev/null +++ b/arch/x86/kernel/cpu/mkcapflags.sh @@ -0,0 +1,41 @@ +#!/bin/sh +# +# Generate the x86_cap_flags[] array from include/asm/cpufeature.h +# + +IN=$1 +OUT=$2 + +TABS="$(printf '\t\t\t\t\t')" +trap 'rm "$OUT"' EXIT + +( + echo "#ifndef _ASM_X86_CPUFEATURE_H" + echo "#include <asm/cpufeature.h>" + echo "#endif" + echo "" + echo "const char * const x86_cap_flags[NCAPINTS*32] = {" + + # Iterate through any input lines starting with #define X86_FEATURE_ + sed -n -e 's/\t/ /g' -e 's/^ *# *define *X86_FEATURE_//p' $IN | + while read i + do + # Name is everything up to the first whitespace + NAME="$(echo "$i" | sed 's/ .*//')" + + # If the /* comment */ starts with a quote string, grab that. + VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')" + [ -z "$VALUE" ] && VALUE="\"$NAME\"" + [ "$VALUE" == '""' ] && continue + + # Name is uppercase, VALUE is all lowercase + VALUE="$(echo "$VALUE" | tr A-Z a-z)" + + TABCOUNT=$(( ( 5*8 - 14 - $(echo "$NAME" | wc -c) ) / 8 )) + printf "\t[%s]%.*s = %s,\n" \ + "X86_FEATURE_$NAME" "$TABCOUNT" "$TABS" "$VALUE" + done + echo "};" +) > $OUT + +trap - EXIT diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index a7d26d8..8f4be53 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void) if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) return false; - /* - * Xen emulates Hyper-V to support enlightened Windows. - * Check to see first if we are on a Xen Hypervisor. 
- */ - if (xen_cpuid_base()) - return false; - cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); @@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void) if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); -#if IS_ENABLED(CONFIG_HYPERV) - /* - * Setup the IDT for hypervisor callback. - */ - alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); -#endif } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { @@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr; void hv_register_vmbus_handler(int irq, irq_handler_t handler) { + /* + * Setup the IDT for hypervisor callback. + */ + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); + vmbus_irq = irq; vmbus_isr = handler; } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index bf0f01a..1025f3c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -180,8 +180,9 @@ static void release_pmc_hardware(void) {} static bool check_hw_exists(void) { - u64 val, val_new = ~0; - int i, reg, ret = 0; + u64 val, val_fail, val_new= ~0; + int i, reg, reg_fail, ret = 0; + int bios_fail = 0; /* * Check to see if the BIOS enabled any of the counters, if so @@ -192,8 +193,11 @@ static bool check_hw_exists(void) ret = rdmsrl_safe(reg, &val); if (ret) goto msr_fail; - if (val & ARCH_PERFMON_EVENTSEL_ENABLE) - goto bios_fail; + if (val & ARCH_PERFMON_EVENTSEL_ENABLE) { + bios_fail = 1; + val_fail = val; + reg_fail = reg; + } } if (x86_pmu.num_counters_fixed) { @@ -202,8 +206,11 @@ static bool check_hw_exists(void) if (ret) goto msr_fail; for (i = 0; i < x86_pmu.num_counters_fixed; i++) { - if (val & (0x03 << i*4)) - goto bios_fail; + if (val & (0x03 << i*4)) { + bios_fail = 1; + val_fail = val; + reg_fail = reg; + } } } @@ -221,14 +228,13 @@ static bool check_hw_exists(void) if (ret || val != val_new) goto msr_fail; - return true; - -bios_fail: /* * We still allow the PMU driver to operate: */ - printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n"); - printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val); + if (bios_fail) { + printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n"); + printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail); + } return true; @@ -1316,9 +1322,16 @@ static struct attribute_group x86_pmu_format_group = { */ static void __init filter_events(struct attribute **attrs) { + struct device_attribute *d; + struct perf_pmu_events_attr *pmu_attr; int i, j; for (i = 0; attrs[i]; i++) { + d = (struct device_attribute *)attrs[i]; + pmu_attr = container_of(d, struct perf_pmu_events_attr, attr); + /* str trumps id */ + if (pmu_attr->event_str) + continue; if (x86_pmu.event_map(i)) continue; @@ -1330,22 +1343,45 @@ static void __init filter_events(struct attribute **attrs) } } -static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, +/* Merge two pointer arrays */ +static __init struct attribute **merge_attr(struct attribute **a, struct attribute **b) +{ + struct attribute **new; + int j, i; + + for (j = 0; a[j]; j++) + ; + for (i = 0; b[i]; i++) + j++; + j++; + + new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL); + if (!new) + return NULL; + + j = 0; + for (i = 0; a[i]; i++) + new[j++] = a[i]; + for (i = 0; b[i]; i++) + new[j++] = b[i]; + new[j] = 
NULL; + + return new; +} + +ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page) { struct perf_pmu_events_attr *pmu_attr = \ container_of(attr, struct perf_pmu_events_attr, attr); - u64 config = x86_pmu.event_map(pmu_attr->id); - return x86_pmu.events_sysfs_show(page, config); -} -#define EVENT_VAR(_id) event_attr_##_id -#define EVENT_PTR(_id) &event_attr_##_id.attr.attr + /* string trumps id */ + if (pmu_attr->event_str) + return sprintf(page, "%s", pmu_attr->event_str); -#define EVENT_ATTR(_name, _id) \ - PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \ - events_sysfs_show) + return x86_pmu.events_sysfs_show(page, config); +} EVENT_ATTR(cpu-cycles, CPU_CYCLES ); EVENT_ATTR(instructions, INSTRUCTIONS ); @@ -1459,16 +1495,27 @@ static int __init init_hw_perf_events(void) unconstrained = (struct event_constraint) __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, - 0, x86_pmu.num_counters, 0); + 0, x86_pmu.num_counters, 0, 0); x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ x86_pmu_format_group.attrs = x86_pmu.format_attrs; + if (x86_pmu.event_attrs) + x86_pmu_events_group.attrs = x86_pmu.event_attrs; + if (!x86_pmu.events_sysfs_show) x86_pmu_events_group.attrs = &empty_attrs; else filter_events(x86_pmu_events_group.attrs); + if (x86_pmu.cpu_events) { + struct attribute **tmp; + + tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events); + if (!WARN_ON(!tmp)) + x86_pmu_events_group.attrs = tmp; + } + pr_info("... version: %d\n", x86_pmu.version); pr_info("... bit width: %d\n", x86_pmu.cntval_bits); pr_info("... generic registers: %d\n", x86_pmu.num_counters); diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 7f5c75c..ba9aadf 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -46,6 +46,7 @@ enum extra_reg_type { EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ EXTRA_REG_LBR = 2, /* lbr_select */ + EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ EXTRA_REG_MAX /* number of entries needed */ }; @@ -59,7 +60,13 @@ struct event_constraint { u64 cmask; int weight; int overlap; + int flags; }; +/* + * struct event_constraint flags + */ +#define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */ +#define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */ struct amd_nb { int nb_id; /* NorthBridge id */ @@ -170,16 +177,17 @@ struct cpu_hw_events { void *kfree_on_online; }; -#define __EVENT_CONSTRAINT(c, n, m, w, o) {\ +#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\ { .idxmsk64 = (n) }, \ .code = (c), \ .cmask = (m), \ .weight = (w), \ .overlap = (o), \ + .flags = f, \ } #define EVENT_CONSTRAINT(c, n, m) \ - __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0) + __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0) /* * The overlap flag marks event constraints with overlapping counter @@ -203,7 +211,7 @@ struct cpu_hw_events { * and its counter masks must be kept at a minimum. */ #define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ - __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1) + __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0) /* * Constraint on the Event code. 
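
The flags field added to struct event_constraint above is what lets the PEBS machinery learn, per event, whether load-latency (PERF_X86_EVENT_PEBS_LDLAT) or precise-store (PERF_X86_EVENT_PEBS_ST) handling applies: when a constraint matches, its flags are ORed into event->hw.flags and tested again at PEBS enable time (see the intel.c and perf_event_intel_ds.c hunks later in this diff). Below is a minimal stand-alone sketch of that propagation pattern; the types and values are simplified stand-ins for the kernel structures shown above, not the actual kernel definitions.

#include <stdio.h>
#include <stdint.h>

#define PEBS_LDLAT 0x1   /* stand-in for PERF_X86_EVENT_PEBS_LDLAT */
#define PEBS_ST    0x2   /* stand-in for PERF_X86_EVENT_PEBS_ST */

struct constraint { uint64_t code; uint64_t cmask; int flags; };
struct hw_event   { uint64_t config; int flags; };

/* On a match, the constraint's flags are ORed into the event so that
 * later code (e.g. the PEBS enable path) can test them per event. */
static const struct constraint *get_constraint(struct hw_event *ev,
                                               const struct constraint *tbl,
                                               int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if ((ev->config & tbl[i].cmask) == tbl[i].code) {
                        ev->flags |= tbl[i].flags;
                        return &tbl[i];
                }
        }
        return NULL;
}

int main(void)
{
        /* roughly mirrors INTEL_PLD_CONSTRAINT(0x01cd, ...) and
         * INTEL_PST_CONSTRAINT(0x02cd, ...) from the hunks above */
        static const struct constraint tbl[] = {
                { 0x01cd, 0xffff, PEBS_LDLAT },
                { 0x02cd, 0xffff, PEBS_ST },
        };
        struct hw_event ev = { .config = 0x01cd, .flags = 0 };

        get_constraint(&ev, tbl, 2);
        printf("ldlat=%d st=%d\n",
               !!(ev.flags & PEBS_LDLAT), !!(ev.flags & PEBS_ST));
        return 0;
}
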
@@ -231,6 +239,14 @@ struct cpu_hw_events { #define INTEL_UEVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) +#define INTEL_PLD_CONSTRAINT(c, n) \ + __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ + HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT) + +#define INTEL_PST_CONSTRAINT(c, n) \ + __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ + HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST) + #define EVENT_CONSTRAINT_END \ EVENT_CONSTRAINT(0, 0, 0) @@ -260,12 +276,22 @@ struct extra_reg { .msr = (ms), \ .config_mask = (m), \ .valid_mask = (vm), \ - .idx = EXTRA_REG_##i \ + .idx = EXTRA_REG_##i, \ } #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) +#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \ + EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \ + ARCH_PERFMON_EVENTSEL_UMASK, vm, idx) + +#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \ + INTEL_UEVENT_EXTRA_REG(c, \ + MSR_PEBS_LD_LAT_THRESHOLD, \ + 0xffff, \ + LDLAT) + #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) union perf_capabilities { @@ -355,8 +381,10 @@ struct x86_pmu { */ int attr_rdpmc; struct attribute **format_attrs; + struct attribute **event_attrs; ssize_t (*events_sysfs_show)(char *page, u64 config); + struct attribute **cpu_events; /* * CPU Hotplug hooks @@ -421,6 +449,23 @@ do { \ #define ERF_NO_HT_SHARING 1 #define ERF_HAS_RSP_1 2 +#define EVENT_VAR(_id) event_attr_##_id +#define EVENT_PTR(_id) &event_attr_##_id.attr.attr + +#define EVENT_ATTR(_name, _id) \ +static struct perf_pmu_events_attr EVENT_VAR(_id) = { \ + .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ + .id = PERF_COUNT_HW_##_id, \ + .event_str = NULL, \ +}; + +#define EVENT_ATTR_STR(_name, v, str) \ +static struct perf_pmu_events_attr event_attr_##v = { \ + .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ + .id = 0, \ + .event_str = str, \ +}; + extern struct x86_pmu x86_pmu __read_mostly; DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events); @@ -628,6 +673,9 @@ int p6_pmu_init(void); int knc_pmu_init(void); +ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, + char *page); + #else /* CONFIG_CPU_SUP_INTEL */ static inline void reserve_ds_buffers(void) diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index dfdab42..7e28d94 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -132,14 +132,11 @@ static u64 amd_pmu_event_map(int hw_event) return amd_perfmon_event_map[hw_event]; } -static struct event_constraint *amd_nb_event_constraint; - /* * Previously calculated offsets */ static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly; static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly; -static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly; /* * Legacy CPUs: @@ -147,14 +144,10 @@ static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly; * * CPUs with core performance counter extensions: * 6 counters starting at 0xc0010200 each offset by 2 - * - * CPUs with north bridge performance counter extensions: - * 4 additional counters starting at 0xc0010240 each offset by 2 - * (indexed right above either one of the above core counters) */ static inline int amd_pmu_addr_offset(int index, bool eventsel) { - int offset, first, base; + int offset; if (!index) return index; @@ -167,23 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel) if (offset) return offset; - if (amd_nb_event_constraint && - 
test_bit(index, amd_nb_event_constraint->idxmsk)) { - /* - * calculate the offset of NB counters with respect to - * base eventsel or perfctr - */ - - first = find_first_bit(amd_nb_event_constraint->idxmsk, - X86_PMC_IDX_MAX); - - if (eventsel) - base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel; - else - base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr; - - offset = base + ((index - first) << 1); - } else if (!cpu_has_perfctr_core) + if (!cpu_has_perfctr_core) offset = index; else offset = index << 1; @@ -196,36 +173,6 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel) return offset; } -static inline int amd_pmu_rdpmc_index(int index) -{ - int ret, first; - - if (!index) - return index; - - ret = rdpmc_indexes[index]; - - if (ret) - return ret; - - if (amd_nb_event_constraint && - test_bit(index, amd_nb_event_constraint->idxmsk)) { - /* - * according to the mnual, ECX value of the NB counters is - * the index of the NB counter (0, 1, 2 or 3) plus 6 - */ - - first = find_first_bit(amd_nb_event_constraint->idxmsk, - X86_PMC_IDX_MAX); - ret = index - first + 6; - } else - ret = index; - - rdpmc_indexes[index] = ret; - - return ret; -} - static int amd_core_hw_config(struct perf_event *event) { if (event->attr.exclude_host && event->attr.exclude_guest) @@ -245,34 +192,6 @@ static int amd_core_hw_config(struct perf_event *event) } /* - * NB counters do not support the following event select bits: - * Host/Guest only - * Counter mask - * Invert counter mask - * Edge detect - * OS/User mode - */ -static int amd_nb_hw_config(struct perf_event *event) -{ - /* for NB, we only allow system wide counting mode */ - if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) - return -EINVAL; - - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_host || event->attr.exclude_guest) - return -EINVAL; - - event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | - ARCH_PERFMON_EVENTSEL_OS); - - if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB | - ARCH_PERFMON_EVENTSEL_INT)) - return -EINVAL; - - return 0; -} - -/* * AMD64 events are detected based on their event codes. 
*/ static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) @@ -285,11 +204,6 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc) return (hwc->config & 0xe0) == 0xe0; } -static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc) -{ - return amd_nb_event_constraint && amd_is_nb_event(hwc); -} - static inline int amd_has_nb(struct cpu_hw_events *cpuc) { struct amd_nb *nb = cpuc->amd_nb; @@ -315,9 +229,6 @@ static int amd_pmu_hw_config(struct perf_event *event) if (event->attr.type == PERF_TYPE_RAW) event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; - if (amd_is_perfctr_nb_event(&event->hw)) - return amd_nb_hw_config(event); - return amd_core_hw_config(event); } @@ -341,19 +252,6 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc, } } -static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc) -{ - int core_id = cpu_data(smp_processor_id()).cpu_core_id; - - /* deliver interrupts only to this core */ - if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) { - hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE; - hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK; - hwc->config |= (u64)(core_id) << - AMD64_EVENTSEL_INT_CORE_SEL_SHIFT; - } -} - /* * AMD64 NorthBridge events need special treatment because * counter access needs to be synchronized across all cores @@ -441,9 +339,6 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev if (new == -1) return &emptyconstraint; - if (amd_is_perfctr_nb_event(hwc)) - amd_nb_interrupt_hw_config(hwc); - return &nb->event_constraints[new]; } @@ -543,8 +438,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) return &unconstrained; - return __amd_get_nb_event_constraints(cpuc, event, - amd_nb_event_constraint); + return __amd_get_nb_event_constraints(cpuc, event, NULL); } static void amd_put_event_constraints(struct cpu_hw_events *cpuc, @@ -643,9 +537,6 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); -static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0); -static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0); - static struct event_constraint * amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) { @@ -711,8 +602,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev return &amd_f15_PMC20; } case AMD_EVENT_NB: - return __amd_get_nb_event_constraints(cpuc, event, - amd_nb_event_constraint); + /* moved to perf_event_amd_uncore.c */ + return &emptyconstraint; default: return &emptyconstraint; } @@ -738,7 +629,6 @@ static __initconst const struct x86_pmu amd_pmu = { .eventsel = MSR_K7_EVNTSEL0, .perfctr = MSR_K7_PERFCTR0, .addr_offset = amd_pmu_addr_offset, - .rdpmc_index = amd_pmu_rdpmc_index, .event_map = amd_pmu_event_map, .max_events = ARRAY_SIZE(amd_perfmon_event_map), .num_counters = AMD64_NUM_COUNTERS, @@ -790,23 +680,6 @@ static int setup_perfctr_core(void) return 0; } -static int setup_perfctr_nb(void) -{ - if (!cpu_has_perfctr_nb) - return -ENODEV; - - x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB; - - if (cpu_has_perfctr_core) - amd_nb_event_constraint = &amd_NBPMC96; - else - amd_nb_event_constraint = &amd_NBPMC74; - - printk(KERN_INFO "perf: AMD northbridge performance counters 
detected\n"); - - return 0; -} - __init int amd_pmu_init(void) { /* Performance-monitoring supported from K7 and later: */ @@ -817,7 +690,6 @@ __init int amd_pmu_init(void) setup_event_constraints(); setup_perfctr_core(); - setup_perfctr_nb(); /* Events are common for all AMDs */ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c new file mode 100644 index 0000000..c0c661a --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c @@ -0,0 +1,547 @@ +/* + * Copyright (C) 2013 Advanced Micro Devices, Inc. + * + * Author: Jacob Shin <jacob.shin@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/perf_event.h> +#include <linux/percpu.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/cpumask.h> + +#include <asm/cpufeature.h> +#include <asm/perf_event.h> +#include <asm/msr.h> + +#define NUM_COUNTERS_NB 4 +#define NUM_COUNTERS_L2 4 +#define MAX_COUNTERS NUM_COUNTERS_NB + +#define RDPMC_BASE_NB 6 +#define RDPMC_BASE_L2 10 + +#define COUNTER_SHIFT 16 + +struct amd_uncore { + int id; + int refcnt; + int cpu; + int num_counters; + int rdpmc_base; + u32 msr_base; + cpumask_t *active_mask; + struct pmu *pmu; + struct perf_event *events[MAX_COUNTERS]; + struct amd_uncore *free_when_cpu_online; +}; + +static struct amd_uncore * __percpu *amd_uncore_nb; +static struct amd_uncore * __percpu *amd_uncore_l2; + +static struct pmu amd_nb_pmu; +static struct pmu amd_l2_pmu; + +static cpumask_t amd_nb_active_mask; +static cpumask_t amd_l2_active_mask; + +static bool is_nb_event(struct perf_event *event) +{ + return event->pmu->type == amd_nb_pmu.type; +} + +static bool is_l2_event(struct perf_event *event) +{ + return event->pmu->type == amd_l2_pmu.type; +} + +static struct amd_uncore *event_to_amd_uncore(struct perf_event *event) +{ + if (is_nb_event(event) && amd_uncore_nb) + return *per_cpu_ptr(amd_uncore_nb, event->cpu); + else if (is_l2_event(event) && amd_uncore_l2) + return *per_cpu_ptr(amd_uncore_l2, event->cpu); + + return NULL; +} + +static void amd_uncore_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 prev, new; + s64 delta; + + /* + * since we do not enable counter overflow interrupts, + * we do not have to worry about prev_count changing on us + */ + + prev = local64_read(&hwc->prev_count); + rdpmcl(hwc->event_base_rdpmc, new); + local64_set(&hwc->prev_count, new); + delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT); + delta >>= COUNTER_SHIFT; + local64_add(delta, &event->count); +} + +static void amd_uncore_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (flags & PERF_EF_RELOAD) + wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count)); + + hwc->state = 0; + wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE)); + perf_event_update_userpage(event); +} + +static void amd_uncore_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config); + hwc->state |= PERF_HES_STOPPED; + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + amd_uncore_read(event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +static int amd_uncore_add(struct perf_event *event, int flags) +{ 
+ int i; + struct amd_uncore *uncore = event_to_amd_uncore(event); + struct hw_perf_event *hwc = &event->hw; + + /* are we already assigned? */ + if (hwc->idx != -1 && uncore->events[hwc->idx] == event) + goto out; + + for (i = 0; i < uncore->num_counters; i++) { + if (uncore->events[i] == event) { + hwc->idx = i; + goto out; + } + } + + /* if not, take the first available counter */ + hwc->idx = -1; + for (i = 0; i < uncore->num_counters; i++) { + if (cmpxchg(&uncore->events[i], NULL, event) == NULL) { + hwc->idx = i; + break; + } + } + +out: + if (hwc->idx == -1) + return -EBUSY; + + hwc->config_base = uncore->msr_base + (2 * hwc->idx); + hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx); + hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx; + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + amd_uncore_start(event, PERF_EF_RELOAD); + + return 0; +} + +static void amd_uncore_del(struct perf_event *event, int flags) +{ + int i; + struct amd_uncore *uncore = event_to_amd_uncore(event); + struct hw_perf_event *hwc = &event->hw; + + amd_uncore_stop(event, PERF_EF_UPDATE); + + for (i = 0; i < uncore->num_counters; i++) { + if (cmpxchg(&uncore->events[i], event, NULL) == event) + break; + } + + hwc->idx = -1; +} + +static int amd_uncore_event_init(struct perf_event *event) +{ + struct amd_uncore *uncore; + struct hw_perf_event *hwc = &event->hw; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * NB and L2 counters (MSRs) are shared across all cores that share the + * same NB / L2 cache. Interrupts can be directed to a single target + * core, however, event counts generated by processes running on other + * cores cannot be masked out. So we do not support sampling and + * per-thread events. + */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + /* NB and L2 counters do not have usr/os/guest/host bits */ + if (event->attr.exclude_user || event->attr.exclude_kernel || + event->attr.exclude_host || event->attr.exclude_guest) + return -EINVAL; + + /* and we do not enable counter overflow interrupts */ + hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; + hwc->idx = -1; + + if (event->cpu < 0) + return -EINVAL; + + uncore = event_to_amd_uncore(event); + if (!uncore) + return -ENODEV; + + /* + * since request can come in to any of the shared cores, we will remap + * to a single common cpu. 
+ */ + event->cpu = uncore->cpu; + + return 0; +} + +static ssize_t amd_uncore_attr_show_cpumask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int n; + cpumask_t *active_mask; + struct pmu *pmu = dev_get_drvdata(dev); + + if (pmu->type == amd_nb_pmu.type) + active_mask = &amd_nb_active_mask; + else if (pmu->type == amd_l2_pmu.type) + active_mask = &amd_l2_active_mask; + else + return 0; + + n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask); + buf[n++] = '\n'; + buf[n] = '\0'; + return n; +} +static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL); + +static struct attribute *amd_uncore_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group amd_uncore_attr_group = { + .attrs = amd_uncore_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-7,32-35"); +PMU_FORMAT_ATTR(umask, "config:8-15"); + +static struct attribute *amd_uncore_format_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + NULL, +}; + +static struct attribute_group amd_uncore_format_group = { + .name = "format", + .attrs = amd_uncore_format_attr, +}; + +static const struct attribute_group *amd_uncore_attr_groups[] = { + &amd_uncore_attr_group, + &amd_uncore_format_group, + NULL, +}; + +static struct pmu amd_nb_pmu = { + .attr_groups = amd_uncore_attr_groups, + .name = "amd_nb", + .event_init = amd_uncore_event_init, + .add = amd_uncore_add, + .del = amd_uncore_del, + .start = amd_uncore_start, + .stop = amd_uncore_stop, + .read = amd_uncore_read, +}; + +static struct pmu amd_l2_pmu = { + .attr_groups = amd_uncore_attr_groups, + .name = "amd_l2", + .event_init = amd_uncore_event_init, + .add = amd_uncore_add, + .del = amd_uncore_del, + .start = amd_uncore_start, + .stop = amd_uncore_stop, + .read = amd_uncore_read, +}; + +static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu) +{ + return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL, + cpu_to_node(cpu)); +} + +static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu) +{ + struct amd_uncore *uncore; + + if (amd_uncore_nb) { + uncore = amd_uncore_alloc(cpu); + uncore->cpu = cpu; + uncore->num_counters = NUM_COUNTERS_NB; + uncore->rdpmc_base = RDPMC_BASE_NB; + uncore->msr_base = MSR_F15H_NB_PERF_CTL; + uncore->active_mask = &amd_nb_active_mask; + uncore->pmu = &amd_nb_pmu; + *per_cpu_ptr(amd_uncore_nb, cpu) = uncore; + } + + if (amd_uncore_l2) { + uncore = amd_uncore_alloc(cpu); + uncore->cpu = cpu; + uncore->num_counters = NUM_COUNTERS_L2; + uncore->rdpmc_base = RDPMC_BASE_L2; + uncore->msr_base = MSR_F16H_L2I_PERF_CTL; + uncore->active_mask = &amd_l2_active_mask; + uncore->pmu = &amd_l2_pmu; + *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; + } +} + +static struct amd_uncore * +__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this, + struct amd_uncore * __percpu *uncores) +{ + unsigned int cpu; + struct amd_uncore *that; + + for_each_online_cpu(cpu) { + that = *per_cpu_ptr(uncores, cpu); + + if (!that) + continue; + + if (this == that) + continue; + + if (this->id == that->id) { + that->free_when_cpu_online = this; + this = that; + break; + } + } + + this->refcnt++; + return this; +} + +static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu) +{ + unsigned int eax, ebx, ecx, edx; + struct amd_uncore *uncore; + + if (amd_uncore_nb) { + uncore = *per_cpu_ptr(amd_uncore_nb, cpu); + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); + uncore->id = ecx & 0xff; + + uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb); + *per_cpu_ptr(amd_uncore_nb, cpu) = 
uncore; + } + + if (amd_uncore_l2) { + unsigned int apicid = cpu_data(cpu).apicid; + unsigned int nshared; + + uncore = *per_cpu_ptr(amd_uncore_l2, cpu); + cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx); + nshared = ((eax >> 14) & 0xfff) + 1; + uncore->id = apicid - (apicid % nshared); + + uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2); + *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; + } +} + +static void __cpuinit uncore_online(unsigned int cpu, + struct amd_uncore * __percpu *uncores) +{ + struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); + + kfree(uncore->free_when_cpu_online); + uncore->free_when_cpu_online = NULL; + + if (cpu == uncore->cpu) + cpumask_set_cpu(cpu, uncore->active_mask); +} + +static void __cpuinit amd_uncore_cpu_online(unsigned int cpu) +{ + if (amd_uncore_nb) + uncore_online(cpu, amd_uncore_nb); + + if (amd_uncore_l2) + uncore_online(cpu, amd_uncore_l2); +} + +static void __cpuinit uncore_down_prepare(unsigned int cpu, + struct amd_uncore * __percpu *uncores) +{ + unsigned int i; + struct amd_uncore *this = *per_cpu_ptr(uncores, cpu); + + if (this->cpu != cpu) + return; + + /* this cpu is going down, migrate to a shared sibling if possible */ + for_each_online_cpu(i) { + struct amd_uncore *that = *per_cpu_ptr(uncores, i); + + if (cpu == i) + continue; + + if (this == that) { + perf_pmu_migrate_context(this->pmu, cpu, i); + cpumask_clear_cpu(cpu, that->active_mask); + cpumask_set_cpu(i, that->active_mask); + that->cpu = i; + break; + } + } +} + +static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu) +{ + if (amd_uncore_nb) + uncore_down_prepare(cpu, amd_uncore_nb); + + if (amd_uncore_l2) + uncore_down_prepare(cpu, amd_uncore_l2); +} + +static void __cpuinit uncore_dead(unsigned int cpu, + struct amd_uncore * __percpu *uncores) +{ + struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); + + if (cpu == uncore->cpu) + cpumask_clear_cpu(cpu, uncore->active_mask); + + if (!--uncore->refcnt) + kfree(uncore); + *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; +} + +static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu) +{ + if (amd_uncore_nb) + uncore_dead(cpu, amd_uncore_nb); + + if (amd_uncore_l2) + uncore_dead(cpu, amd_uncore_l2); +} + +static int __cpuinit +amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, + void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + amd_uncore_cpu_up_prepare(cpu); + break; + + case CPU_STARTING: + amd_uncore_cpu_starting(cpu); + break; + + case CPU_ONLINE: + amd_uncore_cpu_online(cpu); + break; + + case CPU_DOWN_PREPARE: + amd_uncore_cpu_down_prepare(cpu); + break; + + case CPU_UP_CANCELED: + case CPU_DEAD: + amd_uncore_cpu_dead(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = { + .notifier_call = amd_uncore_cpu_notifier, + .priority = CPU_PRI_PERF + 1, +}; + +static void __init init_cpu_already_online(void *dummy) +{ + unsigned int cpu = smp_processor_id(); + + amd_uncore_cpu_starting(cpu); + amd_uncore_cpu_online(cpu); +} + +static int __init amd_uncore_init(void) +{ + unsigned int cpu; + int ret = -ENODEV; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return -ENODEV; + + if (!cpu_has_topoext) + return -ENODEV; + + if (cpu_has_perfctr_nb) { + amd_uncore_nb = alloc_percpu(struct amd_uncore *); + perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1); + + printk(KERN_INFO "perf: AMD NB counters detected\n"); + ret = 0; + } + + 
if (cpu_has_perfctr_l2) { + amd_uncore_l2 = alloc_percpu(struct amd_uncore *); + perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1); + + printk(KERN_INFO "perf: AMD L2I counters detected\n"); + ret = 0; + } + + if (ret) + return -ENODEV; + + get_online_cpus(); + /* init cpus already online before registering for hotplug notifier */ + for_each_online_cpu(cpu) { + amd_uncore_cpu_up_prepare(cpu); + smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); + } + + register_cpu_notifier(&amd_uncore_cpu_notifier_block); + put_online_cpus(); + + return 0; +} +device_initcall(amd_uncore_init); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 529c893..ffd6050 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -81,6 +81,7 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), EVENT_EXTRA_END }; @@ -101,9 +102,15 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ + INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ + INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ + INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ + INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ + INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ EVENT_CONSTRAINT_END }; @@ -132,6 +139,7 @@ static struct extra_reg intel_westmere_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), EVENT_EXTRA_END }; @@ -149,11 +157,34 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = }; static struct extra_reg intel_snb_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), EVENT_EXTRA_END }; +static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), + EVENT_EXTRA_END +}; + +EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); +EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); +EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2"); + +struct attribute *nhm_events_attrs[] = { + EVENT_PTR(mem_ld_nhm), + NULL, +}; + +struct attribute *snb_events_attrs[] = { + EVENT_PTR(mem_ld_snb), + 
EVENT_PTR(mem_st_snb), + NULL, +}; + static u64 intel_pmu_event_map(int hw_event) { return intel_perfmon_event_map[hw_event]; @@ -1388,8 +1419,11 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) if (x86_pmu.event_constraints) { for_each_event_constraint(c, x86_pmu.event_constraints) { - if ((event->hw.config & c->cmask) == c->code) + if ((event->hw.config & c->cmask) == c->code) { + /* hw.flags zeroed at initialization */ + event->hw.flags |= c->flags; return c; + } } } @@ -1434,6 +1468,7 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, static void intel_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { + event->hw.flags = 0; intel_put_shared_regs_event_constraints(cpuc, event); } @@ -1757,6 +1792,8 @@ static void intel_pmu_flush_branch_stack(void) PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); +PMU_FORMAT_ATTR(ldlat, "config1:0-15"); + static struct attribute *intel_arch3_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -1767,6 +1804,7 @@ static struct attribute *intel_arch3_formats_attr[] = { &format_attr_cmask.attr, &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */ + &format_attr_ldlat.attr, /* PEBS load latency */ NULL, }; @@ -2027,6 +2065,8 @@ __init int intel_pmu_init(void) x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; + x86_pmu.cpu_events = nhm_events_attrs; + /* UOPS_ISSUED.STALLED_CYCLES */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); @@ -2070,6 +2110,8 @@ __init int intel_pmu_init(void) x86_pmu.extra_regs = intel_westmere_extra_regs; x86_pmu.er_flags |= ERF_HAS_RSP_1; + x86_pmu.cpu_events = nhm_events_attrs; + /* UOPS_ISSUED.STALLED_CYCLES */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); @@ -2093,11 +2135,16 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - x86_pmu.extra_regs = intel_snb_extra_regs; + if (boot_cpu_data.x86_model == 45) + x86_pmu.extra_regs = intel_snbep_extra_regs; + else + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; + x86_pmu.cpu_events = snb_events_attrs; + /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); @@ -2119,11 +2166,16 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_ivb_event_constraints; x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - x86_pmu.extra_regs = intel_snb_extra_regs; + if (boot_cpu_data.x86_model == 62) + x86_pmu.extra_regs = intel_snbep_extra_regs; + else + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; + x86_pmu.cpu_events = snb_events_attrs; + /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 826054a..60250f6 100644 
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -24,6 +24,130 @@ struct pebs_record_32 { */ +union intel_x86_pebs_dse { + u64 val; + struct { + unsigned int ld_dse:4; + unsigned int ld_stlb_miss:1; + unsigned int ld_locked:1; + unsigned int ld_reserved:26; + }; + struct { + unsigned int st_l1d_hit:1; + unsigned int st_reserved1:3; + unsigned int st_stlb_miss:1; + unsigned int st_locked:1; + unsigned int st_reserved2:26; + }; +}; + + +/* + * Map PEBS Load Latency Data Source encodings to generic + * memory data source information + */ +#define P(a, b) PERF_MEM_S(a, b) +#define OP_LH (P(OP, LOAD) | P(LVL, HIT)) +#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS)) + +static const u64 pebs_data_source[] = { + P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */ + OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */ + OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */ + OP_LH | P(LVL, L2) | P(SNOOP, NONE), /* 0x03: L2 hit */ + OP_LH | P(LVL, L3) | P(SNOOP, NONE), /* 0x04: L3 hit */ + OP_LH | P(LVL, L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */ + OP_LH | P(LVL, L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */ + OP_LH | P(LVL, L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */ + OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */ + OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/ + OP_LH | P(LVL, LOC_RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */ + OP_LH | P(LVL, REM_RAM1) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */ + OP_LH | P(LVL, LOC_RAM) | SNOOP_NONE_MISS,/* 0x0c: L3 miss, excl */ + OP_LH | P(LVL, REM_RAM1) | SNOOP_NONE_MISS,/* 0x0d: L3 miss, excl */ + OP_LH | P(LVL, IO) | P(SNOOP, NONE), /* 0x0e: I/O */ + OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */ +}; + +static u64 precise_store_data(u64 status) +{ + union intel_x86_pebs_dse dse; + u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2); + + dse.val = status; + + /* + * bit 4: TLB access + * 1 = stored missed 2nd level TLB + * + * so it either hit the walker or the OS + * otherwise hit 2nd level TLB + */ + if (dse.st_stlb_miss) + val |= P(TLB, MISS); + else + val |= P(TLB, HIT); + + /* + * bit 0: hit L1 data cache + * if not set, then all we know is that + * it missed L1D + */ + if (dse.st_l1d_hit) + val |= P(LVL, HIT); + else + val |= P(LVL, MISS); + + /* + * bit 5: Locked prefix + */ + if (dse.st_locked) + val |= P(LOCK, LOCKED); + + return val; +} + +static u64 load_latency_data(u64 status) +{ + union intel_x86_pebs_dse dse; + u64 val; + int model = boot_cpu_data.x86_model; + int fam = boot_cpu_data.x86; + + dse.val = status; + + /* + * use the mapping table for bit 0-3 + */ + val = pebs_data_source[dse.ld_dse]; + + /* + * Nehalem models do not support TLB, Lock infos + */ + if (fam == 0x6 && (model == 26 || model == 30 + || model == 31 || model == 46)) { + val |= P(TLB, NA) | P(LOCK, NA); + return val; + } + /* + * bit 4: TLB access + * 0 = did not miss 2nd level TLB + * 1 = missed 2nd level TLB + */ + if (dse.ld_stlb_miss) + val |= P(TLB, MISS) | P(TLB, L2); + else + val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2); + + /* + * bit 5: locked prefix + */ + if (dse.ld_locked) + val |= P(LOCK, LOCKED); + + return val; +} + struct pebs_record_core { u64 flags, ip; u64 ax, bx, cx, dx; @@ -314,10 +438,11 @@ int intel_pmu_drain_bts_buffer(void) if (top <= at) return 0; + memset(®s, 0, sizeof(regs)); + ds->bts_index = ds->bts_buffer_base; perf_sample_data_init(&data, 0, 
event->hw.last_period); - regs.ip = 0; /* * Prepare a generic sample, i.e. fill in the invariant fields. @@ -364,7 +489,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { }; struct event_constraint intel_nehalem_pebs_event_constraints[] = { - INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ + INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ @@ -379,7 +504,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = { }; struct event_constraint intel_westmere_pebs_event_constraints[] = { - INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ + INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ @@ -399,7 +524,8 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ + INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ + INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ @@ -413,7 +539,8 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = { INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ + INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ + INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ @@ -430,8 +557,10 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event) if (x86_pmu.pebs_constraints) { for_each_event_constraint(c, x86_pmu.pebs_constraints) { - if ((event->hw.config & c->cmask) == c->code) + if ((event->hw.config & c->cmask) == c->code) { + event->hw.flags |= c->flags; return c; + } } } @@ -446,6 +575,11 @@ void intel_pmu_pebs_enable(struct perf_event *event) hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; cpuc->pebs_enabled |= 1ULL << hwc->idx; + + if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) + cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); + else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) + cpuc->pebs_enabled |= 1ULL << 63; } void intel_pmu_pebs_disable(struct perf_event *event) @@ -558,20 +692,51 @@ static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *__pebs) { /* - * We cast to pebs_record_core since that is a subset of - * both formats and we don't use the other fields in this - * routine. 
+ * We cast to pebs_record_nhm to get the load latency data + * if extra_reg MSR_PEBS_LD_LAT_THRESHOLD used */ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct pebs_record_core *pebs = __pebs; + struct pebs_record_nhm *pebs = __pebs; struct perf_sample_data data; struct pt_regs regs; + u64 sample_type; + int fll, fst; if (!intel_pmu_save_and_restart(event)) return; + fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; + fst = event->hw.flags & PERF_X86_EVENT_PEBS_ST; + perf_sample_data_init(&data, 0, event->hw.last_period); + data.period = event->hw.last_period; + sample_type = event->attr.sample_type; + + /* + * if PEBS-LL or PreciseStore + */ + if (fll || fst) { + if (sample_type & PERF_SAMPLE_ADDR) + data.addr = pebs->dla; + + /* + * Use latency for weight (only avail with PEBS-LL) + */ + if (fll && (sample_type & PERF_SAMPLE_WEIGHT)) + data.weight = pebs->lat; + + /* + * data.data_src encodes the data source + */ + if (sample_type & PERF_SAMPLE_DATA_SRC) { + if (fll) + data.data_src.val = load_latency_data(pebs->dse); + else + data.data_src.val = precise_store_data(pebs->dse); + } + } + /* * We use the interrupt regs as a base because the PEBS record * does not contain a full regs set, specifically it seems to @@ -729,3 +894,13 @@ void intel_ds_init(void) } } } + +void perf_restore_debug_store(void) +{ + struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); + + if (!x86_pmu.bts && !x86_pmu.pebs) + return; + + wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds); +} diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index b43200d..d0f9e5a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -17,6 +17,9 @@ static struct event_constraint constraint_fixed = static struct event_constraint constraint_empty = EVENT_CONSTRAINT(0, 0, 0); +#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ + ((1ULL << (n)) - 1))) + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); @@ -31,9 +34,13 @@ DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15"); DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); +DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); +DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47"); DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); +DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22"); DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); +DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60"); DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); @@ -110,6 +117,21 @@ static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_even reg1->alloc = 0; } +static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) +{ + struct intel_uncore_extra_reg *er; + unsigned long flags; + u64 config; + + er = &box->shared_regs[idx]; + + raw_spin_lock_irqsave(&er->lock, flags); + config = er->config; + 
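The __BITS_VALUE() helper added above is what the new C-box and PCU constraint code further down uses to keep several small reference counters packed into a single atomic value (five 6-bit counts for the C-box filter fields, 8-bit slices for the PCU). A rough user-space sketch of that packing trick, with the 5 x 6-bit layout taken from the C-box code:

#include <stdio.h>

/* Simplified version of the kernel macro: sub-field i of width n bits. */
#define BITS_VALUE(x, i, n)	(((x) >> ((i) * (n))) & ((1u << (n)) - 1))

int main(void)
{
	unsigned int ref = 0;
	int i;

	ref += 1u << (2 * 6);	/* take one reference on field 2 */
	ref += 1u << (2 * 6);	/* and a second one */
	ref += 1u << (4 * 6);	/* one reference on field 4 */

	for (i = 0; i < 5; i++)
		printf("field %d: %u refs\n", i, BITS_VALUE(ref, i, 6));
	return 0;
}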
raw_spin_unlock_irqrestore(&er->lock, flags); + + return config; +} + /* Sandy Bridge-EP uncore support */ static struct intel_uncore_type snbep_uncore_cbox; static struct intel_uncore_type snbep_uncore_pcu; @@ -205,7 +227,7 @@ static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct p struct hw_perf_event_extra *reg1 = &hwc->extra_reg; if (reg1->idx != EXTRA_REG_NONE) - wrmsrl(reg1->reg, reg1->config); + wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0)); wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); } @@ -226,29 +248,6 @@ static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); } -static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - - if (box->pmu->type == &snbep_uncore_cbox) { - reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + - SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; - reg1->config = event->attr.config1 & - SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK; - } else { - if (box->pmu->type == &snbep_uncore_pcu) { - reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; - reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK; - } else { - return 0; - } - } - reg1->idx = 0; - - return 0; -} - static struct attribute *snbep_uncore_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -345,16 +344,16 @@ static struct attribute_group snbep_uncore_qpi_format_group = { .attrs = snbep_uncore_qpi_formats_attr, }; +#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ + .init_box = snbep_uncore_msr_init_box, \ + .disable_box = snbep_uncore_msr_disable_box, \ + .enable_box = snbep_uncore_msr_enable_box, \ + .disable_event = snbep_uncore_msr_disable_event, \ + .enable_event = snbep_uncore_msr_enable_event, \ + .read_counter = uncore_msr_read_counter + static struct intel_uncore_ops snbep_uncore_msr_ops = { - .init_box = snbep_uncore_msr_init_box, - .disable_box = snbep_uncore_msr_disable_box, - .enable_box = snbep_uncore_msr_enable_box, - .disable_event = snbep_uncore_msr_disable_event, - .enable_event = snbep_uncore_msr_enable_event, - .read_counter = uncore_msr_read_counter, - .get_constraint = uncore_get_constraint, - .put_constraint = uncore_put_constraint, - .hw_config = snbep_uncore_hw_config, + SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), }; static struct intel_uncore_ops snbep_uncore_pci_ops = { @@ -372,6 +371,7 @@ static struct event_constraint snbep_uncore_cbox_constraints[] = { UNCORE_EVENT_CONSTRAINT(0x04, 0x3), UNCORE_EVENT_CONSTRAINT(0x05, 0x3), UNCORE_EVENT_CONSTRAINT(0x07, 0x3), + UNCORE_EVENT_CONSTRAINT(0x09, 0x3), UNCORE_EVENT_CONSTRAINT(0x11, 0x1), UNCORE_EVENT_CONSTRAINT(0x12, 0x3), UNCORE_EVENT_CONSTRAINT(0x13, 0x3), @@ -421,6 +421,14 @@ static struct event_constraint snbep_uncore_r3qpi_constraints[] = { UNCORE_EVENT_CONSTRAINT(0x24, 0x3), UNCORE_EVENT_CONSTRAINT(0x25, 0x3), UNCORE_EVENT_CONSTRAINT(0x26, 0x3), + UNCORE_EVENT_CONSTRAINT(0x28, 0x3), + UNCORE_EVENT_CONSTRAINT(0x29, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2a, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), UNCORE_EVENT_CONSTRAINT(0x30, 0x3), UNCORE_EVENT_CONSTRAINT(0x31, 0x3), UNCORE_EVENT_CONSTRAINT(0x32, 0x3), @@ -428,6 +436,8 @@ static struct event_constraint snbep_uncore_r3qpi_constraints[] = { UNCORE_EVENT_CONSTRAINT(0x34, 0x3), UNCORE_EVENT_CONSTRAINT(0x36, 0x3), 
UNCORE_EVENT_CONSTRAINT(0x37, 0x3), + UNCORE_EVENT_CONSTRAINT(0x38, 0x3), + UNCORE_EVENT_CONSTRAINT(0x39, 0x3), EVENT_CONSTRAINT_END }; @@ -446,6 +456,145 @@ static struct intel_uncore_type snbep_uncore_ubox = { .format_group = &snbep_uncore_ubox_format_group, }; +static struct extra_reg snbep_uncore_cbox_extra_regs[] = { + SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, + SNBEP_CBO_PMON_CTL_TID_EN, 0x1), + SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), + SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2), + EVENT_EXTRA_END +}; + +static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct intel_uncore_extra_reg *er = &box->shared_regs[0]; + int i; + + if (uncore_box_is_fake(box)) + return; + + for (i = 0; i < 5; i++) { + if (reg1->alloc & (0x1 << i)) + atomic_sub(1 << (i * 6), &er->ref); + } + reg1->alloc = 0; +} + +static struct event_constraint * +__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event, + u64 (*cbox_filter_mask)(int fields)) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct intel_uncore_extra_reg *er = &box->shared_regs[0]; + int i, alloc = 0; + unsigned long flags; + u64 mask; + + if (reg1->idx == EXTRA_REG_NONE) + return NULL; + + raw_spin_lock_irqsave(&er->lock, flags); + for (i = 0; i < 5; i++) { + if (!(reg1->idx & (0x1 << i))) + continue; + if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) + continue; + + mask = cbox_filter_mask(0x1 << i); + if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) || + !((reg1->config ^ er->config) & mask)) { + atomic_add(1 << (i * 6), &er->ref); + er->config &= ~mask; + er->config |= reg1->config & mask; + alloc |= (0x1 << i); + } else { + break; + } + } + raw_spin_unlock_irqrestore(&er->lock, flags); + if (i < 5) + goto fail; + + if (!uncore_box_is_fake(box)) + reg1->alloc |= alloc; + + return 0; +fail: + for (; i >= 0; i--) { + if (alloc & (0x1 << i)) + atomic_sub(1 << (i * 6), &er->ref); + } + return &constraint_empty; +} + +static u64 snbep_cbox_filter_mask(int fields) +{ + u64 mask = 0; + + if (fields & 0x1) + mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID; + if (fields & 0x2) + mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID; + if (fields & 0x4) + mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE; + if (fields & 0x8) + mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC; + + return mask; +} + +static struct event_constraint * +snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask); +} + +static int 
snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct extra_reg *er; + int idx = 0; + + for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) { + if (er->event != (event->hw.config & er->config_mask)) + continue; + idx |= er->idx; + } + + if (idx) { + reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + + SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; + reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx); + reg1->idx = idx; + } + return 0; +} + +static struct intel_uncore_ops snbep_uncore_cbox_ops = { + SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), + .hw_config = snbep_cbox_hw_config, + .get_constraint = snbep_cbox_get_constraint, + .put_constraint = snbep_cbox_put_constraint, +}; + static struct intel_uncore_type snbep_uncore_cbox = { .name = "cbox", .num_counters = 4, @@ -458,10 +607,104 @@ static struct intel_uncore_type snbep_uncore_cbox = { .msr_offset = SNBEP_CBO_MSR_OFFSET, .num_shared_regs = 1, .constraints = snbep_uncore_cbox_constraints, - .ops = &snbep_uncore_msr_ops, + .ops = &snbep_uncore_cbox_ops, .format_group = &snbep_uncore_cbox_format_group, }; +static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + u64 config = reg1->config; + + if (new_idx > reg1->idx) + config <<= 8 * (new_idx - reg1->idx); + else + config >>= 8 * (reg1->idx - new_idx); + + if (modify) { + hwc->config += new_idx - reg1->idx; + reg1->config = config; + reg1->idx = new_idx; + } + return config; +} + +static struct event_constraint * +snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct intel_uncore_extra_reg *er = &box->shared_regs[0]; + unsigned long flags; + int idx = reg1->idx; + u64 mask, config1 = reg1->config; + bool ok = false; + + if (reg1->idx == EXTRA_REG_NONE || + (!uncore_box_is_fake(box) && reg1->alloc)) + return NULL; +again: + mask = 0xff << (idx * 8); + raw_spin_lock_irqsave(&er->lock, flags); + if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) || + !((config1 ^ er->config) & mask)) { + atomic_add(1 << (idx * 8), &er->ref); + er->config &= ~mask; + er->config |= config1 & mask; + ok = true; + } + raw_spin_unlock_irqrestore(&er->lock, flags); + + if (!ok) { + idx = (idx + 1) % 4; + if (idx != reg1->idx) { + config1 = snbep_pcu_alter_er(event, idx, false); + goto again; + } + return &constraint_empty; + } + + if (!uncore_box_is_fake(box)) { + if (idx != reg1->idx) + snbep_pcu_alter_er(event, idx, true); + reg1->alloc = 1; + } + return NULL; +} + +static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct intel_uncore_extra_reg *er = &box->shared_regs[0]; + + if (uncore_box_is_fake(box) || !reg1->alloc) + return; + + atomic_sub(1 << (reg1->idx * 8), &er->ref); + reg1->alloc = 0; +} + +static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; + + if (ev_sel >= 0xb && ev_sel <= 0xe) { + reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; + reg1->idx = ev_sel - 0xb; + reg1->config = event->attr.config1 & (0xff << reg1->idx); + } + return 0; +} + +static struct intel_uncore_ops snbep_uncore_pcu_ops = { + 
SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), + .hw_config = snbep_pcu_hw_config, + .get_constraint = snbep_pcu_get_constraint, + .put_constraint = snbep_pcu_put_constraint, +}; + static struct intel_uncore_type snbep_uncore_pcu = { .name = "pcu", .num_counters = 4, @@ -472,7 +715,7 @@ static struct intel_uncore_type snbep_uncore_pcu = { .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, .num_shared_regs = 1, - .ops = &snbep_uncore_msr_ops, + .ops = &snbep_uncore_pcu_ops, .format_group = &snbep_uncore_pcu_format_group, }; @@ -544,55 +787,63 @@ static struct intel_uncore_type snbep_uncore_r3qpi = { SNBEP_UNCORE_PCI_COMMON_INIT(), }; +enum { + SNBEP_PCI_UNCORE_HA, + SNBEP_PCI_UNCORE_IMC, + SNBEP_PCI_UNCORE_QPI, + SNBEP_PCI_UNCORE_R2PCIE, + SNBEP_PCI_UNCORE_R3QPI, +}; + static struct intel_uncore_type *snbep_pci_uncores[] = { - &snbep_uncore_ha, - &snbep_uncore_imc, - &snbep_uncore_qpi, - &snbep_uncore_r2pcie, - &snbep_uncore_r3qpi, + [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha, + [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc, + [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi, + [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie, + [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi, NULL, }; static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = { { /* Home Agent */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), - .driver_data = (unsigned long)&snbep_uncore_ha, + .driver_data = SNBEP_PCI_UNCORE_HA, }, { /* MC Channel 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), - .driver_data = (unsigned long)&snbep_uncore_imc, + .driver_data = SNBEP_PCI_UNCORE_IMC, }, { /* MC Channel 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), - .driver_data = (unsigned long)&snbep_uncore_imc, + .driver_data = SNBEP_PCI_UNCORE_IMC, }, { /* MC Channel 2 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), - .driver_data = (unsigned long)&snbep_uncore_imc, + .driver_data = SNBEP_PCI_UNCORE_IMC, }, { /* MC Channel 3 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), - .driver_data = (unsigned long)&snbep_uncore_imc, + .driver_data = SNBEP_PCI_UNCORE_IMC, }, { /* QPI Port 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), - .driver_data = (unsigned long)&snbep_uncore_qpi, + .driver_data = SNBEP_PCI_UNCORE_QPI, }, { /* QPI Port 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), - .driver_data = (unsigned long)&snbep_uncore_qpi, + .driver_data = SNBEP_PCI_UNCORE_QPI, }, - { /* P2PCIe */ + { /* R2PCIe */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), - .driver_data = (unsigned long)&snbep_uncore_r2pcie, + .driver_data = SNBEP_PCI_UNCORE_R2PCIE, }, { /* R3QPI Link 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), - .driver_data = (unsigned long)&snbep_uncore_r3qpi, + .driver_data = SNBEP_PCI_UNCORE_R3QPI, }, { /* R3QPI Link 1 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), - .driver_data = (unsigned long)&snbep_uncore_r3qpi, + .driver_data = SNBEP_PCI_UNCORE_R3QPI, }, { /* end: all zeroes */ } }; @@ -605,7 +856,7 @@ static struct pci_driver snbep_uncore_pci_driver = { /* * build pci bus to socket mapping */ -static int snbep_pci2phy_map_init(void) +static int snbep_pci2phy_map_init(int devid) { struct pci_dev *ubox_dev = NULL; int i, bus, nodeid; @@ -614,9 +865,7 @@ static int snbep_pci2phy_map_init(void) while (1) { /* find the UBOX device */ - ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX, - ubox_dev); + 
ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev); if (!ubox_dev) break; bus = ubox_dev->bus->number; @@ -639,7 +888,7 @@ static int snbep_pci2phy_map_init(void) break; } } - }; + } if (ubox_dev) pci_dev_put(ubox_dev); @@ -648,6 +897,440 @@ static int snbep_pci2phy_map_init(void) } /* end of Sandy Bridge-EP uncore support */ +/* IvyTown uncore support */ +static void ivt_uncore_msr_init_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + if (msr) + wrmsrl(msr, IVT_PMON_BOX_CTL_INT); +} + +static void ivt_uncore_pci_init_box(struct intel_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + + pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT); +} + +#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \ + .init_box = ivt_uncore_msr_init_box, \ + .disable_box = snbep_uncore_msr_disable_box, \ + .enable_box = snbep_uncore_msr_enable_box, \ + .disable_event = snbep_uncore_msr_disable_event, \ + .enable_event = snbep_uncore_msr_enable_event, \ + .read_counter = uncore_msr_read_counter + +static struct intel_uncore_ops ivt_uncore_msr_ops = { + IVT_UNCORE_MSR_OPS_COMMON_INIT(), +}; + +static struct intel_uncore_ops ivt_uncore_pci_ops = { + .init_box = ivt_uncore_pci_init_box, + .disable_box = snbep_uncore_pci_disable_box, + .enable_box = snbep_uncore_pci_enable_box, + .disable_event = snbep_uncore_pci_disable_event, + .enable_event = snbep_uncore_pci_enable_event, + .read_counter = snbep_uncore_pci_read_counter, +}; + +#define IVT_UNCORE_PCI_COMMON_INIT() \ + .perf_ctr = SNBEP_PCI_PMON_CTR0, \ + .event_ctl = SNBEP_PCI_PMON_CTL0, \ + .event_mask = IVT_PMON_RAW_EVENT_MASK, \ + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ + .ops = &ivt_uncore_pci_ops, \ + .format_group = &ivt_uncore_format_group + +static struct attribute *ivt_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute *ivt_uncore_ubox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh5.attr, + NULL, +}; + +static struct attribute *ivt_uncore_cbox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_tid_en.attr, + &format_attr_thresh8.attr, + &format_attr_filter_tid.attr, + &format_attr_filter_link.attr, + &format_attr_filter_state2.attr, + &format_attr_filter_nid2.attr, + &format_attr_filter_opc2.attr, + NULL, +}; + +static struct attribute *ivt_uncore_pcu_formats_attr[] = { + &format_attr_event_ext.attr, + &format_attr_occ_sel.attr, + &format_attr_edge.attr, + &format_attr_thresh5.attr, + &format_attr_occ_invert.attr, + &format_attr_occ_edge.attr, + &format_attr_filter_band0.attr, + &format_attr_filter_band1.attr, + &format_attr_filter_band2.attr, + &format_attr_filter_band3.attr, + NULL, +}; + +static struct attribute *ivt_uncore_qpi_formats_attr[] = { + &format_attr_event_ext.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group ivt_uncore_format_group = { + .name = "format", + .attrs = ivt_uncore_formats_attr, +}; + +static struct attribute_group ivt_uncore_ubox_format_group = { + .name = "format", + .attrs = ivt_uncore_ubox_formats_attr, +}; + +static struct attribute_group ivt_uncore_cbox_format_group = { + .name = "format", + .attrs = ivt_uncore_cbox_formats_attr, +}; + +static struct attribute_group 
ivt_uncore_pcu_format_group = { + .name = "format", + .attrs = ivt_uncore_pcu_formats_attr, +}; + +static struct attribute_group ivt_uncore_qpi_format_group = { + .name = "format", + .attrs = ivt_uncore_qpi_formats_attr, +}; + +static struct intel_uncore_type ivt_uncore_ubox = { + .name = "ubox", + .num_counters = 2, + .num_boxes = 1, + .perf_ctr_bits = 44, + .fixed_ctr_bits = 48, + .perf_ctr = SNBEP_U_MSR_PMON_CTR0, + .event_ctl = SNBEP_U_MSR_PMON_CTL0, + .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK, + .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, + .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, + .ops = &ivt_uncore_msr_ops, + .format_group = &ivt_uncore_ubox_format_group, +}; + +static struct extra_reg ivt_uncore_cbox_extra_regs[] = { + SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, + SNBEP_CBO_PMON_CTL_TID_EN, 0x1), + SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), + SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), + SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), + SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), + SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), + EVENT_EXTRA_END +}; + +static u64 ivt_cbox_filter_mask(int fields) +{ + u64 mask = 0; + + if (fields & 0x1) + mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID; + if (fields & 0x2) + mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK; + if (fields & 0x4) + mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE; + if (fields & 0x8) + mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID; + if (fields & 0x10) + mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC; + + return mask; +} + +static struct event_constraint * +ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask); +} + +static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct extra_reg *er; + int idx = 0; + + for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) { + if (er->event != (event->hw.config & er->config_mask)) + continue; + idx |= er->idx; + } + + if (idx) { + reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + + SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; + reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx); + reg1->idx = idx; + } + return 0; +} + +static void 
ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + + if (reg1->idx != EXTRA_REG_NONE) { + u64 filter = uncore_shared_reg_config(box, 0); + wrmsrl(reg1->reg, filter & 0xffffffff); + wrmsrl(reg1->reg + 6, filter >> 32); + } + + wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); +} + +static struct intel_uncore_ops ivt_uncore_cbox_ops = { + .init_box = ivt_uncore_msr_init_box, + .disable_box = snbep_uncore_msr_disable_box, + .enable_box = snbep_uncore_msr_enable_box, + .disable_event = snbep_uncore_msr_disable_event, + .enable_event = ivt_cbox_enable_event, + .read_counter = uncore_msr_read_counter, + .hw_config = ivt_cbox_hw_config, + .get_constraint = ivt_cbox_get_constraint, + .put_constraint = snbep_cbox_put_constraint, +}; + +static struct intel_uncore_type ivt_uncore_cbox = { + .name = "cbox", + .num_counters = 4, + .num_boxes = 15, + .perf_ctr_bits = 44, + .event_ctl = SNBEP_C0_MSR_PMON_CTL0, + .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, + .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, + .msr_offset = SNBEP_CBO_MSR_OFFSET, + .num_shared_regs = 1, + .constraints = snbep_uncore_cbox_constraints, + .ops = &ivt_uncore_cbox_ops, + .format_group = &ivt_uncore_cbox_format_group, +}; + +static struct intel_uncore_ops ivt_uncore_pcu_ops = { + IVT_UNCORE_MSR_OPS_COMMON_INIT(), + .hw_config = snbep_pcu_hw_config, + .get_constraint = snbep_pcu_get_constraint, + .put_constraint = snbep_pcu_put_constraint, +}; + +static struct intel_uncore_type ivt_uncore_pcu = { + .name = "pcu", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, + .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, + .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, + .num_shared_regs = 1, + .ops = &ivt_uncore_pcu_ops, + .format_group = &ivt_uncore_pcu_format_group, +}; + +static struct intel_uncore_type *ivt_msr_uncores[] = { + &ivt_uncore_ubox, + &ivt_uncore_cbox, + &ivt_uncore_pcu, + NULL, +}; + +static struct intel_uncore_type ivt_uncore_ha = { + .name = "ha", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + IVT_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type ivt_uncore_imc = { + .name = "imc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, + .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, + IVT_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type ivt_uncore_qpi = { + .name = "qpi", + .num_counters = 4, + .num_boxes = 3, + .perf_ctr_bits = 48, + .perf_ctr = SNBEP_PCI_PMON_CTR0, + .event_ctl = SNBEP_PCI_PMON_CTL0, + .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, + .ops = &ivt_uncore_pci_ops, + .format_group = &ivt_uncore_qpi_format_group, +}; + +static struct intel_uncore_type ivt_uncore_r2pcie = { + .name = "r2pcie", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 44, + .constraints = snbep_uncore_r2pcie_constraints, + IVT_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type ivt_uncore_r3qpi = { + .name = "r3qpi", + .num_counters = 3, + .num_boxes = 2, + .perf_ctr_bits = 44, + .constraints = snbep_uncore_r3qpi_constraints, + IVT_UNCORE_PCI_COMMON_INIT(), +}; + +enum { + IVT_PCI_UNCORE_HA, + IVT_PCI_UNCORE_IMC, + IVT_PCI_UNCORE_QPI, + IVT_PCI_UNCORE_R2PCIE, + IVT_PCI_UNCORE_R3QPI, +}; + +static struct 
intel_uncore_type *ivt_pci_uncores[] = { + [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha, + [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc, + [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi, + [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie, + [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi, + NULL, +}; + +static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = { + { /* Home Agent 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), + .driver_data = IVT_PCI_UNCORE_HA, + }, + { /* Home Agent 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), + .driver_data = IVT_PCI_UNCORE_HA, + }, + { /* MC0 Channel 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), + .driver_data = IVT_PCI_UNCORE_IMC, + }, + { /* MC0 Channel 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), + .driver_data = IVT_PCI_UNCORE_IMC, + }, + { /* MC0 Channel 3 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), + .driver_data = IVT_PCI_UNCORE_IMC, + }, + { /* MC0 Channel 4 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), + .driver_data = IVT_PCI_UNCORE_IMC, + }, + { /* MC1 Channel 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), + .driver_data = IVT_PCI_UNCORE_IMC, + }, + { /* MC1 Channel 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), + .driver_data = IVT_PCI_UNCORE_IMC, + }, + { /* MC1 Channel 3 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), + .driver_data = IVT_PCI_UNCORE_IMC, + }, + { /* MC1 Channel 4 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), + .driver_data = IVT_PCI_UNCORE_IMC, + }, + { /* QPI0 Port 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), + .driver_data = IVT_PCI_UNCORE_QPI, + }, + { /* QPI0 Port 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), + .driver_data = IVT_PCI_UNCORE_QPI, + }, + { /* QPI1 Port 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), + .driver_data = IVT_PCI_UNCORE_QPI, + }, + { /* R2PCIe */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), + .driver_data = IVT_PCI_UNCORE_R2PCIE, + }, + { /* R3QPI0 Link 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), + .driver_data = IVT_PCI_UNCORE_R3QPI, + }, + { /* R3QPI0 Link 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), + .driver_data = IVT_PCI_UNCORE_R3QPI, + }, + { /* R3QPI1 Link 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), + .driver_data = IVT_PCI_UNCORE_R3QPI, + }, + { /* end: all zeroes */ } +}; + +static struct pci_driver ivt_uncore_pci_driver = { + .name = "ivt_uncore", + .id_table = ivt_uncore_pci_ids, +}; +/* end of IvyTown uncore support */ + /* Sandy Bridge uncore support */ static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) { @@ -808,9 +1491,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = { /* end of Nehalem uncore support */ /* Nehalem-EX uncore support */ -#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ - ((1ULL << (n)) - 1))) - DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); @@ -1161,7 +1841,7 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { }; /* Nehalem-EX or Westmere-EX ? 
*/ -bool uncore_nhmex; +static bool uncore_nhmex; static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) { @@ -1239,7 +1919,7 @@ static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx) atomic_sub(1 << (idx * 8), &er->ref); } -u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) +static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) { struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &hwc->extra_reg; @@ -1554,7 +2234,7 @@ static struct intel_uncore_type nhmex_uncore_mbox = { .format_group = &nhmex_uncore_mbox_format_group, }; -void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) +static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &hwc->extra_reg; @@ -1724,21 +2404,6 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event return 0; } -static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx) -{ - struct intel_uncore_extra_reg *er; - unsigned long flags; - u64 config; - - er = &box->shared_regs[idx]; - - raw_spin_lock_irqsave(&er->lock, flags); - config = er->config; - raw_spin_unlock_irqrestore(&er->lock, flags); - - return config; -} - static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -1759,7 +2424,7 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per case 2: case 3: wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), - nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5)); + uncore_shared_reg_config(box, 2 + (idx / 6) * 5)); break; case 4: wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), @@ -2285,7 +2950,7 @@ out: return ret; } -int uncore_pmu_event_init(struct perf_event *event) +static int uncore_pmu_event_init(struct perf_event *event) { struct intel_uncore_pmu *pmu; struct intel_uncore_box *box; @@ -2438,7 +3103,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type) type->unconstrainted = (struct event_constraint) __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, - 0, type->num_counters, 0); + 0, type->num_counters, 0, 0); for (i = 0; i < type->num_boxes; i++) { pmus[i].func_id = -1; @@ -2556,6 +3221,8 @@ static void uncore_pci_remove(struct pci_dev *pdev) if (WARN_ON_ONCE(phys_id != box->phys_id)) return; + pci_set_drvdata(pdev, NULL); + raw_spin_lock(&uncore_box_lock); list_del(&box->list); raw_spin_unlock(&uncore_box_lock); @@ -2574,11 +3241,7 @@ static void uncore_pci_remove(struct pci_dev *pdev) static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - struct intel_uncore_type *type; - - type = (struct intel_uncore_type *)id->driver_data; - - return uncore_pci_add(type, pdev); + return uncore_pci_add(pci_uncores[id->driver_data], pdev); } static int __init uncore_pci_init(void) @@ -2587,12 +3250,19 @@ static int __init uncore_pci_init(void) switch (boot_cpu_data.x86_model) { case 45: /* Sandy Bridge-EP */ - ret = snbep_pci2phy_map_init(); + ret = snbep_pci2phy_map_init(0x3ce0); if (ret) return ret; pci_uncores = snbep_pci_uncores; uncore_pci_driver = &snbep_uncore_pci_driver; break; + case 62: /* IvyTown */ + ret = snbep_pci2phy_map_init(0x0e1e); + if (ret) + return ret; + pci_uncores = ivt_pci_uncores; + uncore_pci_driver = &ivt_uncore_pci_driver; + break; default: return 0; } @@ -2622,6 +3292,21 @@ static 
void __init uncore_pci_exit(void) } } +/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */ +static LIST_HEAD(boxes_to_free); + +static void __cpuinit uncore_kfree_boxes(void) +{ + struct intel_uncore_box *box; + + while (!list_empty(&boxes_to_free)) { + box = list_entry(boxes_to_free.next, + struct intel_uncore_box, list); + list_del(&box->list); + kfree(box); + } +} + static void __cpuinit uncore_cpu_dying(int cpu) { struct intel_uncore_type *type; @@ -2636,7 +3321,7 @@ static void __cpuinit uncore_cpu_dying(int cpu) box = *per_cpu_ptr(pmu->box, cpu); *per_cpu_ptr(pmu->box, cpu) = NULL; if (box && atomic_dec_and_test(&box->refcnt)) - kfree(box); + list_add(&box->list, &boxes_to_free); } } } @@ -2666,8 +3351,11 @@ static int __cpuinit uncore_cpu_starting(int cpu) if (exist && exist->phys_id == phys_id) { atomic_inc(&exist->refcnt); *per_cpu_ptr(pmu->box, cpu) = exist; - kfree(box); - box = NULL; + if (box) { + list_add(&box->list, + &boxes_to_free); + box = NULL; + } break; } } @@ -2806,6 +3494,10 @@ static int case CPU_DYING: uncore_cpu_dying(cpu); break; + case CPU_ONLINE: + case CPU_DEAD: + uncore_kfree_boxes(); + break; default: break; } @@ -2871,6 +3563,12 @@ static int __init uncore_cpu_init(void) nhmex_uncore_cbox.num_boxes = max_cores; msr_uncores = nhmex_msr_uncores; break; + case 62: /* IvyTown */ + if (ivt_uncore_cbox.num_boxes > max_cores) + ivt_uncore_cbox.num_boxes = max_cores; + msr_uncores = ivt_msr_uncores; + break; + default: return 0; } diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index e68a455..f952891 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -76,7 +76,7 @@ #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00 #define SNBEP_PMON_CTL_RST (1 << 17) #define SNBEP_PMON_CTL_EDGE_DET (1 << 18) -#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */ +#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) #define SNBEP_PMON_CTL_EN (1 << 22) #define SNBEP_PMON_CTL_INVERT (1 << 23) #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000 @@ -148,9 +148,20 @@ #define SNBEP_C0_MSR_PMON_CTL0 0xd10 #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04 #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14 -#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f #define SNBEP_CBO_MSR_OFFSET 0x20 +#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f +#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00 +#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000 +#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000 + +#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \ + .event = (e), \ + .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \ + .config_mask = (m), \ + .idx = (i) \ +} + /* SNB-EP PCU register */ #define SNBEP_PCU_MSR_PMON_CTR0 0xc36 #define SNBEP_PCU_MSR_PMON_CTL0 0xc30 @@ -160,6 +171,55 @@ #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd +/* IVT event control */ +#define IVT_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ + SNBEP_PMON_BOX_CTL_RST_CTRS) +#define IVT_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ + SNBEP_PMON_CTL_UMASK_MASK | \ + SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_PMON_CTL_TRESH_MASK) +/* IVT Ubox */ +#define IVT_U_MSR_PMON_GLOBAL_CTL 0xc00 +#define IVT_U_PMON_GLOBAL_FRZ_ALL (1 << 31) +#define IVT_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29) + +#define IVT_U_MSR_PMON_RAW_EVENT_MASK \ + (SNBEP_PMON_CTL_EV_SEL_MASK | \ + SNBEP_PMON_CTL_UMASK_MASK | \ + SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_U_MSR_PMON_CTL_TRESH_MASK) +/* IVT Cbo */ +#define 
IVT_CBO_MSR_PMON_RAW_EVENT_MASK (IVT_PMON_RAW_EVENT_MASK | \ + SNBEP_CBO_PMON_CTL_TID_EN) + +#define IVT_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0) +#define IVT_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5) +#define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17) +#define IVT_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32) +#define IVT_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52) +#define IVT_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61) +#define IVT_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62) +#define IVT_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63) + +/* IVT home agent */ +#define IVT_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16) +#define IVT_HA_PCI_PMON_RAW_EVENT_MASK \ + (IVT_PMON_RAW_EVENT_MASK | \ + IVT_HA_PCI_PMON_CTL_Q_OCC_RST) +/* IVT PCU */ +#define IVT_PCU_MSR_PMON_RAW_EVENT_MASK \ + (SNBEP_PMON_CTL_EV_SEL_MASK | \ + SNBEP_PMON_CTL_EV_SEL_EXT | \ + SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ + SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ + SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ + SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) +/* IVT QPI */ +#define IVT_QPI_PCI_PMON_RAW_EVENT_MASK \ + (IVT_PMON_RAW_EVENT_MASK | \ + SNBEP_PMON_CTL_EV_SEL_EXT) + /* NHM-EX event control */ #define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff #define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c index 4b7731b..838fa87 100644 --- a/arch/x86/kernel/cpu/perf_event_knc.c +++ b/arch/x86/kernel/cpu/perf_event_knc.c @@ -17,7 +17,7 @@ static const u64 knc_perfmon_event_map[] = [PERF_COUNT_HW_BRANCH_MISSES] = 0x002b, }; -static __initconst u64 knc_hw_cache_event_ids +static const u64 __initconst knc_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -284,7 +284,7 @@ static struct attribute *intel_knc_formats_attr[] = { NULL, }; -static __initconst struct x86_pmu knc_pmu = { +static const struct x86_pmu knc_pmu __initconst = { .name = "knc", .handle_irq = knc_pmu_handle_irq, .disable_all = knc_pmu_disable_all, diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 92c7e39..3486e66 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void) * So at moment let leave metrics turned on forever -- it's * ok for now but need to be revisited! 
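The IVT C-box filter layout defined above spreads the tid, link, state, nid and opcode fields across a single 64-bit filter value. A hedged sketch, with purely made-up field values, that packs the fields at the offsets implied by those masks:

#include <stdio.h>
#include <stdint.h>

/* Offsets and widths follow the IVT_CB0_MSR_PMON_BOX_FILTER_* masks above. */
static uint64_t ivt_cbox_filter(unsigned tid, unsigned link, unsigned state,
				unsigned nid, unsigned opc)
{
	return ((uint64_t)tid   & 0x1f)   <<  0 |
	       ((uint64_t)link  & 0xf)    <<  5 |
	       ((uint64_t)state & 0x3f)   << 17 |
	       ((uint64_t)nid   & 0xffff) << 32 |
	       ((uint64_t)opc   & 0x1ff)  << 52;
}

int main(void)
{
	/* Example values only; real state/opcode encodings are hardware-specific. */
	printf("filter = %#llx\n",
	       (unsigned long long)ivt_cbox_filter(0, 1, 0x3f, 1, 0x182));
	return 0;
}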
* - * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0); - * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0); + * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0); + * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0); */ } @@ -910,8 +910,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event) * asserted again and again */ (void)wrmsrl_safe(hwc->config_base, - (u64)(p4_config_unpack_cccr(hwc->config)) & - ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); + p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); } static void p4_pmu_disable_all(void) @@ -957,7 +956,7 @@ static void p4_pmu_enable_event(struct perf_event *event) u64 escr_addr, cccr; bind = &p4_event_bind_map[idx]; - escr_addr = (u64)bind->escr_msr[thread]; + escr_addr = bind->escr_msr[thread]; /* * - we dont support cascaded counters yet diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index 4820c23..b1e2fe1 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c @@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] = }; -static u64 p6_hw_cache_event_ids +static const u64 __initconst p6_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index e280253..37a198b 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -34,9 +34,9 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) "fpu_exception\t: %s\n" "cpuid level\t: %d\n" "wp\t\t: %s\n", - c->fdiv_bug ? "yes" : "no", - c->f00f_bug ? "yes" : "no", - c->coma_bug ? "yes" : "no", + static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no", + static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no", + static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no", c->hard_math ? "yes" : "no", c->hard_math ? 
"yes" : "no", c->cpuid_level, diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index ee8e9ab..d92b5da 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -39,8 +39,9 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 }, - { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, + { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, + { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 }, { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 }, { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c index 37250fe..155a13f 100644 --- a/arch/x86/kernel/doublefault_32.c +++ b/arch/x86/kernel/doublefault_32.c @@ -20,7 +20,7 @@ static void doublefault_fn(void) struct desc_ptr gdt_desc = {0, 0}; unsigned long gdt, tss; - store_gdt(&gdt_desc); + native_store_gdt(&gdt_desc); gdt = gdt_desc.address; printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index c8797d5..deb6421 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -176,26 +176,20 @@ void show_trace(struct task_struct *task, struct pt_regs *regs, void show_stack(struct task_struct *task, unsigned long *sp) { - show_stack_log_lvl(task, NULL, sp, 0, ""); -} - -/* - * The architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - unsigned long bp; + unsigned long bp = 0; unsigned long stack; - bp = stack_frame(current, NULL); - printk("Pid: %d, comm: %.20s %s %s %.*s\n", - current->pid, current->comm, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - show_trace(NULL, NULL, &stack, bp); + /* + * Stack frames below this one aren't interesting. Don't show them + * if we're printing for %current. + */ + if (!sp && (!task || task == current)) { + sp = &stack; + bp = stack_frame(current, NULL); + } + + show_stack_log_lvl(task, NULL, sp, bp, ""); } -EXPORT_SYMBOL(dump_stack); static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int die_owner = -1; diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 1038a41..f2a1770 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -86,11 +86,9 @@ void show_regs(struct pt_regs *regs) { int i; + show_regs_print_info(KERN_EMERG); __show_regs(regs, !user_mode_vm(regs)); - pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n", - TASK_COMM_LEN, current->comm, task_pid_nr(current), - current_thread_info(), current, task_thread_info(current)); /* * When in-kernel, we also print out the stack and code at the * time of the fault.. 
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index b653675..addb207 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -249,14 +249,10 @@ void show_regs(struct pt_regs *regs) { int i; unsigned long sp; - const int cpu = smp_processor_id(); - struct task_struct *cur = current; sp = regs->sp; - printk("CPU %d ", cpu); + show_regs_print_info(KERN_DEFAULT); __show_regs(regs, 1); - printk(KERN_DEFAULT "Process %s (pid: %d, threadinfo %p, task %p)\n", - cur->comm, cur->pid, task_thread_info(cur), cur); /* * When in-kernel, we also print out the stack and code at the diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 9b9f18b..d15f575 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -169,25 +169,9 @@ static struct console early_serial_console = { .index = -1, }; -/* Direct interface for emergencies */ -static struct console *early_console = &early_vga_console; -static int __initdata early_console_initialized; - -asmlinkage void early_printk(const char *fmt, ...) -{ - char buf[512]; - int n; - va_list ap; - - va_start(ap, fmt); - n = vscnprintf(buf, sizeof(buf), fmt, ap); - early_console->write(early_console, buf, n); - va_end(ap); -} - static inline void early_console_register(struct console *con, int keep_early) { - if (early_console->index != -1) { + if (con->index != -1) { printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n", con->name); return; @@ -207,9 +191,8 @@ static int __init setup_early_printk(char *buf) if (!buf) return 0; - if (early_console_initialized) + if (early_console) return 0; - early_console_initialized = 1; keep = (strstr(buf, "keep") != NULL); diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index c5e403f..101ac1a9 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -144,10 +144,10 @@ void __init x86_64_start_kernel(char * real_mode_data) * Build-time sanity checks on the kernel image and module * area mappings. (these are purely build-time and produce no code) */ - BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START); - BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE); + BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map); + BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE); BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE); - BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0); + BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0); BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0); BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 3f06e61..9895a9a 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -353,7 +353,11 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src) * have given. */ newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; - BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. 
*/ + if ((s64) (s32) newdisp != newdisp) { + pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); + pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value); + return 0; + } disp = (u8 *) dest + insn_offset_displacement(&insn); *(s32 *) disp = (s32) newdisp; } @@ -375,6 +379,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) else p->ainsn.boostable = -1; + /* Check whether the instruction modifies Interrupt Flag or not */ + p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn); + /* Also, displacement change doesn't affect the first byte */ p->opcode = p->ainsn.insn[0]; } @@ -434,7 +441,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, __this_cpu_write(current_kprobe, p); kcb->kprobe_saved_flags = kcb->kprobe_old_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); - if (is_IF_modifier(p->ainsn.insn)) + if (p->ainsn.if_modifier) kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index b686a90..cd6d9a5 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -20,6 +20,7 @@ * Authors: Anthony Liguori <aliguori@us.ibm.com> */ +#include <linux/context_tracking.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/kvm_para.h> @@ -43,7 +44,6 @@ #include <asm/apicdef.h> #include <asm/hypervisor.h> #include <asm/kvm_guest.h> -#include <asm/context_tracking.h> static int kvmapf = 1; @@ -254,16 +254,18 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason); dotraplinkage void __kprobes do_async_page_fault(struct pt_regs *regs, unsigned long error_code) { + enum ctx_state prev_state; + switch (kvm_read_and_reset_pf_reason()) { default: do_page_fault(regs, error_code); break; case KVM_PV_REASON_PAGE_NOT_PRESENT: /* page is swapped out by the host. 
*/ - exception_enter(regs); + prev_state = exception_enter(); exit_idle(); kvm_async_pf_task_wait((u32)read_cr2()); - exception_exit(regs); + exception_exit(prev_state); break; case KVM_PV_REASON_PAGE_READY: rcu_irq_enter(); diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c index 577db84..833d51d 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/microcode_core_early.c @@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void) u32 eax = 0x00000000; u32 ebx, ecx = 0, edx; - if (!have_cpuid_p()) - return X86_VENDOR_UNKNOWN; - native_cpuid(&eax, &ebx, &ecx, &edx); if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) @@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void) return X86_VENDOR_UNKNOWN; } +static int __cpuinit x86_family(void) +{ + u32 eax = 0x00000001; + u32 ebx, ecx = 0, edx; + int x86; + + native_cpuid(&eax, &ebx, &ecx, &edx); + + x86 = (eax >> 8) & 0xf; + if (x86 == 15) + x86 += (eax >> 20) & 0xff; + + return x86; +} + void __init load_ucode_bsp(void) { - int vendor = x86_vendor(); + int vendor, x86; + + if (!have_cpuid_p()) + return; - if (vendor == X86_VENDOR_INTEL) + vendor = x86_vendor(); + x86 = x86_family(); + + if (vendor == X86_VENDOR_INTEL && x86 >= 6) load_ucode_intel_bsp(); } void __cpuinit load_ucode_ap(void) { - int vendor = x86_vendor(); + int vendor, x86; + + if (!have_cpuid_p()) + return; + + vendor = x86_vendor(); + x86 = x86_family(); - if (vendor == X86_VENDOR_INTEL) + if (vendor == X86_VENDOR_INTEL && x86 >= 6) load_ucode_intel_ap(); } diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c index 7890bc8..d893e8e 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/microcode_intel_early.c @@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp, struct microcode_intel ***mc_saved; mc_saved = (struct microcode_intel ***) - __pa_symbol(&mc_saved_data->mc_saved); + __pa_nodebug(&mc_saved_data->mc_saved); for (i = 0; i < mc_saved_data->mc_saved_count; i++) { struct microcode_intel *p; p = *(struct microcode_intel **) - __pa(mc_saved_data->mc_saved + i); - mc_saved_tmp[i] = (struct microcode_intel *)__pa(p); + __pa_nodebug(mc_saved_data->mc_saved + i); + mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p); } } #endif @@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end, struct cpio_data cd; long offset = 0; #ifdef CONFIG_X86_32 - char *p = (char *)__pa_symbol(ucode_name); + char *p = (char *)__pa_nodebug(ucode_name); #else char *p = ucode_name; #endif @@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci) if (mc_intel == NULL) return; - delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info); - current_mc_date_p = (int *)__pa_symbol(&current_mc_date); + delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); + current_mc_date_p = (int *)__pa_nodebug(&current_mc_date); *delay_ucode_info_p = 1; *current_mc_date_p = mc_intel->hdr.date; @@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) } #endif -static int apply_microcode_early(struct mc_saved_data *mc_saved_data, - struct ucode_cpu_info *uci) +static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, + struct ucode_cpu_info *uci) { struct microcode_intel *mc_intel; unsigned int val[2]; @@ -741,15 +741,15 @@ load_ucode_intel_bsp(void) #ifdef CONFIG_X86_32 struct boot_params *boot_params_p; - boot_params_p = (struct boot_params
*)__pa_symbol(&boot_params); + boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params); ramdisk_image = boot_params_p->hdr.ramdisk_image; ramdisk_size = boot_params_p->hdr.ramdisk_size; initrd_start_early = ramdisk_image; initrd_end_early = initrd_start_early + ramdisk_size; _load_ucode_intel_bsp( - (struct mc_saved_data *)__pa_symbol(&mc_saved_data), - (unsigned long *)__pa_symbol(&mc_saved_in_initrd), + (struct mc_saved_data *)__pa_nodebug(&mc_saved_data), + (unsigned long *)__pa_nodebug(&mc_saved_in_initrd), initrd_start_early, initrd_end_early, &uci); #else ramdisk_image = boot_params.hdr.ramdisk_image; @@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void) unsigned long *initrd_start_p; mc_saved_in_initrd_p = - (unsigned long *)__pa_symbol(mc_saved_in_initrd); - mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data); - initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start); - initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p); + (unsigned long *)__pa_nodebug(mc_saved_in_initrd); + mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data); + initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start); + initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p); #else mc_saved_data_p = &mc_saved_data; mc_saved_in_initrd_p = mc_saved_in_initrd; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 17fff18..cd6de64 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void) leave_lazy(PARAVIRT_LAZY_MMU); } +void paravirt_flush_lazy_mmu(void) +{ + preempt_disable(); + + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { + arch_leave_lazy_mmu_mode(); + arch_enter_lazy_mmu_mode(); + } + + preempt_enable(); +} + void paravirt_start_context_switch(struct task_struct *prev) { BUG_ON(preemptible()); @@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return this_cpu_read(paravirt_lazy_mode); } -void arch_flush_lazy_mmu_mode(void) -{ - preempt_disable(); - - if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { - arch_leave_lazy_mmu_mode(); - arch_enter_lazy_mmu_mode(); - } - - preempt_enable(); -} - struct pv_info pv_info = { .name = "bare hardware", .paravirt_enabled = 0, @@ -360,7 +360,6 @@ struct pv_cpu_ops pv_cpu_ops = { .set_ldt = native_set_ldt, .load_gdt = native_load_gdt, .load_idt = native_load_idt, - .store_gdt = native_store_gdt, .store_idt = native_store_idt, .store_tr = native_store_tr, .load_tls = native_load_tls, @@ -475,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = { .lazy_mode = { .enter = paravirt_nop, .leave = paravirt_nop, + .flush = paravirt_nop, }, .set_fixmap = native_set_fixmap, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 14ae100..607af0d 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -121,30 +121,6 @@ void exit_thread(void) drop_fpu(me); } -void show_regs_common(void) -{ - const char *vendor, *product, *board; - - vendor = dmi_get_system_info(DMI_SYS_VENDOR); - if (!vendor) - vendor = ""; - product = dmi_get_system_info(DMI_PRODUCT_NAME); - if (!product) - product = ""; - - /* Board Name is optional */ - board = dmi_get_system_info(DMI_BOARD_NAME); - - printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n", - current->pid, current->comm, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version, - vendor, product, - board ? "/" : "", - board ? 
board : ""); -} - void flush_thread(void) { struct task_struct *tsk = current; @@ -301,13 +277,7 @@ void exit_idle(void) } #endif -/* - * The idle thread. There's no useful work to be - * done, so just try to conserve power and have a - * low exit latency (ie sit in a loop waiting for - * somebody to say that they'd like to reschedule) - */ -void cpu_idle(void) +void arch_cpu_idle_prepare(void) { /* * If we're the non-boot CPU, nothing set the stack canary up @@ -317,71 +287,40 @@ void cpu_idle(void) * canaries already on the stack wont ever trigger). */ boot_init_stack_canary(); - current_thread_info()->status |= TS_POLLING; - - while (1) { - tick_nohz_idle_enter(); - - while (!need_resched()) { - rmb(); - - if (cpu_is_offline(smp_processor_id())) - play_dead(); - - /* - * Idle routines should keep interrupts disabled - * from here on, until they go to idle. - * Otherwise, idle callbacks can misfire. - */ - local_touch_nmi(); - local_irq_disable(); - - enter_idle(); - - /* Don't trace irqs off for idle */ - stop_critical_timings(); - - /* enter_idle() needs rcu for notifiers */ - rcu_idle_enter(); +} - if (cpuidle_idle_call()) - x86_idle(); +void arch_cpu_idle_enter(void) +{ + local_touch_nmi(); + enter_idle(); +} - rcu_idle_exit(); - start_critical_timings(); +void arch_cpu_idle_exit(void) +{ + __exit_idle(); +} - /* In many cases the interrupt that ended idle - has already called exit_idle. But some idle - loops can be woken up without interrupt. */ - __exit_idle(); - } +void arch_cpu_idle_dead(void) +{ + play_dead(); +} - tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - } +/* + * Called from the generic idle code. + */ +void arch_cpu_idle(void) +{ + if (cpuidle_idle_call()) + x86_idle(); } /* - * We use this if we don't have any better - * idle routine.. + * We use this if we don't have any better idle routine.. */ void default_idle(void) { trace_cpu_idle_rcuidle(1, smp_processor_id()); - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we - * test NEED_RESCHED: - */ - smp_mb(); - - if (!need_resched()) - safe_halt(); /* enables interrupts racelessly */ - else - local_irq_enable(); - current_thread_info()->status |= TS_POLLING; + safe_halt(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } #ifdef CONFIG_APM_MODULE @@ -411,20 +350,6 @@ void stop_this_cpu(void *dummy) halt(); } -/* - * On SMP it's slightly faster (but much more power-consuming!) - * to poll the ->work.need_resched flag instead of waiting for the - * cross-CPU IPI to arrive. Use this option with caution. 
- */ -static void poll_idle(void) -{ - trace_cpu_idle_rcuidle(0, smp_processor_id()); - local_irq_enable(); - while (!need_resched()) - cpu_relax(); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); -} - bool amd_e400_c1e_detected; EXPORT_SYMBOL(amd_e400_c1e_detected); @@ -489,13 +414,13 @@ static void amd_e400_idle(void) void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP - if (x86_idle == poll_idle && smp_num_siblings > 1) + if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); #endif - if (x86_idle) + if (x86_idle || boot_option_idle_override == IDLE_POLL) return; - if (cpu_has_amd_erratum(amd_erratum_400)) { + if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) { /* E400: APIC timer interrupt does not wake up CPU from C1e */ pr_info("using AMD E400 aware idle routine\n"); x86_idle = amd_e400_idle; @@ -517,8 +442,8 @@ static int __init idle_setup(char *str) if (!strcmp(str, "poll")) { pr_info("using polling idle threads\n"); - x86_idle = poll_idle; boot_option_idle_override = IDLE_POLL; + cpu_idle_poll_ctrl(true); } else if (!strcmp(str, "halt")) { /* * When the boot option of idle=halt is added, halt is diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index b5a8905..7305f7d 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -84,8 +84,6 @@ void __show_regs(struct pt_regs *regs, int all) savesegment(gs, gs); } - show_regs_common(); - printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", (u16)regs->cs, regs->ip, regs->flags, smp_processor_id()); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 0f49677..355ae06 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -62,7 +62,6 @@ void __show_regs(struct pt_regs *regs, int all) unsigned int fsindex, gsindex; unsigned int ds, cs, es; - show_regs_common(); printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); printk_address(regs->ip, 1); printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 26ee48a..04ee1e2 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -354,18 +354,22 @@ static void ati_force_hpet_resume(void) static u32 ati_ixp4x0_rev(struct pci_dev *dev) { - u32 d; - u8 b; + int err = 0; + u32 d = 0; + u8 b = 0; - pci_read_config_byte(dev, 0xac, &b); + err = pci_read_config_byte(dev, 0xac, &b); b &= ~(1<<5); - pci_write_config_byte(dev, 0xac, b); - pci_read_config_dword(dev, 0x70, &d); + err |= pci_write_config_byte(dev, 0xac, b); + err |= pci_read_config_dword(dev, 0x70, &d); d |= 1<<8; - pci_write_config_dword(dev, 0x70, d); - pci_read_config_dword(dev, 0x8, &d); + err |= pci_write_config_dword(dev, 0x70, d); + err |= pci_read_config_dword(dev, 0x8, &d); d &= 0xff; dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d); + + WARN_ON_ONCE(err); + return d; } diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 2e8f3d3..198eb20 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -13,6 +13,7 @@ #include <asm/x86_init.h> #include <asm/time.h> #include <asm/mrst.h> +#include <asm/rtc.h> #ifdef CONFIG_X86_32 /* @@ -36,70 +37,24 @@ EXPORT_SYMBOL(rtc_lock); * nowtime is written into the registers of the CMOS clock, it will * jump to the next second precisely 500 ms later. 
Check the Motorola * MC146818A or Dallas DS12887 data sheet for details. - * - * BUG: This routine does not handle hour overflow properly; it just - * sets the minutes. Usually you'll only notice that after reboot! */ int mach_set_rtc_mmss(unsigned long nowtime) { - int real_seconds, real_minutes, cmos_minutes; - unsigned char save_control, save_freq_select; - unsigned long flags; + struct rtc_time tm; int retval = 0; - spin_lock_irqsave(&rtc_lock, flags); - - /* tell the clock it's being set */ - save_control = CMOS_READ(RTC_CONTROL); - CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - - /* stop and reset prescaler */ - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - - cmos_minutes = CMOS_READ(RTC_MINUTES); - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - cmos_minutes = bcd2bin(cmos_minutes); - - /* - * since we're only adjusting minutes and seconds, - * don't interfere with hour overflow. This avoids - * messing with unknown time zones but requires your - * RTC not to be off by more than 15 minutes - */ - real_seconds = nowtime % 60; - real_minutes = nowtime / 60; - /* correct for half hour time zone */ - if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) - real_minutes += 30; - real_minutes %= 60; - - if (abs(real_minutes - cmos_minutes) < 30) { - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - real_seconds = bin2bcd(real_seconds); - real_minutes = bin2bcd(real_minutes); - } - CMOS_WRITE(real_seconds, RTC_SECONDS); - CMOS_WRITE(real_minutes, RTC_MINUTES); + rtc_time_to_tm(nowtime, &tm); + if (!rtc_valid_tm(&tm)) { + retval = set_rtc_time(&tm); + if (retval) + printk(KERN_ERR "%s: RTC write failed with error %d\n", + __FUNCTION__, retval); } else { - printk_once(KERN_NOTICE - "set_rtc_mmss: can't update from %d to %d\n", - cmos_minutes, real_minutes); - retval = -1; + printk(KERN_ERR + "%s: Invalid RTC value: write of %lx to RTC failed\n", + __FUNCTION__, nowtime); + retval = -EINVAL; } - - /* The following flags have to be released exactly in this order, - * otherwise the DS12887 (popular MC146818A clone with integrated - * battery and quartz) will not reset the oscillator and will not - * update precisely 500 ms later. You won't find this mentioned in - * the Dallas Semiconductor data sheets, but who believes data - * sheets anyway ... 
-- Markus Kuhn - */ - CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); - - spin_unlock_irqrestore(&rtc_lock, flags); - return retval; } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 84d3285..56f7fcf 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -82,7 +82,6 @@ #include <asm/timer.h> #include <asm/i8259.h> #include <asm/sections.h> -#include <asm/dmi.h> #include <asm/io_apic.h> #include <asm/ist.h> #include <asm/setup_arch.h> @@ -171,9 +170,13 @@ static struct resource bss_resource = { #ifdef CONFIG_X86_32 /* cpu data as detected by the assembly code in head.S */ -struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1}; +struct cpuinfo_x86 new_cpu_data __cpuinitdata = { + .wp_works_ok = -1, +}; /* common cpu data for all cpus */ -struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1}; +struct cpuinfo_x86 boot_cpu_data __read_mostly = { + .wp_works_ok = -1, +}; EXPORT_SYMBOL(boot_cpu_data); unsigned int def_to_bigsmp; @@ -501,11 +504,14 @@ static void __init memblock_x86_reserve_range_setup_data(void) /* * Keep the crash kernel below this limit. On 32 bits earlier kernels * would limit the kernel to the low 512 MiB due to mapping restrictions. + * On 64bit, old kexec-tools need to under 896MiB. */ #ifdef CONFIG_X86_32 -# define CRASH_KERNEL_ADDR_MAX (512 << 20) +# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20) +# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20) #else -# define CRASH_KERNEL_ADDR_MAX MAXMEM +# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20) +# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM #endif static void __init reserve_crashkernel_low(void) @@ -515,19 +521,35 @@ static void __init reserve_crashkernel_low(void) unsigned long long low_base = 0, low_size = 0; unsigned long total_low_mem; unsigned long long base; + bool auto_set = false; int ret; total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); + /* crashkernel=Y,low */ ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); - if (ret != 0 || low_size <= 0) - return; + if (ret != 0) { + /* + * two parts from lib/swiotlb.c: + * swiotlb size: user specified with swiotlb= or default. + * swiotlb overflow buffer: now is hardcoded to 32k. + * We round it to 8M for other buffers that + * may need to stay low too. + */ + low_size = swiotlb_size_or_default() + (8UL<<20); + auto_set = true; + } else { + /* passed with crashkernel=0,low ? 
*/ + if (!low_size) + return; + } low_base = memblock_find_in_range(low_size, (1ULL<<32), low_size, alignment); if (!low_base) { - pr_info("crashkernel low reservation failed - No suitable area found.\n"); + if (!auto_set) + pr_info("crashkernel low reservation failed - No suitable area found.\n"); return; } @@ -548,14 +570,22 @@ static void __init reserve_crashkernel(void) const unsigned long long alignment = 16<<20; /* 16M */ unsigned long long total_mem; unsigned long long crash_size, crash_base; + bool high = false; int ret; total_mem = memblock_phys_mem_size(); + /* crashkernel=XM */ ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); - if (ret != 0 || crash_size <= 0) - return; + if (ret != 0 || crash_size <= 0) { + /* crashkernel=X,high */ + ret = parse_crashkernel_high(boot_command_line, total_mem, + &crash_size, &crash_base); + if (ret != 0 || crash_size <= 0) + return; + high = true; + } /* 0 means: find the address automatically */ if (crash_base <= 0) { @@ -563,7 +593,9 @@ static void __init reserve_crashkernel(void) * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX */ crash_base = memblock_find_in_range(alignment, - CRASH_KERNEL_ADDR_MAX, crash_size, alignment); + high ? CRASH_KERNEL_ADDR_HIGH_MAX : + CRASH_KERNEL_ADDR_LOW_MAX, + crash_size, alignment); if (!crash_base) { pr_info("crashkernel reservation failed - No suitable area found.\n"); @@ -964,6 +996,7 @@ void __init setup_arch(char **cmdline_p) efi_init(); dmi_scan_machine(); + dmi_set_dump_stack_arch_desc(); /* * VMware detection requires dmi to be available, so this diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a6ceaed..9c73b51 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -284,7 +284,7 @@ notrace static void __cpuinit start_secondary(void *unused) x86_cpuinit.setup_percpu_clockev(); wmb(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } void __init smp_store_boot_cpu_info(void) @@ -1365,9 +1365,8 @@ static inline void mwait_play_dead(void) unsigned int eax, ebx, ecx, edx; unsigned int highest_cstate = 0; unsigned int highest_subcstate = 0; - int i; void *mwait_ptr; - struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); + int i; if (!this_cpu_has(X86_FEATURE_MWAIT)) return; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 68bda7a..772e2a8 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/context_tracking.h> #include <linux/interrupt.h> #include <linux/kallsyms.h> #include <linux/spinlock.h> @@ -55,8 +56,7 @@ #include <asm/i387.h> #include <asm/fpu-internal.h> #include <asm/mce.h> -#include <asm/context_tracking.h> - +#include <asm/fixmap.h> #include <asm/mach_traps.h> #ifdef CONFIG_X86_64 @@ -176,34 +176,38 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ - exception_enter(regs); \ + enum ctx_state prev_state; \ + \ + prev_state = exception_enter(); \ if (notify_die(DIE_TRAP, str, regs, error_code, \ trapnr, signr) == NOTIFY_STOP) { \ - exception_exit(regs); \ + exception_exit(prev_state); \ return; \ } \ conditional_sti(regs); \ do_trap(trapnr, signr, str, regs, error_code, NULL); \ - exception_exit(regs); \ + exception_exit(prev_state); \ } #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ 
siginfo_t info; \ + enum ctx_state prev_state; \ + \ info.si_signo = signr; \ info.si_errno = 0; \ info.si_code = sicode; \ info.si_addr = (void __user *)siaddr; \ - exception_enter(regs); \ + prev_state = exception_enter(); \ if (notify_die(DIE_TRAP, str, regs, error_code, \ trapnr, signr) == NOTIFY_STOP) { \ - exception_exit(regs); \ + exception_exit(prev_state); \ return; \ } \ conditional_sti(regs); \ do_trap(trapnr, signr, str, regs, error_code, &info); \ - exception_exit(regs); \ + exception_exit(prev_state); \ } DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, @@ -226,14 +230,16 @@ DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, /* Runs on IST stack */ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) { - exception_enter(regs); + enum ctx_state prev_state; + + prev_state = exception_enter(); if (notify_die(DIE_TRAP, "stack segment", regs, error_code, X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { preempt_conditional_sti(regs); do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); preempt_conditional_cli(regs); } - exception_exit(regs); + exception_exit(prev_state); } dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) @@ -241,7 +247,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) static const char str[] = "double fault"; struct task_struct *tsk = current; - exception_enter(regs); + exception_enter(); /* Return not checked because double check cannot be ignored */ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); @@ -261,8 +267,9 @@ dotraplinkage void __kprobes do_general_protection(struct pt_regs *regs, long error_code) { struct task_struct *tsk; + enum ctx_state prev_state; - exception_enter(regs); + prev_state = exception_enter(); conditional_sti(regs); #ifdef CONFIG_X86_32 @@ -300,12 +307,14 @@ do_general_protection(struct pt_regs *regs, long error_code) force_sig(SIGSEGV, tsk); exit: - exception_exit(regs); + exception_exit(prev_state); } /* May run on IST stack. */ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) { + enum ctx_state prev_state; + #ifdef CONFIG_DYNAMIC_FTRACE /* * ftrace must be first, everything else may cause a recursive crash. 
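The traps.c hunks above and below all make the same context-tracking conversion: exception_enter() now returns the previous context state, and that value must be handed back to exception_exit(). A minimal sketch of the converted handler shape, using a hypothetical do_example_trap() that is not part of this patch:

#include <linux/context_tracking.h>     /* exception_enter(), exception_exit() */
#include <asm/traps.h>                  /* dotraplinkage */

/* Hypothetical handler, shown only to illustrate the new entry/exit pairing. */
dotraplinkage void do_example_trap(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();  /* save the context state we came from */
        /* notify_die()/do_trap() style work goes here, as in the real handlers */
        exception_exit(prev_state);      /* restore that saved state on the way out */
}

The DO_ERROR()/DO_ERROR_INFO() macros in the hunks above expand to exactly this pattern.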
@@ -315,7 +324,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co ftrace_int3_handler(regs)) return; #endif - exception_enter(regs); + prev_state = exception_enter(); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) @@ -336,7 +345,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co preempt_conditional_cli(regs); debug_stack_usage_dec(); exit: - exception_exit(regs); + exception_exit(prev_state); } #ifdef CONFIG_X86_64 @@ -393,11 +402,12 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; + enum ctx_state prev_state; int user_icebp = 0; unsigned long dr6; int si_code; - exception_enter(regs); + prev_state = exception_enter(); get_debugreg(dr6, 6); @@ -467,7 +477,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_dec(); exit: - exception_exit(regs); + exception_exit(prev_state); } /* @@ -561,17 +571,21 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr) dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) { - exception_enter(regs); + enum ctx_state prev_state; + + prev_state = exception_enter(); math_error(regs, error_code, X86_TRAP_MF); - exception_exit(regs); + exception_exit(prev_state); } dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code) { - exception_enter(regs); + enum ctx_state prev_state; + + prev_state = exception_enter(); math_error(regs, error_code, X86_TRAP_XF); - exception_exit(regs); + exception_exit(prev_state); } dotraplinkage void @@ -639,7 +653,9 @@ EXPORT_SYMBOL_GPL(math_state_restore); dotraplinkage void __kprobes do_device_not_available(struct pt_regs *regs, long error_code) { - exception_enter(regs); + enum ctx_state prev_state; + + prev_state = exception_enter(); BUG_ON(use_eager_fpu()); #ifdef CONFIG_MATH_EMULATION @@ -650,7 +666,7 @@ do_device_not_available(struct pt_regs *regs, long error_code) info.regs = regs; math_emulate(&info); - exception_exit(regs); + exception_exit(prev_state); return; } #endif @@ -658,15 +674,16 @@ do_device_not_available(struct pt_regs *regs, long error_code) #ifdef CONFIG_X86_32 conditional_sti(regs); #endif - exception_exit(regs); + exception_exit(prev_state); } #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) { siginfo_t info; + enum ctx_state prev_state; - exception_enter(regs); + prev_state = exception_enter(); local_irq_enable(); info.si_signo = SIGILL; @@ -678,7 +695,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, &info); } - exception_exit(regs); + exception_exit(prev_state); } #endif @@ -753,6 +770,14 @@ void __init trap_init(void) #endif /* + * Set the IDT descriptor to a fixed read-only location, so that the + * "sidt" instruction will not leak the location of the kernel, and + * to defend the IDT against arbitrary memory write vulnerabilities. 
+ * It will be reloaded in cpu_init() */ + __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); + idt_descr.address = fix_to_virt(FIX_RO_IDT); + + /* * Should be a barrier for any external CPU state: */ cpu_init(); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 4b9ea10..098b3cf 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -768,7 +768,8 @@ static cycle_t read_tsc(struct clocksource *cs) static void resume_tsc(struct clocksource *cs) { - clocksource_tsc.cycle_last = 0; + if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) + clocksource_tsc.cycle_last = 0; } static struct clocksource clocksource_tsc = { @@ -939,6 +940,9 @@ static int __init init_tsc_clocksource(void) clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; } + if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) + clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; + /* * Trust the results of the earlier calibration on systems * exporting a reliable TSC. diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 0ba4cfb..2ed8459 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -697,3 +697,32 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) send_sig(SIGTRAP, current, 0); return ret; } + +unsigned long +arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs) +{ + int rasize, ncopied; + unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */ + + rasize = is_ia32_task() ? 4 : 8; + ncopied = copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize); + if (unlikely(ncopied)) + return -1; + + /* check whether address has been already hijacked */ + if (orig_ret_vaddr == trampoline_vaddr) + return orig_ret_vaddr; + + ncopied = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize); + if (likely(!ncopied)) + return orig_ret_vaddr; + + if (ncopied != rasize) { + pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, " + "%%ip=%#lx\n", current->pid, regs->sp, regs->ip); + + force_sig_info(SIGSEGV, SEND_SIG_FORCED, current); + } + + return -1; +} diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 22a1530..10c4f30 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -94,10 +94,6 @@ SECTIONS _text = .; /* bootstrapping code */ HEAD_TEXT -#ifdef CONFIG_X86_32 - . = ALIGN(PAGE_SIZE); - *(.text..page_aligned) -#endif . 
= ALIGN(8); _stext = .; TEXT_TEXT diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 02b51dd..f77df1c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) if (!pv_eoi_enabled(vcpu)) return 0; return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr); + addr, sizeof(u8)); } void kvm_lapic_init(void) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e1b1ce2..7d39d70 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -555,7 +555,7 @@ static void svm_init_erratum_383(void) int err; u64 val; - if (!cpu_has_amd_erratum(amd_erratum_383)) + if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH)) return; /* Use _safe variants to not break nested virtualization */ diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 6667042..867b810 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2459,7 +2459,7 @@ static int hardware_enable(void *garbage) ept_sync_global(); } - store_gdt(&__get_cpu_var(host_gdt)); + native_store_gdt(&__get_cpu_var(host_gdt)); return 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f71500a..e172132 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) unsigned long flags, this_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; - void *shared_kaddr; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp, host_tsc; - struct pvclock_vcpu_time_info *guest_hv_clock; + struct pvclock_vcpu_time_info guest_hv_clock; u8 pvclock_flags; bool use_master_clock; kernel_ns = 0; host_tsc = 0; - /* Keep irq disabled to prevent changes to the clock */ - local_irq_save(flags); - this_tsc_khz = __get_cpu_var(cpu_tsc_khz); - if (unlikely(this_tsc_khz == 0)) { - local_irq_restore(flags); - kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); - return 1; - } - /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. 
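The KVM hunks around this point move the pvclock and PV-EOI plumbing from direct page mapping (gfn_to_page()/kmap_atomic()) over to the gfn_to_hva cache helpers, and kvm_gfn_to_hva_cache_init() now takes the size of the region being cached. A rough sketch of that access pattern, assuming only the in-tree helpers visible in these hunks; struct my_shared_data and the example_* functions are illustrative names, not part of this patch:

#include <linux/kvm_host.h>     /* struct kvm, gfn_to_hva_cache, kvm_*_guest_cached() */
#include <linux/errno.h>

/* Illustrative guest/host shared structure; not defined by this patch. */
struct my_shared_data {
        u64 counter;
};

static struct gfn_to_hva_cache example_ghc;

/* Register the guest-physical region once, with its exact size... */
static int example_cache_init(struct kvm *kvm, gpa_t gpa)
{
        return kvm_gfn_to_hva_cache_init(kvm, &example_ghc, gpa,
                                         sizeof(struct my_shared_data));
}

/* ...then read-modify-write it through the cache instead of kmap_atomic(). */
static int example_cache_update(struct kvm *kvm)
{
        struct my_shared_data data;

        if (kvm_read_guest_cached(kvm, &example_ghc, &data, sizeof(data)))
                return -EFAULT;
        data.counter++;
        return kvm_write_guest_cached(kvm, &example_ghc, &data, sizeof(data));
}

This mirrors what kvm_guest_time_update() does with vcpu->arch.pv_time in the hunks that follow.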
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) kernel_ns = ka->master_kernel_ns; } spin_unlock(&ka->pvclock_gtod_sync_lock); + + /* Keep irq disabled to prevent changes to the clock */ + local_irq_save(flags); + this_tsc_khz = __get_cpu_var(cpu_tsc_khz); + if (unlikely(this_tsc_khz == 0)) { + local_irq_restore(flags); + kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); + return 1; + } if (!use_master_clock) { host_tsc = native_read_tsc(); kernel_ns = get_kernel_ns(); @@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) local_irq_restore(flags); - if (!vcpu->time_page) + if (!vcpu->pv_time_enabled) return 0; /* @@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) */ vcpu->hv_clock.version += 2; - shared_kaddr = kmap_atomic(vcpu->time_page); - - guest_hv_clock = shared_kaddr + vcpu->time_offset; + if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, + &guest_hv_clock, sizeof(guest_hv_clock)))) + return 0; /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ - pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); + pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); if (vcpu->pvclock_set_guest_stopped_request) { pvclock_flags |= PVCLOCK_GUEST_STOPPED; @@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->hv_clock.flags = pvclock_flags; - memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, - sizeof(vcpu->hv_clock)); - - kunmap_atomic(shared_kaddr); - - mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); + kvm_write_guest_cached(v->kvm, &vcpu->pv_time, + &vcpu->hv_clock, + sizeof(vcpu->hv_clock)); return 0; } @@ -1827,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 0; } - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, + sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1837,10 +1834,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) static void kvmclock_reset(struct kvm_vcpu *vcpu) { - if (vcpu->arch.time_page) { - kvm_release_page_dirty(vcpu->arch.time_page); - vcpu->arch.time_page = NULL; - } + vcpu->arch.pv_time_enabled = false; } static void accumulate_steal_time(struct kvm_vcpu *vcpu) @@ -1947,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { + u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; @@ -1956,14 +1951,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!(data & 1)) break; - /* ...but clean it before doing the actual write */ - vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); - - vcpu->arch.time_page = - gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); + gpa_offset = data & ~(PAGE_MASK | 1); - if (is_error_page(vcpu->arch.time_page)) - vcpu->arch.time_page = NULL; + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.pv_time, data & ~1ULL, + sizeof(struct pvclock_vcpu_time_info))) + vcpu->arch.pv_time_enabled = false; + else + vcpu->arch.pv_time_enabled = true; break; } @@ -1980,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, - data & KVM_STEAL_VALID_BITS)) + data & KVM_STEAL_VALID_BITS, + sizeof(struct kvm_steal_time))) return 1; vcpu->arch.st.msr_val = data; @@ -2967,7 +2963,7 @@ static int 
kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, */ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { - if (!vcpu->arch.time_page) + if (!vcpu->arch.pv_time_enabled) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); @@ -6718,6 +6714,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) goto fail_free_wbinvd_dirty_mask; vcpu->arch.ia32_tsc_adjust_msr = 0x0; + vcpu->arch.pv_time_enabled = false; kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig index 29043d2..4a0890f8 100644 --- a/arch/x86/lguest/Kconfig +++ b/arch/x86/lguest/Kconfig @@ -1,7 +1,6 @@ config LGUEST_GUEST bool "Lguest guest support" - select PARAVIRT - depends on X86_32 + depends on X86_32 && PARAVIRT select TTY select VIRTUALIZATION select VIRTIO diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 1cbd89c..7114c63 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1334,6 +1334,7 @@ __init void lguest_init(void) pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; + pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; pv_mmu_ops.pte_update = lguest_pte_update; pv_mmu_ops.pte_update_defer = lguest_pte_update; diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 2af5df3..e78b8ee 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -61,7 +61,7 @@ ENTRY(csum_partial) testl $3, %esi # Check alignment. jz 2f # Jump if alignment is ok. testl $1, %esi # Check alignment. - jz 10f # Jump if alignment is boundary of 2bytes. + jz 10f # Jump if alignment is boundary of 2 bytes. # buf is odd dec %ecx diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c index b908a59..e78761d 100644 --- a/arch/x86/lib/memcpy_32.c +++ b/arch/x86/lib/memcpy_32.c @@ -26,7 +26,7 @@ void *memmove(void *dest, const void *src, size_t n) char *ret = dest; __asm__ __volatile__( - /* Handle more 16bytes in loop */ + /* Handle more 16 bytes in loop */ "cmp $0x10, %0\n\t" "jb 1f\n\t" @@ -51,7 +51,7 @@ void *memmove(void *dest, const void *src, size_t n) "sub $0x10, %0\n\t" /* - * We gobble 16byts forward in each loop. + * We gobble 16 bytes forward in each loop. */ "3:\n\t" "sub $0x10, %0\n\t" @@ -117,7 +117,7 @@ void *memmove(void *dest, const void *src, size_t n) "sub $0x10, %0\n\t" /* - * We gobble 16byts backward in each loop. + * We gobble 16 bytes backward in each loop. */ "7:\n\t" "sub $0x10, %0\n\t" diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 1c273be..56313a3 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -98,7 +98,7 @@ ENTRY(memcpy) subq $0x20, %rdx /* * At most 3 ALU operations in one cycle, - * so append NOPS in the same 16bytes trunk. + * so append NOPS in the same 16 bytes trunk. */ .p2align 4 .Lcopy_backward_loop: diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index ee16461..65268a6 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -27,7 +27,7 @@ ENTRY(memmove) CFI_STARTPROC - /* Handle more 32bytes in loop */ + /* Handle more 32 bytes in loop */ mov %rdi, %rax cmp $0x20, %rdx jb 1f @@ -56,7 +56,7 @@ ENTRY(memmove) 3: sub $0x20, %rdx /* - * We gobble 32byts forward in each loop. + * We gobble 32 bytes forward in each loop. 
*/ 5: sub $0x20, %rdx @@ -122,7 +122,7 @@ ENTRY(memmove) addq %rdx, %rdi subq $0x20, %rdx /* - * We gobble 32byts backward in each loop. + * We gobble 32 bytes backward in each loop. */ 8: subq $0x20, %rdx diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index f0312d7..3eb18ac 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -689,9 +689,3 @@ _copy_from_user(void *to, const void __user *from, unsigned long n) return n; } EXPORT_SYMBOL(_copy_from_user); - -void copy_from_user_overflow(void) -{ - WARN(1, "Buffer overflow detected!\n"); -} -EXPORT_SYMBOL(copy_from_user_overflow); diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 05928aa..906fea3 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) char c; unsigned zero_len; - for (; len; --len) { + for (; len; --len, to++) { if (__get_user_nocheck(c, from++, sizeof(char))) break; - if (__put_user_nocheck(c, to++, sizeof(char))) + if (__put_user_nocheck(c, to, sizeof(char))) break; } diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c index 5247d01..2ca15b59 100644 --- a/arch/x86/mm/amdtopology.c +++ b/arch/x86/mm/amdtopology.c @@ -130,9 +130,8 @@ int __init amd_numa_init(void) } limit >>= 16; - limit <<= 24; - limit |= (1<<24)-1; limit++; + limit <<= 24; if (limit > end) limit = end; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2b97525..654be4a 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -13,12 +13,12 @@ #include <linux/perf_event.h> /* perf_sw_event */ #include <linux/hugetlb.h> /* hstate_index_to_shift */ #include <linux/prefetch.h> /* prefetchw */ +#include <linux/context_tracking.h> /* exception_enter(), ... */ #include <asm/traps.h> /* dotraplinkage, ... */ #include <asm/pgalloc.h> /* pgd_*(), ... */ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ #include <asm/fixmap.h> /* VSYSCALL_START */ -#include <asm/context_tracking.h> /* exception_enter(), ... 
*/ /* * Page fault error code bits: @@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) if (pgd_none(*pgd_ref)) return -1; - if (pgd_none(*pgd)) + if (pgd_none(*pgd)) { set_pgd(pgd, *pgd_ref); - else + arch_flush_lazy_mmu_mode(); + } else { BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + } /* * Below here mismatches are bugs because these lower tables @@ -555,7 +557,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) /* * Pentium F0 0F C7 C8 bug workaround: */ - if (boot_cpu_data.f00f_bug) { + if (boot_cpu_has_bug(X86_BUG_F00F)) { nr = (address - idt_descr.address) >> 3; if (nr == 6) { @@ -1222,7 +1224,9 @@ good_area: dotraplinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) { - exception_enter(regs); + enum ctx_state prev_state; + + prev_state = exception_enter(); __do_page_fault(regs, error_code); - exception_exit(regs); + exception_exit(prev_state); } diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 6f31ee5..252b8f5 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -137,5 +137,4 @@ void __init set_highmem_pages_init(void) add_highpages_with_active_regions(nid, zone_start_pfn, zone_end_pfn); } - totalram_pages += totalhigh_pages; } diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 4903a03..fdc5dca 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -410,9 +410,8 @@ void __init init_mem_mapping(void) /* the ISA range is always mapped regardless of memory holes */ init_memory_mapping(0, ISA_END_ADDRESS); - /* xen has big range in reserved near end of ram, skip it at first */ - addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, - PAGE_SIZE); + /* xen has big range in reserved near end of ram, skip it at first.*/ + addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE); real_end = addr + PMD_SIZE; /* step_size need to be small so pgt_buf from BRK could cover it */ @@ -516,11 +515,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); for (; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); - free_page(addr); - totalram_pages++; + free_reserved_page(virt_to_page(addr)); } #endif } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 2d19001..3ac7e31 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -427,14 +427,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base) pkmap_page_table = pte; } -static void __init add_one_highpage_init(struct page *page) -{ - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - totalhigh_pages++; -} - void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn) { @@ -448,7 +440,7 @@ void __init add_highpages_with_active_regions(int nid, start_pfn, end_pfn); for ( ; pfn < e_pfn; pfn++) if (pfn_valid(pfn)) - add_one_highpage_init(pfn_to_page(pfn)); + free_highmem_page(pfn_to_page(pfn)); } } #else diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 474e28f..caad9a0 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1011,14 +1011,12 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct) flush_tlb_all(); } -void __ref vmemmap_free(struct page *memmap, unsigned long nr_pages) +void __ref vmemmap_free(unsigned long start, unsigned 
long end) { - unsigned long start = (unsigned long)memmap; - unsigned long end = (unsigned long)(memmap + nr_pages); - remove_pagetable(start, end, false); } +#ifdef CONFIG_MEMORY_HOTREMOVE static void __meminit kernel_physical_mapping_remove(unsigned long start, unsigned long end) { @@ -1028,7 +1026,6 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end) remove_pagetable(start, end, true); } -#ifdef CONFIG_MEMORY_HOTREMOVE int __ref arch_remove_memory(u64 start, u64 size) { unsigned long start_pfn = start >> PAGE_SHIFT; @@ -1067,10 +1064,9 @@ void __init mem_init(void) /* clear_bss() already clear the empty_zero_page */ - reservedpages = 0; - - /* this will put all low memory onto the freelists */ register_page_bootmem_info(); + + /* this will put all memory onto the freelists */ totalram_pages = free_all_bootmem(); absent_pages = absent_pages_in_range(0, max_pfn); @@ -1285,18 +1281,17 @@ static long __meminitdata addr_start, addr_end; static void __meminitdata *p_start, *p_end; static int __meminitdata node_start; -int __meminit -vmemmap_populate(struct page *start_page, unsigned long size, int node) +static int __meminit vmemmap_populate_hugepages(unsigned long start, + unsigned long end, int node) { - unsigned long addr = (unsigned long)start_page; - unsigned long end = (unsigned long)(start_page + size); + unsigned long addr; unsigned long next; pgd_t *pgd; pud_t *pud; pmd_t *pmd; - for (; addr < end; addr = next) { - void *p = NULL; + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); pgd = vmemmap_pgd_populate(addr, node); if (!pgd) @@ -1306,31 +1301,14 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node) if (!pud) return -ENOMEM; - if (!cpu_has_pse) { - next = (addr + PAGE_SIZE) & PAGE_MASK; - pmd = vmemmap_pmd_populate(pud, addr, node); - - if (!pmd) - return -ENOMEM; - - p = vmemmap_pte_populate(pmd, addr, node); + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + void *p; - if (!p) - return -ENOMEM; - - addr_end = addr + PAGE_SIZE; - p_end = p + PAGE_SIZE; - } else { - next = pmd_addr_end(addr, end); - - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) { + p = vmemmap_alloc_block_buf(PMD_SIZE, node); + if (p) { pte_t entry; - p = vmemmap_alloc_block_buf(PMD_SIZE, node); - if (!p) - return -ENOMEM; - entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE); set_pmd(pmd, __pmd(pte_val(entry))); @@ -1347,15 +1325,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node) addr_end = addr + PMD_SIZE; p_end = p + PMD_SIZE; - } else - vmemmap_verify((pte_t *)pmd, node, addr, next); + continue; + } + } else if (pmd_large(*pmd)) { + vmemmap_verify((pte_t *)pmd, node, addr, next); + continue; } - + pr_warn_once("vmemmap: falling back to regular page backing\n"); + if (vmemmap_populate_basepages(addr, next, node)) + return -ENOMEM; } - sync_global_pgds((unsigned long)start_page, end - 1); return 0; } +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) +{ + int err; + + if (cpu_has_pse) + err = vmemmap_populate_hugepages(start, end, node); + else + err = vmemmap_populate_basepages(start, end, node); + if (!err) + sync_global_pgds(start, end - 1); + return err; +} + #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) void register_page_bootmem_memmap(unsigned long section_nr, struct page *start_page, unsigned long size) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 78fe3f1..9a1e658 100644 --- a/arch/x86/mm/ioremap.c 
+++ b/arch/x86/mm/ioremap.c @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr) in parallel. Reuse of the virtual address is prevented by leaving it in the global lists until we're done with it. cpa takes care of the direct mappings. */ - read_lock(&vmlist_lock); - for (p = vmlist; p; p = p->next) { - if (p->addr == (void __force *)addr) - break; - } - read_unlock(&vmlist_lock); + p = find_vm_area((void __force *)addr); if (!p) { printk(KERN_ERR "iounmap: bad address %p\n", addr); diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 72fe01e..a71c4e2 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -114,14 +114,11 @@ void numa_clear_node(int cpu) */ void __init setup_node_to_cpumask_map(void) { - unsigned int node, num = 0; + unsigned int node; /* setup nr_node_ids if not done yet */ - if (nr_node_ids == MAX_NUMNODES) { - for_each_node_mask(node, node_possible_map) - num = node; - nr_node_ids = num + 1; - } + if (nr_node_ids == MAX_NUMNODES) + setup_nr_node_ids(); /* allocate the map */ for (node = 0; node < nr_node_ids; node++) diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index b008656..d0b1773 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -68,7 +68,7 @@ static int print_split(struct split_state *s) s->gpg++; i += GPS/PAGE_SIZE; } else if (level == PG_LEVEL_2M) { - if (!(pte_val(*pte) & _PAGE_PSE)) { + if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) { printk(KERN_ERR "%lx level %d but not PSE %Lx\n", addr, level, (u64)pte_val(*pte)); @@ -130,13 +130,12 @@ static int pageattr_test(void) } failed += print_split(&sa); - srandom32(100); for (i = 0; i < NTEST; i++) { - unsigned long pfn = random32() % max_pfn_mapped; + unsigned long pfn = prandom_u32() % max_pfn_mapped; addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT); - len[i] = random32() % 100; + len[i] = prandom_u32() % 100; len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1); if (len[i] == 0) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 091934e..bb32480 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * We are safe now. Check whether the new pgprot is the same: */ old_pte = *kpte; - old_prot = new_prot = req_prot = pte_pgprot(old_pte); + old_prot = req_prot = pte_pgprot(old_pte); pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); @@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL * for the ancient hardware that doesn't support it. */ - if (pgprot_val(new_prot) & _PAGE_PRESENT) - pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; + if (pgprot_val(req_prot) & _PAGE_PRESENT) + pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL; else - pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); + pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); - new_prot = canon_pgprot(new_prot); + req_prot = canon_pgprot(req_prot); /* * old_pte points to the large page base address. 
So we need @@ -542,13 +542,14 @@ out_unlock: return do_split; } -int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase) +static int +__split_large_page(pte_t *kpte, unsigned long address, struct page *base) { + pte_t *pbase = (pte_t *)page_address(base); unsigned long pfn, pfninc = 1; unsigned int i, level; pte_t *tmp; pgprot_t ref_prot; - struct page *base = virt_to_page(pbase); spin_lock(&pgd_lock); /* @@ -633,7 +634,6 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase) static int split_large_page(pte_t *kpte, unsigned long address) { - pte_t *pbase; struct page *base; if (!debug_pagealloc) @@ -644,8 +644,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) if (!base) return -ENOMEM; - pbase = (pte_t *)page_address(base); - if (__split_large_page(kpte, address, pbase)) + if (__split_large_page(kpte, address, base)) __free_page(base); return 0; @@ -1413,6 +1412,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable) * but that can deadlock->flush only current cpu: */ __flush_tlb_all(); + + arch_flush_lazy_mmu_mode(); } #ifdef CONFIG_HIBERNATION diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 2610bd9..6574388 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -563,6 +563,13 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) if (base > __pa(high_memory-1)) return 0; + /* + * some areas in the middle of the kernel identity range + * are not mapped, like the PCI space. + */ + if (!page_is_ram(base >> PAGE_SHIFT)) + return 0; + id_sz = (__pa(high_memory-1) <= base + size) ? __pa(high_memory) - base : size; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 193350b..17fda6a 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) { paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); + /* + * NOTE! For PAE, any changes to the top page-directory-pointer-table + * entries need a full cr3 reload to flush. + */ +#ifdef CONFIG_X86_PAE + tlb->need_flush_all = 1; +#endif tlb_remove_page(tlb, virt_to_page(pmd)); } diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 901177d..305c68b 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -6,6 +6,7 @@ #include <linux/sched.h> #include <linux/pci.h> +#include <linux/pci-acpi.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/dmi.h> @@ -170,6 +171,16 @@ void pcibios_fixup_bus(struct pci_bus *b) pcibios_fixup_device_resources(dev); } +void pcibios_add_bus(struct pci_bus *bus) +{ + acpi_pci_add_bus(bus); +} + +void pcibios_remove_bus(struct pci_bus *bus) +{ + acpi_pci_remove_bus(bus); +} + /* * Only use DMI information to set this if nothing was passed * on the kernel command line (which was parsed earlier). diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 94e7662..4a9be6d 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) goto error; i = 0; list_for_each_entry(msidesc, &dev->msi_list, list) { - irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], (type == PCI_CAP_ID_MSIX) ? 
"pcifront-msi-x" : "pcifront-msi", @@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) dev_dbg(&dev->dev, "xen: msi already bound to pirq=%d\n", pirq); } - irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi", DOMID_SELF); @@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) } ret = xen_bind_pirq_msi_to_irq(dev, msidesc, - map_irq.pirq, map_irq.index, + map_irq.pirq, (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi", domid); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 5f2ecaf..b55d174 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -41,6 +41,7 @@ #include <linux/io.h> #include <linux/reboot.h> #include <linux/bcd.h> +#include <linux/ucs2_string.h> #include <asm/setup.h> #include <asm/efi.h> @@ -48,9 +49,17 @@ #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/x86_init.h> +#include <asm/rtc.h> #define EFI_DEBUG 1 +/* + * There's some additional metadata associated with each + * variable. Intel's reference implementation is 60 bytes - bump that + * to account for potential alignment constraints + */ +#define VAR_METADATA_SIZE 64 + struct efi __read_mostly efi = { .mps = EFI_INVALID_TABLE_ADDR, .acpi = EFI_INVALID_TABLE_ADDR, @@ -69,6 +78,13 @@ struct efi_memory_map memmap; static struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; +static u64 efi_var_store_size; +static u64 efi_var_remaining_size; +static u64 efi_var_max_var_size; +static u64 boot_used_size; +static u64 boot_var_size; +static u64 active_size; + unsigned long x86_efi_facility; /* @@ -98,6 +114,15 @@ static int __init setup_add_efi_memmap(char *arg) } early_param("add_efi_memmap", setup_add_efi_memmap); +static bool efi_no_storage_paranoia; + +static int __init setup_storage_paranoia(char *arg) +{ + efi_no_storage_paranoia = true; + return 0; +} +early_param("efi_no_storage_paranoia", setup_storage_paranoia); + static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { @@ -162,8 +187,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) { - return efi_call_virt3(get_next_variable, - name_size, name, vendor); + efi_status_t status; + static bool finished = false; + static u64 var_size; + + status = efi_call_virt3(get_next_variable, + name_size, name, vendor); + + if (status == EFI_NOT_FOUND) { + finished = true; + if (var_size < boot_used_size) { + boot_var_size = boot_used_size - var_size; + active_size += boot_var_size; + } else { + printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n"); + } + } + + if (boot_used_size && !finished) { + unsigned long size; + u32 attr; + efi_status_t s; + void *tmp; + + s = virt_efi_get_variable(name, vendor, &attr, &size, NULL); + + if (s != EFI_BUFFER_TOO_SMALL || !size) + return status; + + tmp = kmalloc(size, GFP_ATOMIC); + + if (!tmp) + return status; + + s = virt_efi_get_variable(name, vendor, &attr, &size, tmp); + + if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) { + var_size += size; + var_size += ucs2_strsize(name, 1024); + active_size += size; + active_size += VAR_METADATA_SIZE; + active_size += ucs2_strsize(name, 1024); + } + + kfree(tmp); + } + + return status; } static efi_status_t virt_efi_set_variable(efi_char16_t *name, @@ -172,9 +242,34 @@ static efi_status_t 
virt_efi_set_variable(efi_char16_t *name, unsigned long data_size, void *data) { - return efi_call_virt5(set_variable, - name, vendor, attr, - data_size, data); + efi_status_t status; + u32 orig_attr = 0; + unsigned long orig_size = 0; + + status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size, + NULL); + + if (status != EFI_BUFFER_TOO_SMALL) + orig_size = 0; + + status = efi_call_virt5(set_variable, + name, vendor, attr, + data_size, data); + + if (status == EFI_SUCCESS) { + if (orig_size) { + active_size -= orig_size; + active_size -= ucs2_strsize(name, 1024); + active_size -= VAR_METADATA_SIZE; + } + if (data_size) { + active_size += data_size; + active_size += ucs2_strsize(name, 1024); + active_size += VAR_METADATA_SIZE; + } + } + + return status; } static efi_status_t virt_efi_query_variable_info(u32 attr, @@ -258,10 +353,10 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm, int efi_set_rtc_mmss(unsigned long nowtime) { - int real_seconds, real_minutes; efi_status_t status; efi_time_t eft; efi_time_cap_t cap; + struct rtc_time tm; status = efi.get_time(&eft, &cap); if (status != EFI_SUCCESS) { @@ -269,13 +364,20 @@ int efi_set_rtc_mmss(unsigned long nowtime) return -1; } - real_seconds = nowtime % 60; - real_minutes = nowtime / 60; - if (((abs(real_minutes - eft.minute) + 15)/30) & 1) - real_minutes += 30; - real_minutes %= 60; - eft.minute = real_minutes; - eft.second = real_seconds; + rtc_time_to_tm(nowtime, &tm); + if (!rtc_valid_tm(&tm)) { + eft.year = tm.tm_year + 1900; + eft.month = tm.tm_mon + 1; + eft.day = tm.tm_mday; + eft.minute = tm.tm_min; + eft.second = tm.tm_sec; + eft.nanosecond = 0; + } else { + printk(KERN_ERR + "%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n", + __FUNCTION__, nowtime); + return -1; + } status = efi.set_time(&eft); if (status != EFI_SUCCESS) { @@ -682,6 +784,9 @@ void __init efi_init(void) char vendor[100] = "unknown"; int i = 0; void *tmp; + struct setup_data *data; + struct efi_var_bootdata *efi_var_data; + u64 pa_data; #ifdef CONFIG_X86_32 if (boot_params.efi_info.efi_systab_hi || @@ -699,6 +804,22 @@ void __init efi_init(void) if (efi_systab_init(efi_phys.systab)) return; + pa_data = boot_params.hdr.setup_data; + while (pa_data) { + data = early_ioremap(pa_data, sizeof(*efi_var_data)); + if (data->type == SETUP_EFI_VARS) { + efi_var_data = (struct efi_var_bootdata *)data; + + efi_var_store_size = efi_var_data->store_size; + efi_var_remaining_size = efi_var_data->remaining_size; + efi_var_max_var_size = efi_var_data->max_var_size; + } + pa_data = data->next; + early_iounmap(data, sizeof(*efi_var_data)); + } + + boot_used_size = efi_var_store_size - efi_var_remaining_size; + set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); /* @@ -999,3 +1120,48 @@ u64 efi_mem_attributes(unsigned long phys_addr) } return 0; } + +/* + * Some firmware has serious problems when using more than 50% of the EFI + * variable store, i.e. it triggers bugs that can brick machines. Ensure that + * we never use more than this safe limit. + * + * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable + * store. 
+ */ +efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) +{ + efi_status_t status; + u64 storage_size, remaining_size, max_size; + + status = efi.query_variable_info(attributes, &storage_size, + &remaining_size, &max_size); + if (status != EFI_SUCCESS) + return status; + + if (!max_size && remaining_size > size) + printk_once(KERN_ERR FW_BUG "Broken EFI implementation" + " is returning MaxVariableSize=0\n"); + /* + * Some firmware implementations refuse to boot if there's insufficient + * space in the variable store. We account for that by refusing the + * write if permitting it would reduce the available space to under + * 50%. However, some firmware won't reclaim variable space until + * after the used (not merely the actively used) space drops below + * a threshold. We can approximate that case with the value calculated + * above. If both the firmware and our calculations indicate that the + * available space would drop below 50%, refuse the write. + */ + + if (!storage_size || size > remaining_size || + (max_size && size > max_size)) + return EFI_OUT_OF_RESOURCES; + + if (!efi_no_storage_paranoia && + ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && + (remaining_size - size < storage_size / 2))) + return EFI_OUT_OF_RESOURCES; + + return EFI_SUCCESS; +} +EXPORT_SYMBOL_GPL(efi_query_variable_store); diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index e31bcd8..a0a0a43 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -356,8 +356,7 @@ static int __init sfi_parse_gpio(struct sfi_table_header *table) num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry); pentry = (struct sfi_gpio_table_entry *)sb->pentry; - gpio_table = (struct sfi_gpio_table_entry *) - kmalloc(num * sizeof(*pentry), GFP_KERNEL); + gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL); if (!gpio_table) return -1; memcpy(gpio_table, pentry, num * sizeof(*pentry)); diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c index 225bd0f..d62b0a3 100644 --- a/arch/x86/platform/mrst/vrtc.c +++ b/arch/x86/platform/mrst/vrtc.c @@ -85,27 +85,35 @@ unsigned long vrtc_get_time(void) return mktime(year, mon, mday, hour, min, sec); } -/* Only care about the minutes and seconds */ int vrtc_set_mmss(unsigned long nowtime) { - int real_sec, real_min; unsigned long flags; - int vrtc_min; - - spin_lock_irqsave(&rtc_lock, flags); - vrtc_min = vrtc_cmos_read(RTC_MINUTES); - - real_sec = nowtime % 60; - real_min = nowtime / 60; - if (((abs(real_min - vrtc_min) + 15)/30) & 1) - real_min += 30; - real_min %= 60; - - vrtc_cmos_write(real_sec, RTC_SECONDS); - vrtc_cmos_write(real_min, RTC_MINUTES); - spin_unlock_irqrestore(&rtc_lock, flags); - - return 0; + struct rtc_time tm; + int year; + int retval = 0; + + rtc_time_to_tm(nowtime, &tm); + if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) { + /* + * tm.year is the number of years since 1900, and the + * vrtc need the years since 1972. 
+ */ + year = tm.tm_year - 72; + spin_lock_irqsave(&rtc_lock, flags); + vrtc_cmos_write(year, RTC_YEAR); + vrtc_cmos_write(tm.tm_mon, RTC_MONTH); + vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH); + vrtc_cmos_write(tm.tm_hour, RTC_HOURS); + vrtc_cmos_write(tm.tm_min, RTC_MINUTES); + vrtc_cmos_write(tm.tm_sec, RTC_SECONDS); + spin_unlock_irqrestore(&rtc_lock, flags); + } else { + printk(KERN_ERR + "%s: Invalid vRTC value: write of %lx to vRTC failed\n", + __FUNCTION__, nowtime); + retval = -EINVAL; + } + return retval; } void __init mrst_rtc_init(void) diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c index 74704be..9a2e590 100644 --- a/arch/x86/platform/olpc/olpc-xo1-sci.c +++ b/arch/x86/platform/olpc/olpc-xo1-sci.c @@ -460,7 +460,6 @@ static int setup_power_button(struct platform_device *pdev) static void free_power_button(void) { input_unregister_device(power_button_idev); - input_free_device(power_button_idev); } static int setup_ebook_switch(struct platform_device *pdev) @@ -491,7 +490,6 @@ static int setup_ebook_switch(struct platform_device *pdev) static void free_ebook_switch(void) { input_unregister_device(ebook_switch_idev); - input_free_device(ebook_switch_idev); } static int setup_lid_switch(struct platform_device *pdev) @@ -526,6 +524,7 @@ static int setup_lid_switch(struct platform_device *pdev) err_create_attr: input_unregister_device(lid_switch_idev); + lid_switch_idev = NULL; err_register: input_free_device(lid_switch_idev); return r; @@ -535,7 +534,6 @@ static void free_lid_switch(void) { device_remove_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode); input_unregister_device(lid_switch_idev); - input_free_device(lid_switch_idev); } static int xo1_sci_probe(struct platform_device *pdev) diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index 98718f6..5c86786 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c @@ -159,10 +159,9 @@ static __init int uv_rtc_allocate_timers(void) { int cpu; - blade_info = kmalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL); + blade_info = kzalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL); if (!blade_info) return -ENOMEM; - memset(blade_info, 0, uv_possible_blades * sizeof(void *)); for_each_present_cpu(cpu) { int nid = cpu_to_node(cpu); diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 120cee1..6d6e907 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -11,6 +11,7 @@ #include <linux/suspend.h> #include <linux/export.h> #include <linux/smp.h> +#include <linux/perf_event.h> #include <asm/pgtable.h> #include <asm/proto.h> @@ -61,11 +62,9 @@ static void __save_processor_state(struct saved_context *ctxt) * descriptor tables */ #ifdef CONFIG_X86_32 - store_gdt(&ctxt->gdt); store_idt(&ctxt->idt); #else /* CONFIG_X86_64 */ - store_gdt((struct desc_ptr *)&ctxt->gdt_limit); store_idt((struct desc_ptr *)&ctxt->idt_limit); #endif store_tr(ctxt->tr); @@ -134,7 +133,10 @@ static void fix_processor_context(void) { int cpu = smp_processor_id(); struct tss_struct *t = &per_cpu(init_tss, cpu); - +#ifdef CONFIG_X86_64 + struct desc_struct *desc = get_cpu_gdt_table(cpu); + tss_desc tss; +#endif set_tss_desc(cpu, t); /* * This just modifies memory; should not be * necessary. But... 
This is necessary, because @@ -143,7 +145,9 @@ static void fix_processor_context(void) */ #ifdef CONFIG_X86_64 - get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; + memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc)); + tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */ + write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS); syscall_init(); /* This sets MSR_*STAR and related */ #endif @@ -182,11 +186,9 @@ static void __restore_processor_state(struct saved_context *ctxt) * ltr is done i fix_processor_context(). */ #ifdef CONFIG_X86_32 - load_gdt(&ctxt->gdt); load_idt(&ctxt->idt); #else /* CONFIG_X86_64 */ - load_gdt((const struct desc_ptr *)&ctxt->gdt_limit); load_idt((const struct desc_ptr *)&ctxt->idt_limit); #endif @@ -228,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt) do_fpu_end(); x86_platform.restore_sched_clock_state(); mtrr_bp_restore(); + perf_restore_debug_store(); } /* Needed by apm.c */ diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index bae601f..e812034 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -39,4 +39,5 @@ $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/ina HOST_EXTRACFLAGS += -I$(srctree)/tools/include hostprogs-y += relocs +relocs-objs := relocs_32.o relocs_64.o relocs_common.o relocs: $(obj)/relocs diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 79d67bd..590be10 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -1,43 +1,36 @@ -#include <stdio.h> -#include <stdarg.h> -#include <stdlib.h> -#include <stdint.h> -#include <string.h> -#include <errno.h> -#include <unistd.h> -#include <elf.h> -#include <byteswap.h> -#define USE_BSD -#include <endian.h> -#include <regex.h> -#include <tools/le_byteshift.h> - -static void die(char *fmt, ...); - -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -static Elf32_Ehdr ehdr; -static unsigned long reloc_count, reloc_idx; -static unsigned long *relocs; -static unsigned long reloc16_count, reloc16_idx; -static unsigned long *relocs16; +/* This is included from relocs_32/64.c */ + +#define ElfW(type) _ElfW(ELF_BITS, type) +#define _ElfW(bits, type) __ElfW(bits, type) +#define __ElfW(bits, type) Elf##bits##_##type + +#define Elf_Addr ElfW(Addr) +#define Elf_Ehdr ElfW(Ehdr) +#define Elf_Phdr ElfW(Phdr) +#define Elf_Shdr ElfW(Shdr) +#define Elf_Sym ElfW(Sym) + +static Elf_Ehdr ehdr; + +struct relocs { + uint32_t *offset; + unsigned long count; + unsigned long size; +}; + +static struct relocs relocs16; +static struct relocs relocs32; +static struct relocs relocs64; struct section { - Elf32_Shdr shdr; + Elf_Shdr shdr; struct section *link; - Elf32_Sym *symtab; - Elf32_Rel *reltab; + Elf_Sym *symtab; + Elf_Rel *reltab; char *strtab; }; static struct section *secs; -enum symtype { - S_ABS, - S_REL, - S_SEG, - S_LIN, - S_NSYMTYPES -}; - static const char * const sym_regex_kernel[S_NSYMTYPES] = { /* * Following symbols have been audited. 
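
The ElfW() macros introduced above are what let a single relocs.c serve both word sizes; the new relocs_32.c and relocs_64.c wrappers only define ELF_BITS and the related macros and then include it. A stand-alone sketch of that token-pasting pattern (the typedefs are local stand-ins, not the real <elf.h> definitions):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical sketch of the ElfW() trick: the same source is compiled twice
 * with a different BITS value, and ElfW(Addr) expands to Elf32_Addr or
 * Elf64_Addr accordingly. The double expansion ensures BITS is expanded
 * before the token paste happens. */
#ifndef BITS
#define BITS 64                         /* pretend we were included from the 64-bit wrapper */
#endif
#define ElfW(type)         _ElfW(BITS, type)
#define _ElfW(bits, type)  __ElfW(bits, type)
#define __ElfW(bits, type) Elf##bits##_##type

typedef uint32_t Elf32_Addr;            /* stand-ins for the <elf.h> types */
typedef uint64_t Elf64_Addr;

int main(void)
{
        printf("ELF%d: ElfW(Addr) is %zu bytes\n", BITS, sizeof(ElfW(Addr)));
        return 0;
}
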
There values are constant and do @@ -49,6 +42,9 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { "^(xen_irq_disable_direct_reloc$|" "xen_save_fl_direct_reloc$|" "VDSO|" +#if ELF_BITS == 64 + "__vvar_page|" +#endif "__crc_)", /* @@ -72,6 +68,11 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { "__end_rodata|" "__initramfs_start|" "(jiffies|jiffies_64)|" +#if ELF_BITS == 64 + "__per_cpu_load|" + "init_per_cpu__.*|" + "__end_rodata_hpage_align|" +#endif "_end)$" }; @@ -132,15 +133,6 @@ static void regex_init(int use_real_mode) } } -static void die(char *fmt, ...) -{ - va_list ap; - va_start(ap, fmt); - vfprintf(stderr, fmt, ap); - va_end(ap); - exit(1); -} - static const char *sym_type(unsigned type) { static const char *type_name[] = { @@ -198,6 +190,24 @@ static const char *rel_type(unsigned type) { static const char *type_name[] = { #define REL_TYPE(X) [X] = #X +#if ELF_BITS == 64 + REL_TYPE(R_X86_64_NONE), + REL_TYPE(R_X86_64_64), + REL_TYPE(R_X86_64_PC32), + REL_TYPE(R_X86_64_GOT32), + REL_TYPE(R_X86_64_PLT32), + REL_TYPE(R_X86_64_COPY), + REL_TYPE(R_X86_64_GLOB_DAT), + REL_TYPE(R_X86_64_JUMP_SLOT), + REL_TYPE(R_X86_64_RELATIVE), + REL_TYPE(R_X86_64_GOTPCREL), + REL_TYPE(R_X86_64_32), + REL_TYPE(R_X86_64_32S), + REL_TYPE(R_X86_64_16), + REL_TYPE(R_X86_64_PC16), + REL_TYPE(R_X86_64_8), + REL_TYPE(R_X86_64_PC8), +#else REL_TYPE(R_386_NONE), REL_TYPE(R_386_32), REL_TYPE(R_386_PC32), @@ -213,6 +223,7 @@ static const char *rel_type(unsigned type) REL_TYPE(R_386_PC8), REL_TYPE(R_386_16), REL_TYPE(R_386_PC16), +#endif #undef REL_TYPE }; const char *name = "unknown type rel type name"; @@ -240,7 +251,7 @@ static const char *sec_name(unsigned shndx) return name; } -static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym) +static const char *sym_name(const char *sym_strtab, Elf_Sym *sym) { const char *name; name = "<noname>"; @@ -253,15 +264,42 @@ static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym) return name; } +static Elf_Sym *sym_lookup(const char *symname) +{ + int i; + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + long nsyms; + char *strtab; + Elf_Sym *symtab; + Elf_Sym *sym; + + if (sec->shdr.sh_type != SHT_SYMTAB) + continue; + nsyms = sec->shdr.sh_size/sizeof(Elf_Sym); + symtab = sec->symtab; + strtab = sec->link->strtab; + + for (sym = symtab; --nsyms >= 0; sym++) { + if (!sym->st_name) + continue; + if (strcmp(symname, strtab + sym->st_name) == 0) + return sym; + } + } + return 0; +} #if BYTE_ORDER == LITTLE_ENDIAN #define le16_to_cpu(val) (val) #define le32_to_cpu(val) (val) +#define le64_to_cpu(val) (val) #endif #if BYTE_ORDER == BIG_ENDIAN #define le16_to_cpu(val) bswap_16(val) #define le32_to_cpu(val) bswap_32(val) +#define le64_to_cpu(val) bswap_64(val) #endif static uint16_t elf16_to_cpu(uint16_t val) @@ -274,6 +312,23 @@ static uint32_t elf32_to_cpu(uint32_t val) return le32_to_cpu(val); } +#define elf_half_to_cpu(x) elf16_to_cpu(x) +#define elf_word_to_cpu(x) elf32_to_cpu(x) + +#if ELF_BITS == 64 +static uint64_t elf64_to_cpu(uint64_t val) +{ + return le64_to_cpu(val); +} +#define elf_addr_to_cpu(x) elf64_to_cpu(x) +#define elf_off_to_cpu(x) elf64_to_cpu(x) +#define elf_xword_to_cpu(x) elf64_to_cpu(x) +#else +#define elf_addr_to_cpu(x) elf32_to_cpu(x) +#define elf_off_to_cpu(x) elf32_to_cpu(x) +#define elf_xword_to_cpu(x) elf32_to_cpu(x) +#endif + static void read_ehdr(FILE *fp) { if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) { @@ -283,8 +338,8 @@ static void read_ehdr(FILE *fp) if 
(memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) { die("No ELF magic\n"); } - if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) { - die("Not a 32 bit executable\n"); + if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) { + die("Not a %d bit executable\n", ELF_BITS); } if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) { die("Not a LSB ELF executable\n"); @@ -293,36 +348,36 @@ static void read_ehdr(FILE *fp) die("Unknown ELF version\n"); } /* Convert the fields to native endian */ - ehdr.e_type = elf16_to_cpu(ehdr.e_type); - ehdr.e_machine = elf16_to_cpu(ehdr.e_machine); - ehdr.e_version = elf32_to_cpu(ehdr.e_version); - ehdr.e_entry = elf32_to_cpu(ehdr.e_entry); - ehdr.e_phoff = elf32_to_cpu(ehdr.e_phoff); - ehdr.e_shoff = elf32_to_cpu(ehdr.e_shoff); - ehdr.e_flags = elf32_to_cpu(ehdr.e_flags); - ehdr.e_ehsize = elf16_to_cpu(ehdr.e_ehsize); - ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize); - ehdr.e_phnum = elf16_to_cpu(ehdr.e_phnum); - ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize); - ehdr.e_shnum = elf16_to_cpu(ehdr.e_shnum); - ehdr.e_shstrndx = elf16_to_cpu(ehdr.e_shstrndx); + ehdr.e_type = elf_half_to_cpu(ehdr.e_type); + ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine); + ehdr.e_version = elf_word_to_cpu(ehdr.e_version); + ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry); + ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff); + ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff); + ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags); + ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize); + ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize); + ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum); + ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize); + ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum); + ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx); if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) { die("Unsupported ELF header type\n"); } - if (ehdr.e_machine != EM_386) { - die("Not for x86\n"); + if (ehdr.e_machine != ELF_MACHINE) { + die("Not for %s\n", ELF_MACHINE_NAME); } if (ehdr.e_version != EV_CURRENT) { die("Unknown ELF version\n"); } - if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) { + if (ehdr.e_ehsize != sizeof(Elf_Ehdr)) { die("Bad Elf header size\n"); } - if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) { + if (ehdr.e_phentsize != sizeof(Elf_Phdr)) { die("Bad program header entry\n"); } - if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) { + if (ehdr.e_shentsize != sizeof(Elf_Shdr)) { die("Bad section header entry\n"); } if (ehdr.e_shstrndx >= ehdr.e_shnum) { @@ -333,7 +388,7 @@ static void read_ehdr(FILE *fp) static void read_shdrs(FILE *fp) { int i; - Elf32_Shdr shdr; + Elf_Shdr shdr; secs = calloc(ehdr.e_shnum, sizeof(struct section)); if (!secs) { @@ -349,16 +404,16 @@ static void read_shdrs(FILE *fp) if (fread(&shdr, sizeof shdr, 1, fp) != 1) die("Cannot read ELF section headers %d/%d: %s\n", i, ehdr.e_shnum, strerror(errno)); - sec->shdr.sh_name = elf32_to_cpu(shdr.sh_name); - sec->shdr.sh_type = elf32_to_cpu(shdr.sh_type); - sec->shdr.sh_flags = elf32_to_cpu(shdr.sh_flags); - sec->shdr.sh_addr = elf32_to_cpu(shdr.sh_addr); - sec->shdr.sh_offset = elf32_to_cpu(shdr.sh_offset); - sec->shdr.sh_size = elf32_to_cpu(shdr.sh_size); - sec->shdr.sh_link = elf32_to_cpu(shdr.sh_link); - sec->shdr.sh_info = elf32_to_cpu(shdr.sh_info); - sec->shdr.sh_addralign = elf32_to_cpu(shdr.sh_addralign); - sec->shdr.sh_entsize = elf32_to_cpu(shdr.sh_entsize); + sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name); + sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type); + sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags); + sec->shdr.sh_addr = 
elf_addr_to_cpu(shdr.sh_addr); + sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset); + sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size); + sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link); + sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info); + sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign); + sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize); if (sec->shdr.sh_link < ehdr.e_shnum) sec->link = &secs[sec->shdr.sh_link]; } @@ -412,12 +467,12 @@ static void read_symtabs(FILE *fp) die("Cannot read symbol table: %s\n", strerror(errno)); } - for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { - Elf32_Sym *sym = &sec->symtab[j]; - sym->st_name = elf32_to_cpu(sym->st_name); - sym->st_value = elf32_to_cpu(sym->st_value); - sym->st_size = elf32_to_cpu(sym->st_size); - sym->st_shndx = elf16_to_cpu(sym->st_shndx); + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) { + Elf_Sym *sym = &sec->symtab[j]; + sym->st_name = elf_word_to_cpu(sym->st_name); + sym->st_value = elf_addr_to_cpu(sym->st_value); + sym->st_size = elf_xword_to_cpu(sym->st_size); + sym->st_shndx = elf_half_to_cpu(sym->st_shndx); } } } @@ -428,7 +483,7 @@ static void read_relocs(FILE *fp) int i,j; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; - if (sec->shdr.sh_type != SHT_REL) { + if (sec->shdr.sh_type != SHT_REL_TYPE) { continue; } sec->reltab = malloc(sec->shdr.sh_size); @@ -445,10 +500,13 @@ static void read_relocs(FILE *fp) die("Cannot read symbol table: %s\n", strerror(errno)); } - for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { - Elf32_Rel *rel = &sec->reltab[j]; - rel->r_offset = elf32_to_cpu(rel->r_offset); - rel->r_info = elf32_to_cpu(rel->r_info); + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + rel->r_offset = elf_addr_to_cpu(rel->r_offset); + rel->r_info = elf_xword_to_cpu(rel->r_info); +#if (SHT_REL_TYPE == SHT_RELA) + rel->r_addend = elf_xword_to_cpu(rel->r_addend); +#endif } } } @@ -457,6 +515,13 @@ static void read_relocs(FILE *fp) static void print_absolute_symbols(void) { int i; + const char *format; + + if (ELF_BITS == 64) + format = "%5d %016"PRIx64" %5"PRId64" %10s %10s %12s %s\n"; + else + format = "%5d %08"PRIx32" %5"PRId32" %10s %10s %12s %s\n"; + printf("Absolute symbols\n"); printf(" Num: Value Size Type Bind Visibility Name\n"); for (i = 0; i < ehdr.e_shnum; i++) { @@ -468,19 +533,19 @@ static void print_absolute_symbols(void) continue; } sym_strtab = sec->link->strtab; - for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { - Elf32_Sym *sym; + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) { + Elf_Sym *sym; const char *name; sym = &sec->symtab[j]; name = sym_name(sym_strtab, sym); if (sym->st_shndx != SHN_ABS) { continue; } - printf("%5d %08x %5d %10s %10s %12s %s\n", + printf(format, j, sym->st_value, sym->st_size, - sym_type(ELF32_ST_TYPE(sym->st_info)), - sym_bind(ELF32_ST_BIND(sym->st_info)), - sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)), + sym_type(ELF_ST_TYPE(sym->st_info)), + sym_bind(ELF_ST_BIND(sym->st_info)), + sym_visibility(ELF_ST_VISIBILITY(sym->st_other)), name); } } @@ -490,14 +555,20 @@ static void print_absolute_symbols(void) static void print_absolute_relocs(void) { int i, printed = 0; + const char *format; + + if (ELF_BITS == 64) + format = "%016"PRIx64" %016"PRIx64" %10s %016"PRIx64" %s\n"; + else + format = "%08"PRIx32" %08"PRIx32" %10s %08"PRIx32" %s\n"; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; struct section 
*sec_applies, *sec_symtab; char *sym_strtab; - Elf32_Sym *sh_symtab; + Elf_Sym *sh_symtab; int j; - if (sec->shdr.sh_type != SHT_REL) { + if (sec->shdr.sh_type != SHT_REL_TYPE) { continue; } sec_symtab = sec->link; @@ -507,12 +578,12 @@ static void print_absolute_relocs(void) } sh_symtab = sec_symtab->symtab; sym_strtab = sec_symtab->link->strtab; - for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { - Elf32_Rel *rel; - Elf32_Sym *sym; + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel; + Elf_Sym *sym; const char *name; rel = &sec->reltab[j]; - sym = &sh_symtab[ELF32_R_SYM(rel->r_info)]; + sym = &sh_symtab[ELF_R_SYM(rel->r_info)]; name = sym_name(sym_strtab, sym); if (sym->st_shndx != SHN_ABS) { continue; @@ -542,10 +613,10 @@ static void print_absolute_relocs(void) printed = 1; } - printf("%08x %08x %10s %08x %s\n", + printf(format, rel->r_offset, rel->r_info, - rel_type(ELF32_R_TYPE(rel->r_info)), + rel_type(ELF_R_TYPE(rel->r_info)), sym->st_value, name); } @@ -555,19 +626,34 @@ static void print_absolute_relocs(void) printf("\n"); } -static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym), - int use_real_mode) +static void add_reloc(struct relocs *r, uint32_t offset) +{ + if (r->count == r->size) { + unsigned long newsize = r->size + 50000; + void *mem = realloc(r->offset, newsize * sizeof(r->offset[0])); + + if (!mem) + die("realloc of %ld entries for relocs failed\n", + newsize); + r->offset = mem; + r->size = newsize; + } + r->offset[r->count++] = offset; +} + +static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, + Elf_Sym *sym, const char *symname)) { int i; /* Walk through the relocations */ for (i = 0; i < ehdr.e_shnum; i++) { char *sym_strtab; - Elf32_Sym *sh_symtab; + Elf_Sym *sh_symtab; struct section *sec_applies, *sec_symtab; int j; struct section *sec = &secs[i]; - if (sec->shdr.sh_type != SHT_REL) { + if (sec->shdr.sh_type != SHT_REL_TYPE) { continue; } sec_symtab = sec->link; @@ -577,101 +663,281 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym), } sh_symtab = sec_symtab->symtab; sym_strtab = sec_symtab->link->strtab; - for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { - Elf32_Rel *rel; - Elf32_Sym *sym; - unsigned r_type; - const char *symname; - int shn_abs; + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)]; + const char *symname = sym_name(sym_strtab, sym); - rel = &sec->reltab[j]; - sym = &sh_symtab[ELF32_R_SYM(rel->r_info)]; - r_type = ELF32_R_TYPE(rel->r_info); - - shn_abs = sym->st_shndx == SHN_ABS; - - switch (r_type) { - case R_386_NONE: - case R_386_PC32: - case R_386_PC16: - case R_386_PC8: - /* - * NONE can be ignored and and PC relative - * relocations don't need to be adjusted. - */ - break; + process(sec, rel, sym, symname); + } + } +} - case R_386_16: - symname = sym_name(sym_strtab, sym); - if (!use_real_mode) - goto bad; - if (shn_abs) { - if (is_reloc(S_ABS, symname)) - break; - else if (!is_reloc(S_SEG, symname)) - goto bad; - } else { - if (is_reloc(S_LIN, symname)) - goto bad; - else - break; - } - visit(rel, sym); - break; +/* + * The .data..percpu section is a special case for x86_64 SMP kernels. + * It is used to initialize the actual per_cpu areas and to provide + * definitions for the per_cpu variables that correspond to their offsets + * within the percpu area. 
Since the values of all of the symbols need + * to be offsets from the start of the per_cpu area the virtual address + * (sh_addr) of .data..percpu is 0 in SMP kernels. + * + * This means that: + * + * Relocations that reference symbols in the per_cpu area do not + * need further relocation (since the value is an offset relative + * to the start of the per_cpu area that does not change). + * + * Relocations that apply to the per_cpu area need to have their + * offset adjusted by by the value of __per_cpu_load to make them + * point to the correct place in the loaded image (because the + * virtual address of .data..percpu is 0). + * + * For non SMP kernels .data..percpu is linked as part of the normal + * kernel data and does not require special treatment. + * + */ +static int per_cpu_shndx = -1; +Elf_Addr per_cpu_load_addr; - case R_386_32: - symname = sym_name(sym_strtab, sym); - if (shn_abs) { - if (is_reloc(S_ABS, symname)) - break; - else if (!is_reloc(S_REL, symname)) - goto bad; - } else { - if (use_real_mode && - !is_reloc(S_LIN, symname)) - break; - } - visit(rel, sym); - break; - default: - die("Unsupported relocation type: %s (%d)\n", - rel_type(r_type), r_type); +static void percpu_init(void) +{ + int i; + for (i = 0; i < ehdr.e_shnum; i++) { + ElfW(Sym) *sym; + if (strcmp(sec_name(i), ".data..percpu")) + continue; + + if (secs[i].shdr.sh_addr != 0) /* non SMP kernel */ + return; + + sym = sym_lookup("__per_cpu_load"); + if (!sym) + die("can't find __per_cpu_load\n"); + + per_cpu_shndx = i; + per_cpu_load_addr = sym->st_value; + return; + } +} + +#if ELF_BITS == 64 + +/* + * Check to see if a symbol lies in the .data..percpu section. + * For some as yet not understood reason the "__init_begin" + * symbol which immediately preceeds the .data..percpu section + * also shows up as it it were part of it so we do an explict + * check for that symbol name and ignore it. + */ +static int is_percpu_sym(ElfW(Sym) *sym, const char *symname) +{ + return (sym->st_shndx == per_cpu_shndx) && + strcmp(symname, "__init_begin"); +} + + +static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, + const char *symname) +{ + unsigned r_type = ELF64_R_TYPE(rel->r_info); + ElfW(Addr) offset = rel->r_offset; + int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname); + + if (sym->st_shndx == SHN_UNDEF) + return 0; + + /* + * Adjust the offset if this reloc applies to the percpu section. + */ + if (sec->shdr.sh_info == per_cpu_shndx) + offset += per_cpu_load_addr; + + switch (r_type) { + case R_X86_64_NONE: + case R_X86_64_PC32: + /* + * NONE can be ignored and PC relative relocations don't + * need to be adjusted. + */ + break; + + case R_X86_64_32: + case R_X86_64_32S: + case R_X86_64_64: + /* + * References to the percpu area don't need to be adjusted. + */ + if (is_percpu_sym(sym, symname)) + break; + + if (shn_abs) { + /* + * Whitelisted absolute symbols do not require + * relocation. + */ + if (is_reloc(S_ABS, symname)) break; - bad: - symname = sym_name(sym_strtab, sym); - die("Invalid %s %s relocation: %s\n", - shn_abs ? "absolute" : "relative", - rel_type(r_type), symname); - } + + die("Invalid absolute %s relocation: %s\n", + rel_type(r_type), symname); + break; } + + /* + * Relocation offsets for 64 bit kernels are output + * as 32 bits and sign extended back to 64 bits when + * the relocations are processed. + * Make sure that the offset will fit. 
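
Two of the checks described in the comments above lend themselves to a tiny stand-alone illustration (the helper names rebase_percpu_offset and offset_fits_32bit are made up, simplified from the do_reloc64() logic in this hunk): the rebase of percpu relocations onto __per_cpu_load, and the test that a 64-bit offset survives the 32-bit, sign-extended encoding used by the relocation table.

#include <stdio.h>
#include <stdint.h>

/* .data..percpu is linked at VA 0 on SMP x86-64, so a relocation that patches
 * it is rebased onto __per_cpu_load to land in the right place in the image. */
static uint64_t rebase_percpu_offset(uint64_t r_offset, int applies_to_percpu,
                                     uint64_t per_cpu_load_addr)
{
        return applies_to_percpu ? r_offset + per_cpu_load_addr : r_offset;
}

/* The relocation table stores 32-bit entries that are sign-extended back to
 * 64 bits when processed, so the offset must survive that round trip. */
static int offset_fits_32bit(uint64_t offset)
{
        return (int32_t)offset == (int64_t)offset;
}

int main(void)
{
        /* made-up offset and load address, purely for illustration */
        uint64_t off = rebase_percpu_offset(0x40, 1, 0xffffffff80a00000ULL);

        printf("rebased offset 0x%llx fits in 32 bits: %d\n",
               (unsigned long long)off, offset_fits_32bit(off));      /* 1 */
        printf("0x100000000 fits in 32 bits: %d\n",
               offset_fits_32bit(0x100000000ULL));                    /* 0 */
        return 0;
}
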
+ */ + if ((int32_t)offset != (int64_t)offset) + die("Relocation offset doesn't fit in 32 bits\n"); + + if (r_type == R_X86_64_64) + add_reloc(&relocs64, offset); + else + add_reloc(&relocs32, offset); + break; + + default: + die("Unsupported relocation type: %s (%d)\n", + rel_type(r_type), r_type); + break; } + + return 0; } -static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym) +#else + +static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + const char *symname) { - if (ELF32_R_TYPE(rel->r_info) == R_386_16) - reloc16_count++; - else - reloc_count++; + unsigned r_type = ELF32_R_TYPE(rel->r_info); + int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname); + + switch (r_type) { + case R_386_NONE: + case R_386_PC32: + case R_386_PC16: + case R_386_PC8: + /* + * NONE can be ignored and PC relative relocations don't + * need to be adjusted. + */ + break; + + case R_386_32: + if (shn_abs) { + /* + * Whitelisted absolute symbols do not require + * relocation. + */ + if (is_reloc(S_ABS, symname)) + break; + + die("Invalid absolute %s relocation: %s\n", + rel_type(r_type), symname); + break; + } + + add_reloc(&relocs32, rel->r_offset); + break; + + default: + die("Unsupported relocation type: %s (%d)\n", + rel_type(r_type), r_type); + break; + } + + return 0; } -static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym) +static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + const char *symname) { - /* Remember the address that needs to be adjusted. */ - if (ELF32_R_TYPE(rel->r_info) == R_386_16) - relocs16[reloc16_idx++] = rel->r_offset; - else - relocs[reloc_idx++] = rel->r_offset; + unsigned r_type = ELF32_R_TYPE(rel->r_info); + int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname); + + switch (r_type) { + case R_386_NONE: + case R_386_PC32: + case R_386_PC16: + case R_386_PC8: + /* + * NONE can be ignored and PC relative relocations don't + * need to be adjusted. + */ + break; + + case R_386_16: + if (shn_abs) { + /* + * Whitelisted absolute symbols do not require + * relocation. + */ + if (is_reloc(S_ABS, symname)) + break; + + if (is_reloc(S_SEG, symname)) { + add_reloc(&relocs16, rel->r_offset); + break; + } + } else { + if (!is_reloc(S_LIN, symname)) + break; + } + die("Invalid %s %s relocation: %s\n", + shn_abs ? "absolute" : "relative", + rel_type(r_type), symname); + break; + + case R_386_32: + if (shn_abs) { + /* + * Whitelisted absolute symbols do not require + * relocation. + */ + if (is_reloc(S_ABS, symname)) + break; + + if (is_reloc(S_REL, symname)) { + add_reloc(&relocs32, rel->r_offset); + break; + } + } else { + if (is_reloc(S_LIN, symname)) + add_reloc(&relocs32, rel->r_offset); + break; + } + die("Invalid %s %s relocation: %s\n", + shn_abs ? "absolute" : "relative", + rel_type(r_type), symname); + break; + + default: + die("Unsupported relocation type: %s (%d)\n", + rel_type(r_type), r_type); + break; + } + + return 0; } +#endif + static int cmp_relocs(const void *va, const void *vb) { - const unsigned long *a, *b; + const uint32_t *a, *b; a = va; b = vb; return (*a == *b)? 0 : (*a > *b)? 1 : -1; } -static int write32(unsigned int v, FILE *f) +static void sort_relocs(struct relocs *r) +{ + qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs); +} + +static int write32(uint32_t v, FILE *f) { unsigned char buf[4]; @@ -679,33 +945,40 @@ static int write32(unsigned int v, FILE *f) return fwrite(buf, 1, 4, f) == 4 ? 
0 : -1; } +static int write32_as_text(uint32_t v, FILE *f) +{ + return fprintf(f, "\t.long 0x%08"PRIx32"\n", v) > 0 ? 0 : -1; +} + static void emit_relocs(int as_text, int use_real_mode) { int i; - /* Count how many relocations I have and allocate space for them. */ - reloc_count = 0; - walk_relocs(count_reloc, use_real_mode); - relocs = malloc(reloc_count * sizeof(relocs[0])); - if (!relocs) { - die("malloc of %d entries for relocs failed\n", - reloc_count); - } + int (*write_reloc)(uint32_t, FILE *) = write32; + int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + const char *symname); + +#if ELF_BITS == 64 + if (!use_real_mode) + do_reloc = do_reloc64; + else + die("--realmode not valid for a 64-bit ELF file"); +#else + if (!use_real_mode) + do_reloc = do_reloc32; + else + do_reloc = do_reloc_real; +#endif - relocs16 = malloc(reloc16_count * sizeof(relocs[0])); - if (!relocs16) { - die("malloc of %d entries for relocs16 failed\n", - reloc16_count); - } /* Collect up the relocations */ - reloc_idx = 0; - walk_relocs(collect_reloc, use_real_mode); + walk_relocs(do_reloc); - if (reloc16_count && !use_real_mode) + if (relocs16.count && !use_real_mode) die("Segment relocations found but --realmode not specified\n"); /* Order the relocations for more efficient processing */ - qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs); - qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs); + sort_relocs(&relocs16); + sort_relocs(&relocs32); + sort_relocs(&relocs64); /* Print the relocations */ if (as_text) { @@ -714,114 +987,60 @@ static void emit_relocs(int as_text, int use_real_mode) */ printf(".section \".data.reloc\",\"a\"\n"); printf(".balign 4\n"); - if (use_real_mode) { - printf("\t.long %lu\n", reloc16_count); - for (i = 0; i < reloc16_count; i++) - printf("\t.long 0x%08lx\n", relocs16[i]); - printf("\t.long %lu\n", reloc_count); - for (i = 0; i < reloc_count; i++) { - printf("\t.long 0x%08lx\n", relocs[i]); - } - } else { - /* Print a stop */ - printf("\t.long 0x%08lx\n", (unsigned long)0); - for (i = 0; i < reloc_count; i++) { - printf("\t.long 0x%08lx\n", relocs[i]); - } - } - - printf("\n"); + write_reloc = write32_as_text; } - else { - if (use_real_mode) { - write32(reloc16_count, stdout); - for (i = 0; i < reloc16_count; i++) - write32(relocs16[i], stdout); - write32(reloc_count, stdout); - /* Now print each relocation */ - for (i = 0; i < reloc_count; i++) - write32(relocs[i], stdout); - } else { + if (use_real_mode) { + write_reloc(relocs16.count, stdout); + for (i = 0; i < relocs16.count; i++) + write_reloc(relocs16.offset[i], stdout); + + write_reloc(relocs32.count, stdout); + for (i = 0; i < relocs32.count; i++) + write_reloc(relocs32.offset[i], stdout); + } else { + if (ELF_BITS == 64) { /* Print a stop */ - write32(0, stdout); + write_reloc(0, stdout); /* Now print each relocation */ - for (i = 0; i < reloc_count; i++) { - write32(relocs[i], stdout); - } + for (i = 0; i < relocs64.count; i++) + write_reloc(relocs64.offset[i], stdout); } + + /* Print a stop */ + write_reloc(0, stdout); + + /* Now print each relocation */ + for (i = 0; i < relocs32.count; i++) + write_reloc(relocs32.offset[i], stdout); } } -static void usage(void) -{ - die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n"); -} +#if ELF_BITS == 64 +# define process process_64 +#else +# define process process_32 +#endif -int main(int argc, char **argv) +void process(FILE *fp, int use_real_mode, int as_text, + int show_absolute_syms, int show_absolute_relocs) { - int 
show_absolute_syms, show_absolute_relocs; - int as_text, use_real_mode; - const char *fname; - FILE *fp; - int i; - - show_absolute_syms = 0; - show_absolute_relocs = 0; - as_text = 0; - use_real_mode = 0; - fname = NULL; - for (i = 1; i < argc; i++) { - char *arg = argv[i]; - if (*arg == '-') { - if (strcmp(arg, "--abs-syms") == 0) { - show_absolute_syms = 1; - continue; - } - if (strcmp(arg, "--abs-relocs") == 0) { - show_absolute_relocs = 1; - continue; - } - if (strcmp(arg, "--text") == 0) { - as_text = 1; - continue; - } - if (strcmp(arg, "--realmode") == 0) { - use_real_mode = 1; - continue; - } - } - else if (!fname) { - fname = arg; - continue; - } - usage(); - } - if (!fname) { - usage(); - } regex_init(use_real_mode); - fp = fopen(fname, "r"); - if (!fp) { - die("Cannot open %s: %s\n", - fname, strerror(errno)); - } read_ehdr(fp); read_shdrs(fp); read_strtabs(fp); read_symtabs(fp); read_relocs(fp); + if (ELF_BITS == 64) + percpu_init(); if (show_absolute_syms) { print_absolute_symbols(); - goto out; + return; } if (show_absolute_relocs) { print_absolute_relocs(); - goto out; + return; } emit_relocs(as_text, use_real_mode); -out: - fclose(fp); - return 0; } diff --git a/arch/x86/tools/relocs.h b/arch/x86/tools/relocs.h new file mode 100644 index 0000000..07cdb1e --- /dev/null +++ b/arch/x86/tools/relocs.h @@ -0,0 +1,36 @@ +#ifndef RELOCS_H +#define RELOCS_H + +#include <stdio.h> +#include <stdarg.h> +#include <stdlib.h> +#include <stdint.h> +#include <inttypes.h> +#include <string.h> +#include <errno.h> +#include <unistd.h> +#include <elf.h> +#include <byteswap.h> +#define USE_BSD +#include <endian.h> +#include <regex.h> +#include <tools/le_byteshift.h> + +void die(char *fmt, ...); + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +enum symtype { + S_ABS, + S_REL, + S_SEG, + S_LIN, + S_NSYMTYPES +}; + +void process_32(FILE *fp, int use_real_mode, int as_text, + int show_absolute_syms, int show_absolute_relocs); +void process_64(FILE *fp, int use_real_mode, int as_text, + int show_absolute_syms, int show_absolute_relocs); + +#endif /* RELOCS_H */ diff --git a/arch/x86/tools/relocs_32.c b/arch/x86/tools/relocs_32.c new file mode 100644 index 0000000..b2ade2b --- /dev/null +++ b/arch/x86/tools/relocs_32.c @@ -0,0 +1,17 @@ +#include "relocs.h" + +#define ELF_BITS 32 + +#define ELF_MACHINE EM_386 +#define ELF_MACHINE_NAME "i386" +#define SHT_REL_TYPE SHT_REL +#define Elf_Rel ElfW(Rel) + +#define ELF_CLASS ELFCLASS32 +#define ELF_R_SYM(val) ELF32_R_SYM(val) +#define ELF_R_TYPE(val) ELF32_R_TYPE(val) +#define ELF_ST_TYPE(o) ELF32_ST_TYPE(o) +#define ELF_ST_BIND(o) ELF32_ST_BIND(o) +#define ELF_ST_VISIBILITY(o) ELF32_ST_VISIBILITY(o) + +#include "relocs.c" diff --git a/arch/x86/tools/relocs_64.c b/arch/x86/tools/relocs_64.c new file mode 100644 index 0000000..56b61b7 --- /dev/null +++ b/arch/x86/tools/relocs_64.c @@ -0,0 +1,17 @@ +#include "relocs.h" + +#define ELF_BITS 64 + +#define ELF_MACHINE EM_X86_64 +#define ELF_MACHINE_NAME "x86_64" +#define SHT_REL_TYPE SHT_RELA +#define Elf_Rel Elf64_Rela + +#define ELF_CLASS ELFCLASS64 +#define ELF_R_SYM(val) ELF64_R_SYM(val) +#define ELF_R_TYPE(val) ELF64_R_TYPE(val) +#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o) +#define ELF_ST_BIND(o) ELF64_ST_BIND(o) +#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o) + +#include "relocs.c" diff --git a/arch/x86/tools/relocs_common.c b/arch/x86/tools/relocs_common.c new file mode 100644 index 0000000..44d3968 --- /dev/null +++ b/arch/x86/tools/relocs_common.c @@ -0,0 +1,76 @@ +#include "relocs.h" + +void 
die(char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + exit(1); +} + +static void usage(void) +{ + die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n"); +} + +int main(int argc, char **argv) +{ + int show_absolute_syms, show_absolute_relocs; + int as_text, use_real_mode; + const char *fname; + FILE *fp; + int i; + unsigned char e_ident[EI_NIDENT]; + + show_absolute_syms = 0; + show_absolute_relocs = 0; + as_text = 0; + use_real_mode = 0; + fname = NULL; + for (i = 1; i < argc; i++) { + char *arg = argv[i]; + if (*arg == '-') { + if (strcmp(arg, "--abs-syms") == 0) { + show_absolute_syms = 1; + continue; + } + if (strcmp(arg, "--abs-relocs") == 0) { + show_absolute_relocs = 1; + continue; + } + if (strcmp(arg, "--text") == 0) { + as_text = 1; + continue; + } + if (strcmp(arg, "--realmode") == 0) { + use_real_mode = 1; + continue; + } + } + else if (!fname) { + fname = arg; + continue; + } + usage(); + } + if (!fname) { + usage(); + } + fp = fopen(fname, "r"); + if (!fp) { + die("Cannot open %s: %s\n", fname, strerror(errno)); + } + if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT) { + die("Cannot read %s: %s", fname, strerror(errno)); + } + rewind(fp); + if (e_ident[EI_CLASS] == ELFCLASS64) + process_64(fp, use_real_mode, as_text, + show_absolute_syms, show_absolute_relocs); + else + process_32(fp, use_real_mode, as_text, + show_absolute_syms, show_absolute_relocs); + fclose(fp); + return 0; +} diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 131dacd..1a3c765 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -4,7 +4,7 @@ config XEN bool "Xen guest support" - select PARAVIRT + depends on PARAVIRT select PARAVIRT_CLOCK select XEN_HAVE_PVMMU depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c8e1c7b..53d4f68 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -31,6 +31,7 @@ #include <linux/pci.h> #include <linux/gfp.h> #include <linux/memblock.h> +#include <linux/edd.h> #include <xen/xen.h> #include <xen/events.h> @@ -1220,7 +1221,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .alloc_ldt = xen_alloc_ldt, .free_ldt = xen_free_ldt, - .store_gdt = native_store_gdt, .store_idt = native_store_idt, .store_tr = xen_store_tr, @@ -1306,6 +1306,55 @@ static const struct machine_ops xen_machine_ops __initconst = { .emergency_restart = xen_emergency_restart, }; +static void __init xen_boot_params_init_edd(void) +{ +#if IS_ENABLED(CONFIG_EDD) + struct xen_platform_op op; + struct edd_info *edd_info; + u32 *mbr_signature; + unsigned nr; + int ret; + + edd_info = boot_params.eddbuf; + mbr_signature = boot_params.edd_mbr_sig_buffer; + + op.cmd = XENPF_firmware_info; + + op.u.firmware_info.type = XEN_FW_DISK_INFO; + for (nr = 0; nr < EDDMAXNR; nr++) { + struct edd_info *info = edd_info + nr; + + op.u.firmware_info.index = nr; + info->params.length = sizeof(info->params); + set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params, + &info->params); + ret = HYPERVISOR_dom0_op(&op); + if (ret) + break; + +#define C(x) info->x = op.u.firmware_info.u.disk_info.x + C(device); + C(version); + C(interface_support); + C(legacy_max_cylinder); + C(legacy_max_head); + C(legacy_sectors_per_track); +#undef C + } + boot_params.eddbuf_entries = nr; + + op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE; + for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) { + op.u.firmware_info.index = nr; + ret = HYPERVISOR_dom0_op(&op); + if 
(ret) + break; + mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature; + } + boot_params.edd_mbr_sig_buf_entries = nr; +#endif +} + /* * Set up the GDT and segment registers for -fstack-protector. Until * we do this, we have to be careful not to call any stack-protected @@ -1508,6 +1557,8 @@ asmlinkage void __init xen_start_kernel(void) /* Avoid searching for BIOS MP tables */ x86_init.mpparse.find_smp_config = x86_init_noop; x86_init.mpparse.get_smp_config = x86_init_uint_noop; + + xen_boot_params_init_edd(); } #ifdef CONFIG_PCI /* PCI BIOS service won't work from a PV guest. */ @@ -1589,8 +1640,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: xen_vcpu_setup(cpu); - if (xen_have_vector_callback) + if (xen_have_vector_callback) { xen_init_lock_cpu(cpu); + if (xen_feature(XENFEAT_hvm_safe_pvclock)) + xen_setup_timer(cpu); + } break; default: break; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index e8e3493..fdc3ba2 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3) __xen_write_cr3(true, cr3); xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ - - pv_mmu_ops.write_cr3 = &xen_write_cr3; } #endif @@ -1750,14 +1748,18 @@ static void *m2v(phys_addr_t maddr) } /* Set the page permissions on an identity-mapped pages */ -static void set_page_prot(void *addr, pgprot_t prot) +static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags) { unsigned long pfn = __pa(addr) >> PAGE_SHIFT; pte_t pte = pfn_pte(pfn, prot); - if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) + if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) BUG(); } +static void set_page_prot(void *addr, pgprot_t prot) +{ + return set_page_prot_flags(addr, prot, UVMF_NONE); +} #ifdef CONFIG_X86_32 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) { @@ -1841,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, unsigned long addr) { if (*pt_base == PFN_DOWN(__pa(addr))) { - set_page_prot((void *)addr, PAGE_KERNEL); + set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); clear_page((void *)addr); (*pt_base)++; } if (*pt_end == PFN_DOWN(__pa(addr))) { - set_page_prot((void *)addr, PAGE_KERNEL); + set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); clear_page((void *)addr); (*pt_end)--; } @@ -2041,9 +2043,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) switch (idx) { case FIX_BTMAP_END ... 
FIX_BTMAP_BEGIN: -#ifdef CONFIG_X86_F00F_BUG - case FIX_F00F_IDT: -#endif + case FIX_RO_IDT: #ifdef CONFIG_X86_32 case FIX_WP_TEST: case FIX_VDSO: @@ -2122,6 +2122,7 @@ static void __init xen_post_allocator_init(void) #endif #ifdef CONFIG_X86_64 + pv_mmu_ops.write_cr3 = &xen_write_cr3; SetPagePinned(virt_to_page(level3_user_vsyscall)); #endif xen_mark_init_mm_pinned(); @@ -2197,6 +2198,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .lazy_mode = { .enter = paravirt_enter_lazy_mmu, .leave = xen_leave_lazy_mmu, + .flush = paravirt_flush_lazy_mmu, }, .set_fixmap = xen_set_fixmap, diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 09ea61d..8ff3799 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -95,7 +95,7 @@ static void __cpuinit cpu_bringup(void) static void __cpuinit cpu_bringup_and_idle(void) { cpu_bringup(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } static int xen_smp_intr_init(unsigned int cpu) @@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu) goto fail; per_cpu(xen_callfuncsingle_irq, cpu) = rc; + /* + * The IRQ worker on PVHVM goes through the native path and uses the + * IPI mechanism. + */ + if (xen_hvm_domain()) + return 0; + callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu, @@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu) if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); + if (xen_hvm_domain()) + return rc; + if (per_cpu(xen_irq_work, cpu) >= 0) unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); @@ -418,7 +428,7 @@ static int xen_cpu_disable(void) static void xen_cpu_die(unsigned int cpu) { - while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { + while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { current->state = TASK_UNINTERRUPTIBLE; schedule_timeout(HZ/10); } @@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu) unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); + if (!xen_hvm_domain()) + unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); xen_uninit_lock_cpu(cpu); xen_teardown_timer(cpu); } @@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) static void xen_hvm_cpu_die(unsigned int cpu) { - unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); + xen_cpu_die(cpu); native_cpu_die(cpu); } diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index f7a080e..8b54603 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu) int irq; const char *name; + WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n", + cpu, per_cpu(lock_kicker_irq, cpu)); + + /* + * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23 + * (xen: disable PV spinlocks on HVM) + */ + if (xen_hvm_domain()) + return; + name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, cpu, @@ -382,11 +392,26 @@ void __cpuinit 
xen_init_lock_cpu(int cpu) void xen_uninit_lock_cpu(int cpu) { + /* + * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23 + * (xen: disable PV spinlocks on HVM) + */ + if (xen_hvm_domain()) + return; + unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); + per_cpu(lock_kicker_irq, cpu) = -1; } void __init xen_init_spinlocks(void) { + /* + * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23 + * (xen: disable PV spinlocks on HVM) + */ + if (xen_hvm_domain()) + return; + BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t)); pv_lock_ops.spin_is_locked = xen_spin_is_locked; diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 0296a95..3d88bfd 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = { static const struct clock_event_device *xen_clockevent = &xen_timerop_clockevent; -static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events); +static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 }; static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) { @@ -401,6 +401,9 @@ void xen_setup_timer(int cpu) struct clock_event_device *evt; int irq; + evt = &per_cpu(xen_clock_events, cpu); + WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); + printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); name = kasprintf(GFP_KERNEL, "timer%d", cpu); @@ -413,7 +416,6 @@ void xen_setup_timer(int cpu) IRQF_FORCE_RESUME, name, NULL); - evt = &per_cpu(xen_clock_events, cpu); memcpy(evt, xen_clockevent, sizeof(*evt)); evt->cpumask = cpumask_of(cpu); @@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu) BUG_ON(cpu == 0); evt = &per_cpu(xen_clock_events, cpu); unbind_from_irqhandler(evt->irq, NULL); + evt->irq = -1; } void xen_setup_cpu_clockevents(void) @@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void) { int cpu = smp_processor_id(); xen_setup_runstate_info(cpu); - xen_setup_timer(cpu); + /* + * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence + * doing it xen_hvm_cpu_notify (which gets called by smp_init during + * early bootup and also during CPU hotplug events). + */ xen_setup_cpu_clockevents(); } diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 35876ff..b09de49 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -9,7 +9,7 @@ config XTENSA select HAVE_IDE select GENERIC_ATOMIC64 select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select MODULES_USE_ELF_RELA diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 5cd82e9..1c85323 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -105,19 +105,9 @@ void coprocessor_flush_all(struct thread_info *ti) /* * Powermanagement idle function, if any is provided by the platform. 
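
The xtensa hunk below is one instance of the conversion away from per-arch cpu_idle() loops: the architecture no longer owns the idle loop, it only supplies a per-iteration hook. A rough user-space sketch of that division of labour (stub names only, not the kernel's cpu_startup_entry() or platform_idle()):

#include <stdio.h>

static int iterations;                                  /* fake "work arrived" condition */
static int need_resched_stub(void) { return ++iterations > 3; }
static void platform_idle_stub(void) { puts("idle"); }

/* What the arch now provides: one call per idle iteration. */
static void arch_cpu_idle_sketch(void)
{
        platform_idle_stub();
}

/* What generic code owns: the loop itself. */
static void generic_idle_loop_sketch(void)
{
        while (!need_resched_stub())
                arch_cpu_idle_sketch();
}

int main(void)
{
        generic_idle_loop_sketch();
        return 0;
}
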
*/ - -void cpu_idle(void) +void arch_cpu_idle(void) { - local_irq_enable(); - - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - while (!need_resched()) - platform_idle(); - rcu_idle_exit(); - schedule_preempt_disabled(); - } + platform_idle(); } /* diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 923db5c..458186d 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -383,6 +383,8 @@ void show_regs(struct pt_regs * regs) { int i, wmask; + show_regs_print_info(KERN_DEFAULT); + wmask = regs->wmask & ~1; for (i = 0; i < 16; i++) { @@ -481,14 +483,6 @@ void show_stack(struct task_struct *task, unsigned long *sp) show_trace(task, stack); } -void dump_stack(void) -{ - show_stack(current, NULL); -} - -EXPORT_SYMBOL(dump_stack); - - void show_code(unsigned int *pc) { long i; diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 7a5156f..bba125b 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -208,32 +208,17 @@ void __init mem_init(void) highmemsize >> 10); } -void -free_reserved_mem(void *start, void *end) -{ - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page((unsigned long)start); - totalram_pages++; - } -} - #ifdef CONFIG_BLK_DEV_INITRD extern int initrd_is_mapped; void free_initrd_mem(unsigned long start, unsigned long end) { - if (initrd_is_mapped) { - free_reserved_mem((void*)start, (void*)end); - printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10); - } + if (initrd_is_mapped) + free_reserved_area(start, end, 0, "initrd"); } #endif void free_initmem(void) { - free_reserved_mem(__init_begin, __init_end); - printk("Freeing unused kernel memory: %zuk freed\n", - (__init_end - __init_begin) >> 10); + free_initmem_default(0); } |
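
As a closing illustration for the relocs_common.c hunk earlier in this section: the tool now peeks at e_ident to decide whether the 32-bit or 64-bit handler should process the file. A minimal stand-alone sketch of that dispatch (the helper elf_class_name is hypothetical; the constants come from the real <elf.h>):

#include <elf.h>
#include <stdio.h>

/* Simplified dispatcher: read e_ident, rewind, and report the ELF class, the
 * way relocs_common.c chooses between process_32() and process_64(). */
static const char *elf_class_name(FILE *fp)
{
        unsigned char e_ident[EI_NIDENT];

        if (fread(e_ident, 1, EI_NIDENT, fp) != EI_NIDENT)
                return "unreadable";
        rewind(fp);             /* the real tool re-reads the full header afterwards */
        return e_ident[EI_CLASS] == ELFCLASS64 ? "ELF64" : "ELF32";
}

int main(int argc, char **argv)
{
        FILE *fp = argc > 1 ? fopen(argv[1], "rb") : NULL;

        if (!fp) {
                fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
                return 1;
        }
        printf("%s: %s\n", argv[1], elf_class_name(fp));
        fclose(fp);
        return 0;
}
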