Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/Makefile          |    2
-rw-r--r--   arch/mips/kernel/binfmt_elfn32.c   |   11
-rw-r--r--   arch/mips/kernel/binfmt_elfo32.c   |   11
-rw-r--r--   arch/mips/kernel/cpu-probe.c       |  198
-rw-r--r--   arch/mips/kernel/crash_dump.c      |    1
-rw-r--r--   arch/mips/kernel/ftrace.c          |    4
-rw-r--r--   arch/mips/kernel/genex.S           |    8
-rw-r--r--   arch/mips/kernel/idle.c            |  245
-rw-r--r--   arch/mips/kernel/kprobes.c         |    5
-rw-r--r--   arch/mips/kernel/proc.c            |    1
-rw-r--r--   arch/mips/kernel/process.c         |   53
-rw-r--r--   arch/mips/kernel/rtlx.c            |    1
-rw-r--r--   arch/mips/kernel/scall64-64.S      |    1
-rw-r--r--   arch/mips/kernel/smp.c             |    1
-rw-r--r--   arch/mips/kernel/smtc.c            |   15
-rw-r--r--   arch/mips/kernel/traps.c           |   43
16 files changed, 349 insertions, 251 deletions
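
Note: several hunks below (traps.c, smtc.c) call using_rollback_handler() and address_is_in_r4k_wait_irqoff(), which this series adds to <asm/idle.h>. That header lives under arch/mips/include/asm and is therefore outside this diffstat. The following is only a rough sketch of that interface, reconstructed from the call sites rather than taken from this diff:

/* Sketch of the <asm/idle.h> interface assumed by the hunks below. */
extern void (*cpu_wait)(void);
extern void r4k_wait(void);
extern void __r4k_wait(void);
extern void r4k_wait_irqoff(void);
extern void __pastwait(void);

/* trap_init()/set_vi_srs_handler() install the rollback interrupt
 * handler whenever the plain r4k WAIT routine is in use. */
static inline int using_rollback_handler(void)
{
        return cpu_wait == r4k_wait;
}

/* smtc_send_ipi() uses this to detect a TC parked between
 * r4k_wait_irqoff() and the __pastwait label. */
static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
{
        return addr >= (unsigned long)r4k_wait_irqoff &&
               addr <  (unsigned long)__pastwait;
}
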
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 6ad9e04..423d871 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -4,7 +4,7 @@
 
 extra-y         := head.o vmlinux.lds
 
-obj-y           += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
+obj-y           += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
                    prom.o ptrace.o reset.o setup.o signal.o syscall.o \
                    time.o topology.o traps.o unaligned.o watch.o vdso.o
 
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index e06f777..1188e00 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+        unsigned long jiffies = cputime_to_jiffies(cputime);
+
+        value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+        value->tv_sec = jiffies / HZ;
+}
+
 #include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 97c5a16..202e581 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -162,4 +162,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+        unsigned long jiffies = cputime_to_jiffies(cputime);
+
+        value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+        value->tv_sec = jiffies / HZ;
+}
+
 #include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 4bbffdb..c6568bf 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -27,105 +27,6 @@
 #include <asm/spram.h>
 #include <asm/uaccess.h>
 
-/*
- * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
- * the implementation of the "wait" feature differs between CPU families. This
- * points to the function that implements CPU specific wait.
- * The wait instruction stops the pipeline and reduces the power consumption of
- * the CPU very much.
- */
-void (*cpu_wait)(void);
-EXPORT_SYMBOL(cpu_wait);
-
-static void r3081_wait(void)
-{
-        unsigned long cfg = read_c0_conf();
-        write_c0_conf(cfg | R30XX_CONF_HALT);
-}
-
-static void r39xx_wait(void)
-{
-        local_irq_disable();
-        if (!need_resched())
-                write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
-        local_irq_enable();
-}
-
-extern void r4k_wait(void);
-
-/*
- * This variant is preferable as it allows testing need_resched and going to
- * sleep depending on the outcome atomically.  Unfortunately the "It is
- * implementation-dependent whether the pipeline restarts when a non-enabled
- * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
- * using this version a gamble.
- */
-void r4k_wait_irqoff(void)
-{
-        local_irq_disable();
-        if (!need_resched())
-                __asm__("       .set    push            \n"
-                        "       .set    mips3           \n"
-                        "       wait                    \n"
-                        "       .set    pop             \n");
-        local_irq_enable();
-        __asm__("       .globl __pastwait       \n"
-                "__pastwait:                    \n");
-}
-
-/*
- * The RM7000 variant has to handle erratum 38.  The workaround is to not
- * have any pending stores when the WAIT instruction is executed.
- */
-static void rm7k_wait_irqoff(void)
-{
-        local_irq_disable();
-        if (!need_resched())
-                __asm__(
-                "       .set    push                                    \n"
-                "       .set    mips3                                   \n"
-                "       .set    noat                                    \n"
-                "       mfc0    $1, $12                                 \n"
-                "       sync                                            \n"
-                "       mtc0    $1, $12         # stalls until W stage  \n"
-                "       wait                                            \n"
-                "       mtc0    $1, $12         # stalls until W stage  \n"
-                "       .set    pop                                     \n");
-        local_irq_enable();
-}
-
-/*
- * The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter.
- * alchemy/common/time.c may override cpu_wait!
- */
-static void au1k_wait(void)
-{
-        __asm__("       .set    mips3                   \n"
-                "       cache   0x14, 0(%0)             \n"
-                "       cache   0x14, 32(%0)            \n"
-                "       sync                            \n"
-                "       nop                             \n"
-                "       wait                            \n"
-                "       nop                             \n"
-                "       nop                             \n"
-                "       nop                             \n"
-                "       nop                             \n"
-                "       .set    mips0                   \n"
-                : : "r" (au1k_wait));
-}
-
-static int __initdata nowait;
-
-static int __init wait_disable(char *s)
-{
-        nowait = 1;
-
-        return 1;
-}
-
-__setup("nowait", wait_disable);
-
 static int __cpuinitdata mips_fpu_disabled;
 
 static int __init fpu_disable(char *s)
@@ -150,105 +51,6 @@ static int __init dsp_disable(char *s)
 
 __setup("nodsp", dsp_disable);
 
-void __init check_wait(void)
-{
-        struct cpuinfo_mips *c = &current_cpu_data;
-
-        if (nowait) {
-                printk("Wait instruction disabled.\n");
-                return;
-        }
-
-        switch (c->cputype) {
-        case CPU_R3081:
-        case CPU_R3081E:
-                cpu_wait = r3081_wait;
-                break;
-        case CPU_TX3927:
-                cpu_wait = r39xx_wait;
-                break;
-        case CPU_R4200:
-/*      case CPU_R4300: */
-        case CPU_R4600:
-        case CPU_R4640:
-        case CPU_R4650:
-        case CPU_R4700:
-        case CPU_R5000:
-        case CPU_R5500:
-        case CPU_NEVADA:
-        case CPU_4KC:
-        case CPU_4KEC:
-        case CPU_4KSC:
-        case CPU_5KC:
-        case CPU_25KF:
-        case CPU_PR4450:
-        case CPU_BMIPS3300:
-        case CPU_BMIPS4350:
-        case CPU_BMIPS4380:
-        case CPU_BMIPS5000:
-        case CPU_CAVIUM_OCTEON:
-        case CPU_CAVIUM_OCTEON_PLUS:
-        case CPU_CAVIUM_OCTEON2:
-        case CPU_JZRISC:
-        case CPU_LOONGSON1:
-        case CPU_XLR:
-        case CPU_XLP:
-                cpu_wait = r4k_wait;
-                break;
-
-        case CPU_RM7000:
-                cpu_wait = rm7k_wait_irqoff;
-                break;
-
-        case CPU_M14KC:
-        case CPU_M14KEC:
-        case CPU_24K:
-        case CPU_34K:
-        case CPU_1004K:
-                cpu_wait = r4k_wait;
-                if (read_c0_config7() & MIPS_CONF7_WII)
-                        cpu_wait = r4k_wait_irqoff;
-                break;
-
-        case CPU_74K:
-                cpu_wait = r4k_wait;
-                if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
-                        cpu_wait = r4k_wait_irqoff;
-                break;
-
-        case CPU_TX49XX:
-                cpu_wait = r4k_wait_irqoff;
-                break;
-        case CPU_ALCHEMY:
-                cpu_wait = au1k_wait;
-                break;
-        case CPU_20KC:
-                /*
-                 * WAIT on Rev1.0 has E1, E2, E3 and E16.
-                 * WAIT on Rev2.0 and Rev3.0 has E16.
-                 * Rev3.1 WAIT is nop, why bother
-                 */
-                if ((c->processor_id & 0xff) <= 0x64)
-                        break;
-
-                /*
-                 * Another rev is incremeting c0_count at a reduced clock
-                 * rate while in WAIT mode.  So we basically have the choice
-                 * between using the cp0 timer as clocksource or avoiding
-                 * the WAIT instruction.  Until more details are known,
-                 * disable the use of WAIT for 20Kc entirely.
-                   cpu_wait = r4k_wait;
-                 */
-                break;
-        case CPU_RM9000:
-                if ((c->processor_id & 0x00ff) >= 0x40)
-                        cpu_wait = r4k_wait;
-                break;
-        default:
-                break;
-        }
-}
-
 static inline void check_errata(void)
 {
         struct cpuinfo_mips *c = &current_cpu_data;
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index 35bed0d..3be9e7b 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -2,6 +2,7 @@
 #include <linux/bootmem.h>
 #include <linux/crash_dump.h>
 #include <asm/uaccess.h>
+#include <linux/slab.h>
 
 static int __init parse_savemaxmem(char *p)
 {
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index cf5509f..dba90ec 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -25,12 +25,16 @@
 #define MCOUNT_OFFSET_INSNS 4
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
 /* Arch override because MIPS doesn't need to run this from stop_machine() */
 void arch_ftrace_update_code(int command)
 {
         ftrace_modify_all_code(command);
 }
 
+#endif
+
 /*
  * Check if the address is in kernel space
  *
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5c2ba9f..31fa856 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -122,7 +122,7 @@ handle_vcei:
         __FINIT
 
         .align  5       /* 32 byte rollback region */
-LEAF(r4k_wait)
+LEAF(__r4k_wait)
         .set    push
         .set    noreorder
         /* start of rollback region */
@@ -146,14 +146,14 @@ LEAF(r4k_wait)
         jr      ra
         nop
         .set    pop
-        END(r4k_wait)
+        END(__r4k_wait)
 
         .macro  BUILD_ROLLBACK_PROLOGUE handler
         FEXPORT(rollback_\handler)
         .set    push
         .set    noat
         MFC0    k0, CP0_EPC
-        PTR_LA  k1, r4k_wait
+        PTR_LA  k1, __r4k_wait
         ori     k0, 0x1f        /* 32 byte rollback region */
         xori    k0, 0x1f
         bne     k0, k1, 9f
@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
         .set    noreorder
         /* check if TLB contains a entry for EPC */
         MFC0    k1, CP0_ENTRYHI
-        andi    k1, 0xff        /* ASID_MASK patched at run-time!! */
+        andi    k1, 0xff        /* ASID_MASK */
         MFC0    k0, CP0_EPC
         PTR_SRL k0, _PAGE_SHIFT + 1
         PTR_SLL k0, _PAGE_SHIFT + 1
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
new file mode 100644
index 0000000..0c655de
--- /dev/null
+++ b/arch/mips/kernel/idle.c
@@ -0,0 +1,245 @@
+/*
+ * MIPS idle loop and WAIT instruction support.
+ *
+ * Copyright (C) xxxx  the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004  Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012  MIPS Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
+ * the implementation of the "wait" feature differs between CPU families. This
+ * points to the function that implements CPU specific wait.
+ * The wait instruction stops the pipeline and reduces the power consumption of
+ * the CPU very much.
+ */
+void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
+
+static void r3081_wait(void)
+{
+        unsigned long cfg = read_c0_conf();
+        write_c0_conf(cfg | R30XX_CONF_HALT);
+        local_irq_enable();
+}
+
+static void r39xx_wait(void)
+{
+        if (!need_resched())
+                write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+        local_irq_enable();
+}
+
+void r4k_wait(void)
+{
+        local_irq_enable();
+        __r4k_wait();
+}
+
+/*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically.  Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+void r4k_wait_irqoff(void)
+{
+        if (!need_resched())
+                __asm__(
+                "       .set    push            \n"
+                "       .set    mips3           \n"
+                "       wait                    \n"
+                "       .set    pop             \n");
+        local_irq_enable();
+        __asm__(
+        "       .globl __pastwait       \n"
+        "__pastwait:                    \n");
+}
+
+/*
+ * The RM7000 variant has to handle erratum 38.  The workaround is to not
+ * have any pending stores when the WAIT instruction is executed.
+ */
+static void rm7k_wait_irqoff(void)
+{
+        if (!need_resched())
+                __asm__(
+                "       .set    push                                    \n"
+                "       .set    mips3                                   \n"
+                "       .set    noat                                    \n"
+                "       mfc0    $1, $12                                 \n"
+                "       sync                                            \n"
+                "       mtc0    $1, $12         # stalls until W stage  \n"
+                "       wait                                            \n"
+                "       mtc0    $1, $12         # stalls until W stage  \n"
+                "       .set    pop                                     \n");
+        local_irq_enable();
+}
+
+/*
+ * Au1 'wait' is only useful when the 32kHz counter is used as timer,
+ * since coreclock (and the cp0 counter) stops upon executing it. Only an
+ * interrupt can wake it, so they must be enabled before entering idle modes.
+ */
+static void au1k_wait(void)
+{
+        unsigned long c0status = read_c0_status() | 1;  /* irqs on */
+
+        __asm__(
+        "       .set    mips3                   \n"
+        "       cache   0x14, 0(%0)             \n"
+        "       cache   0x14, 32(%0)            \n"
+        "       sync                            \n"
+        "       mtc0    %1, $12                 \n" /* wr c0status */
+        "       wait                            \n"
+        "       nop                             \n"
+        "       nop                             \n"
+        "       nop                             \n"
+        "       nop                             \n"
+        "       .set    mips0                   \n"
+        : : "r" (au1k_wait), "r" (c0status));
+}
+
+static int __initdata nowait;
+
+static int __init wait_disable(char *s)
+{
+        nowait = 1;
+
+        return 1;
+}
+
+__setup("nowait", wait_disable);
+
+void __init check_wait(void)
+{
+        struct cpuinfo_mips *c = &current_cpu_data;
+
+        if (nowait) {
+                printk("Wait instruction disabled.\n");
+                return;
+        }
+
+        switch (c->cputype) {
+        case CPU_R3081:
+        case CPU_R3081E:
+                cpu_wait = r3081_wait;
+                break;
+        case CPU_TX3927:
+                cpu_wait = r39xx_wait;
+                break;
+        case CPU_R4200:
+/*      case CPU_R4300: */
+        case CPU_R4600:
+        case CPU_R4640:
+        case CPU_R4650:
+        case CPU_R4700:
+        case CPU_R5000:
+        case CPU_R5500:
+        case CPU_NEVADA:
+        case CPU_4KC:
+        case CPU_4KEC:
+        case CPU_4KSC:
+        case CPU_5KC:
+        case CPU_25KF:
+        case CPU_PR4450:
+        case CPU_BMIPS3300:
+        case CPU_BMIPS4350:
+        case CPU_BMIPS4380:
+        case CPU_BMIPS5000:
+        case CPU_CAVIUM_OCTEON:
+        case CPU_CAVIUM_OCTEON_PLUS:
+        case CPU_CAVIUM_OCTEON2:
+        case CPU_JZRISC:
+        case CPU_LOONGSON1:
+        case CPU_XLR:
+        case CPU_XLP:
+                cpu_wait = r4k_wait;
+                break;
+
+        case CPU_RM7000:
+                cpu_wait = rm7k_wait_irqoff;
+                break;
+
+        case CPU_M14KC:
+        case CPU_M14KEC:
+        case CPU_24K:
+        case CPU_34K:
+        case CPU_1004K:
+                cpu_wait = r4k_wait;
+                if (read_c0_config7() & MIPS_CONF7_WII)
+                        cpu_wait = r4k_wait_irqoff;
+                break;
+
+        case CPU_74K:
+                cpu_wait = r4k_wait;
+                if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
+                        cpu_wait = r4k_wait_irqoff;
+                break;
+
+        case CPU_TX49XX:
+                cpu_wait = r4k_wait_irqoff;
+                break;
+        case CPU_ALCHEMY:
+                cpu_wait = au1k_wait;
+                break;
+        case CPU_20KC:
+                /*
+                 * WAIT on Rev1.0 has E1, E2, E3 and E16.
+                 * WAIT on Rev2.0 and Rev3.0 has E16.
+                 * Rev3.1 WAIT is nop, why bother
+                 */
+                if ((c->processor_id & 0xff) <= 0x64)
+                        break;
+
+                /*
+                 * Another rev is incremeting c0_count at a reduced clock
+                 * rate while in WAIT mode.  So we basically have the choice
+                 * between using the cp0 timer as clocksource or avoiding
+                 * the WAIT instruction.  Until more details are known,
+                 * disable the use of WAIT for 20Kc entirely.
+                   cpu_wait = r4k_wait;
+                 */
+                break;
+        case CPU_RM9000:
+                if ((c->processor_id & 0x00ff) >= 0x40)
+                        cpu_wait = r4k_wait;
+                break;
+        default:
+                break;
+        }
+}
+
+static void smtc_idle_hook(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+        void smtc_idle_loop_hook(void);
+
+        smtc_idle_loop_hook();
+#endif
+}
+
+void arch_cpu_idle(void)
+{
+        smtc_idle_hook();
+        if (cpu_wait)
+                cpu_wait();
+        else
+                local_irq_enable();
+}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 12bc4eb..1f8187a 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -207,7 +207,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-        free_insn_slot(p->ainsn.insn, 0);
+        if (p->ainsn.insn) {
+                free_insn_slot(p->ainsn.insn, 0);
+                p->ainsn.insn = NULL;
+        }
 }
 
 static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index a3e4614..acb3437 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -10,6 +10,7 @@
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
+#include <asm/idle.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
 #include <asm/prom.h>
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eb902c1..c6a041d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -51,19 +51,6 @@ void arch_cpu_idle_dead(void)
 }
 #endif
 
-void arch_cpu_idle(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-        extern void smtc_idle_loop_hook(void);
-
-        smtc_idle_loop_hook();
-#endif
-        if (cpu_wait)
-                (*cpu_wait)();
-        else
-                local_irq_enable();
-}
-
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
@@ -224,6 +211,9 @@ struct mips_frame_info {
         int             pc_offset;
 };
 
+#define J_TARGET(pc,target)     \
+                (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+
 static inline int is_ra_save_ins(union mips_instruction *ip)
 {
 #ifdef CONFIG_CPU_MICROMIPS
@@ -264,7 +254,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
 #endif
 }
 
-static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
+static inline int is_jump_ins(union mips_instruction *ip)
 {
 #ifdef CONFIG_CPU_MICROMIPS
         /*
@@ -288,6 +278,8 @@ static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
                 return 0;
         return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
 #else
+        if (ip->j_format.opcode == j_op)
+                return 1;
         if (ip->j_format.opcode == jal_op)
                 return 1;
         if (ip->r_format.opcode != spec_op)
@@ -350,7 +342,7 @@ static int get_frame_info(struct mips_frame_info *info)
 
         for (i = 0; i < max_insns; i++, ip++) {
 
-                if (is_jal_jalr_jr_ins(ip))
+                if (is_jump_ins(ip))
                         break;
                 if (!info->frame_size) {
                         if (is_sp_move_ins(ip))
@@ -393,15 +385,42 @@ err:
 
 static struct mips_frame_info schedule_mfi __read_mostly;
 
+#ifdef CONFIG_KALLSYMS
+static unsigned long get___schedule_addr(void)
+{
+        return kallsyms_lookup_name("__schedule");
+}
+#else
+static unsigned long get___schedule_addr(void)
+{
+        union mips_instruction *ip = (void *)schedule;
+        int max_insns = 8;
+        int i;
+
+        for (i = 0; i < max_insns; i++, ip++) {
+                if (ip->j_format.opcode == j_op)
+                        return J_TARGET(ip, ip->j_format.target);
+        }
+        return 0;
+}
+#endif
+
 static int __init frame_info_init(void)
 {
         unsigned long size = 0;
 #ifdef CONFIG_KALLSYMS
         unsigned long ofs;
+#endif
+        unsigned long addr;
+
+        addr = get___schedule_addr();
+        if (!addr)
+                addr = (unsigned long)schedule;
 
-        kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
+#ifdef CONFIG_KALLSYMS
+        kallsyms_lookup_size_offset(addr, &size, &ofs);
 #endif
-        schedule_mfi.func = schedule;
+        schedule_mfi.func = (void *)addr;
         schedule_mfi.func_size = size;
 
         get_frame_info(&schedule_mfi);
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 93c070b..6fa198d 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -40,6 +40,7 @@
 #include <asm/processor.h>
 #include <asm/vpe.h>
 #include <asm/rtlx.h>
+#include <asm/setup.h>
 
 static struct rtlx_info *rtlx;
 static int major;
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 36cfd40..97a5909 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -423,4 +423,5 @@ sys_call_table:
         PTR     sys_process_vm_writev           /* 5305 */
         PTR     sys_kcmp
         PTR     sys_finit_module
+        PTR     sys_getdents64
         .size   sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index c17619f..6e7862a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -37,6 +37,7 @@
 #include <linux/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/idle.h>
 #include <asm/r4k-timer.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 31d22f3..75a4fd7 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -34,6 +34,7 @@
 #include <asm/hardirq.h>
 #include <asm/hazards.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 #include <asm/mmu_context.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
@@ -111,7 +112,7 @@ static int vpe0limit;
 static int ipibuffers;
 static int nostlb;
 static int asidmask;
-unsigned int smtc_asid_mask = 0xff;
+unsigned long smtc_asid_mask = 0xff;
 
 static int __init vpe0tcs(char *str)
 {
@@ -858,7 +859,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
         unsigned long flags;
         int mtflags;
         unsigned long tcrestart;
-        extern void r4k_wait_irqoff(void), __pastwait(void);
         int set_resched_flag = (type == LINUX_SMP_IPI &&
                                 action == SMP_RESCHEDULE_YOURSELF);
 
@@ -914,8 +914,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
                  */
                 if (cpu_wait == r4k_wait_irqoff) {
                         tcrestart = read_tc_c0_tcrestart();
-                        if (tcrestart >= (unsigned long)r4k_wait_irqoff
-                            && tcrestart < (unsigned long)__pastwait) {
+                        if (address_is_in_r4k_wait_irqoff(tcrestart)) {
                                 write_tc_c0_tcrestart(__pastwait);
                                 tcstatus &= ~TCSTATUS_IXMT;
                                 write_tc_c0_tcstatus(tcstatus);
@@ -1395,7 +1394,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
         asid = asid_cache(cpu);
 
         do {
-                if (!ASID_MASK(ASID_INC(asid))) {
+                if (!((asid += ASID_INC) & ASID_MASK) ) {
                         if (cpu_has_vtag_icache)
                                 flush_icache_all();
                         /* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1413,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
                                         mips_ihb();
                                 }
                                 tcstat = read_tc_c0_tcstatus();
-                                smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
+                                smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
                                 if (!prevhalt)
                                         write_tc_c0_tchalt(0);
                         }
@@ -1423,7 +1422,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
                         asid = ASID_FIRST_VERSION;
                         local_flush_tlb_all();  /* start new asid cycle */
                 }
-        } while (smtc_live_asid[tlb][ASID_MASK(asid)]);
+        } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
 
         /*
          * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1460,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
                 tlb_read();
                 ehb();
                 ehi = read_c0_entryhi();
-                if (ASID_MASK(ehi) == asid) {
+                if ((ehi & ASID_MASK) == asid) {
                         /*
                          * Invalidate only entries with specified ASID,
                          * makiing sure all entries differ.
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 77cff1f..a75ae40 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -41,6 +41,7 @@
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
+#include <asm/idle.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
@@ -57,7 +58,6 @@
 #include <asm/uasm.h>
 
 extern void check_wait(void);
-extern asmlinkage void r4k_wait(void);
 extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
 extern u32 handle_tlbl[];
@@ -897,22 +897,24 @@ out_sigsegv:
 
 asmlinkage void do_tr(struct pt_regs *regs)
 {
-        unsigned int opcode, tcode = 0;
+        u32 opcode, tcode = 0;
         u16 instr[2];
-        unsigned long epc = exception_epc(regs);
+        unsigned long epc = msk_isa16_mode(exception_epc(regs));
 
-        if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
-                (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+        if (get_isa16_mode(regs->cp0_epc)) {
+                if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
+                    __get_user(instr[1], (u16 __user *)(epc + 2)))
                         goto out_sigsegv;
-        opcode = (instr[0] << 16) | instr[1];
-
-        /* Immediate versions don't provide a code. */
-        if (!(opcode & OPCODE)) {
-                if (get_isa16_mode(regs->cp0_epc))
-                        /* microMIPS */
-                        tcode = (opcode >> 12) & 0x1f;
-                else
-                        tcode = ((opcode >> 6) & ((1 << 10) - 1));
+                opcode = (instr[0] << 16) | instr[1];
+                /* Immediate versions don't provide a code. */
+                if (!(opcode & OPCODE))
+                        tcode = (opcode >> 12) & ((1 << 4) - 1);
+        } else {
+                if (__get_user(opcode, (u32 __user *)epc))
+                        goto out_sigsegv;
+                /* Immediate versions don't provide a code. */
+                if (!(opcode & OPCODE))
+                        tcode = (opcode >> 6) & ((1 << 10) - 1);
         }
 
         do_trap_or_bp(regs, tcode, "Trap");
@@ -1542,7 +1544,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
         extern char except_vec_vi, except_vec_vi_lui;
         extern char except_vec_vi_ori, except_vec_vi_end;
         extern char rollback_except_vec_vi;
-        char *vec_start = (cpu_wait == r4k_wait) ?
+        char *vec_start = using_rollback_handler() ?
                 &rollback_except_vec_vi : &except_vec_vi;
 #ifdef CONFIG_MIPS_MT_SMTC
         /*
@@ -1656,7 +1658,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
         unsigned int cpu = smp_processor_id();
         unsigned int status_set = ST0_CU0;
         unsigned int hwrena = cpu_hwrena_impl_bits;
-        unsigned long asid = 0;
 #ifdef CONFIG_MIPS_MT_SMTC
         int secondaryTC = 0;
         int bootTC = (cpu == 0);
@@ -1740,9 +1741,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
         }
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-        asid = ASID_FIRST_VERSION;
-        cpu_data[cpu].asid_cache = asid;
-        TLBMISS_HANDLER_SETUP();
+        if (!cpu_data[cpu].asid_cache)
+                cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
 
         atomic_inc(&init_mm.mm_count);
         current->active_mm = &init_mm;
@@ -1814,10 +1814,8 @@ void __init trap_init(void)
         extern char except_vec4;
         extern char except_vec3_r4000;
         unsigned long i;
-        int rollback;
 
         check_wait();
-        rollback = (cpu_wait == r4k_wait);
 
 #if defined(CONFIG_KGDB)
         if (kgdb_early_setup)
@@ -1894,7 +1892,8 @@ void __init trap_init(void)
         if (board_be_init)
                 board_be_init();
 
-        set_except_vector(0, rollback ? rollback_handle_int : handle_int);
+        set_except_vector(0, using_rollback_handler() ? rollback_handle_int
+                                                      : handle_int);
         set_except_vector(1, handle_tlbm);
         set_except_vector(2, handle_tlbl);
         set_except_vector(3, handle_tlbs);
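
The cputime_to_compat_timeval() helper added to both compat ELF loaders above is plain jiffies arithmetic. The following user-space illustration of the same conversion is only a sketch: HZ is fixed at 100 here purely for the example (in the kernel it is a build-time configuration value), the struct is a stand-in for the kernel's compat_timeval, and cputime_to_jiffies() is assumed to have already produced the jiffies count.

#include <stdio.h>

#define HZ 100                          /* example value only */

struct compat_timeval {                 /* stand-in for the kernel struct */
        int tv_sec;
        int tv_usec;
};

/* Same arithmetic as the helper added to binfmt_elfn32.c/binfmt_elfo32.c,
 * starting from a jiffies count instead of a cputime_t. */
static void jiffies_to_compat_timeval(unsigned long jiffies,
                                      struct compat_timeval *value)
{
        value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
        value->tv_sec = jiffies / HZ;
}

int main(void)
{
        struct compat_timeval tv;

        jiffies_to_compat_timeval(250, &tv);
        printf("%d s %d us\n", tv.tv_sec, tv.tv_usec);  /* 2 s 500000 us */
        return 0;
}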