Diffstat (limited to 'arch/mips/kernel')
27 files changed, 468 insertions, 452 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index cd9cec9..bbbb8d7 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y		:= head.o init_task.o vmlinux.lds
 obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 		   ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
-		   time.o traps.o unaligned.o
+		   time.o topology.o traps.o unaligned.o
 binfmt_irix-objs	:= irixelf.o irixinv.o irixioctl.o irixsig.o \
 			   irix5sys.o sysirix.o
@@ -45,7 +45,6 @@ obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
 obj-$(CONFIG_MIPS_VPE_APSP_API)	+= rtlx.o
-obj-$(CONFIG_NO_ISA)		+= dma-no-isa.o
 obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
 obj-$(CONFIG_IRQ_CPU_RM7K)	+= irq-rm7000.o
@@ -67,6 +66,8 @@ obj-$(CONFIG_64BIT)	+= cpu-bugs64.o
 obj-$(CONFIG_I8253)		+= i8253.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+
 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 8485af3..442839e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -110,9 +110,8 @@ static inline void check_wait(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
-	printk("Checking for 'wait' instruction... ");
 	if (nowait) {
-		printk (" disabled.\n");
+		printk("Wait instruction disabled.\n");
 		return;
 	}
@@ -120,11 +119,9 @@ static inline void check_wait(void)
 	case CPU_R3081:
 	case CPU_R3081E:
 		cpu_wait = r3081_wait;
-		printk(" available.\n");
 		break;
 	case CPU_TX3927:
 		cpu_wait = r39xx_wait;
-		printk(" available.\n");
 		break;
 	case CPU_R4200:
/*	case CPU_R4300: */
@@ -146,33 +143,23 @@ static inline void check_wait(void)
 	case CPU_74K:
 	case CPU_PR4450:
 		cpu_wait = r4k_wait;
-		printk(" available.\n");
 		break;
 	case CPU_TX49XX:
 		cpu_wait = r4k_wait_irqoff;
-		printk(" available.\n");
 		break;
 	case CPU_AU1000:
 	case CPU_AU1100:
 	case CPU_AU1500:
 	case CPU_AU1550:
 	case CPU_AU1200:
-		if (allow_au1k_wait) {
+		if (allow_au1k_wait)
 			cpu_wait = au1k_wait;
-			printk(" available.\n");
-		} else
-			printk(" unavailable.\n");
 		break;
 	case CPU_RM9000:
-		if ((c->processor_id & 0x00ff) >= 0x40) {
+		if ((c->processor_id & 0x00ff) >= 0x40)
 			cpu_wait = r4k_wait;
-			printk(" available.\n");
-		} else {
-			printk(" unavailable.\n");
-		}
 		break;
 	default:
-		printk(" unavailable.\n");
 		break;
 	}
 }
diff --git a/arch/mips/kernel/dma-no-isa.c b/arch/mips/kernel/dma-no-isa.c
deleted file mode 100644
index 6df8b07..0000000
--- a/arch/mips/kernel/dma-no-isa.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- *
- * Dummy ISA DMA functions for systems that don't have ISA but share drivers
- * with ISA such as legacy free PCI.
- */
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-
-DEFINE_SPINLOCK(dma_spin_lock);
-
-int request_dma(unsigned int dmanr, const char * device_id)
-{
-	return -EINVAL;
-}
-
-void free_dma(unsigned int dmanr)
-{
-}
-
-EXPORT_SYMBOL(dma_spin_lock);
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5baca16..aacd4a0 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -19,6 +19,7 @@
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
 #include <asm/war.h>
+#include <asm/page.h>
 #define PANIC_PIC(msg) \
 		.set	push; \
@@ -378,6 +379,68 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	BUILD_HANDLER dsp dsp sti silent		/* #26 */
 	BUILD_HANDLER reserved reserved sti verbose	/* others */
+	.align	5
+	LEAF(handle_ri_rdhwr_vivt)
+#ifdef CONFIG_MIPS_MT_SMTC
+	PANIC_PIC("handle_ri_rdhwr_vivt called")
+#else
+	.set	push
+	.set	noat
+	.set	noreorder
+	/* check if TLB contains a entry for EPC */
+	MFC0	k1, CP0_ENTRYHI
+	andi	k1, 0xff	/* ASID_MASK */
+	MFC0	k0, CP0_EPC
+	PTR_SRL	k0, PAGE_SHIFT + 1
+	PTR_SLL	k0, PAGE_SHIFT + 1
+	or	k1, k0
+	MTC0	k1, CP0_ENTRYHI
+	mtc0_tlbw_hazard
+	tlbp
+	tlb_probe_hazard
+	mfc0	k1, CP0_INDEX
+	.set	pop
+	bltz	k1, handle_ri	/* slow path */
+	/* fall thru */
+#endif
+	END(handle_ri_rdhwr_vivt)
+
+	LEAF(handle_ri_rdhwr)
+	.set	push
+	.set	noat
+	.set	noreorder
+	/* 0x7c03e83b: rdhwr v1,$29 */
+	MFC0	k1, CP0_EPC
+	lui	k0, 0x7c03
+	lw	k1, (k1)
+	ori	k0, 0xe83b
+	.set	reorder
+	bne	k0, k1, handle_ri	/* if not ours */
+	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
+	get_saved_sp	/* k1 := current_thread_info */
+	.set	noreorder
+	MFC0	k0, CP0_EPC
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+	ori	k1, _THREAD_MASK
+	xori	k1, _THREAD_MASK
+	LONG_L	v1, TI_TP_VALUE(k1)
+	LONG_ADDIU	k0, 4
+	jr	k0
+	 rfe
+#else
+	LONG_ADDIU	k0, 4	/* stall on $k0 */
+	MTC0	k0, CP0_EPC
+	/* I hope three instructions between MTC0 and ERET are enough... */
+	ori	k1, _THREAD_MASK
+	xori	k1, _THREAD_MASK
+	LONG_L	v1, TI_TP_VALUE(k1)
+	.set	mips3
+	eret
+	.set	mips0
+#endif
+	.set	pop
+	END(handle_ri_rdhwr)
+
 #ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi().
*/ diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index ddc1b71..a2e095a 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -250,6 +250,9 @@ NESTED(smp_bootstrap, 16, sp) */ page swapper_pg_dir, _PGD_ORDER #ifdef CONFIG_64BIT +#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64) + page module_pg_dir, _PGD_ORDER +#endif page invalid_pmd_table, _PMD_ORDER #endif page invalid_pte_table, _PTE_ORDER diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 48e3418..2526c0c 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -40,21 +40,10 @@ static void end_8259A_irq (unsigned int irq) enable_8259A_irq(irq); } -#define shutdown_8259A_irq disable_8259A_irq - void mask_and_ack_8259A(unsigned int); -static unsigned int startup_8259A_irq(unsigned int irq) -{ - enable_8259A_irq(irq); - - return 0; /* never anything pending */ -} - static struct irq_chip i8259A_irq_type = { .typename = "XT-PIC", - .startup = startup_8259A_irq, - .shutdown = shutdown_8259A_irq, .enable = enable_8259A_irq, .disable = disable_8259A_irq, .ack = mask_and_ack_8259A, @@ -120,7 +109,7 @@ int i8259A_irq_pending(unsigned int irq) void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); - irq_desc[irq].chip = &i8259A_irq_type; + set_irq_chip(irq, &i8259A_irq_type); enable_irq(irq); } @@ -323,12 +312,8 @@ void __init init_i8259_irqs (void) init_8259A(0); - for (i = 0; i < 16; i++) { - irq_desc[i].status = IRQ_DISABLED; - irq_desc[i].action = NULL; - irq_desc[i].depth = 1; - irq_desc[i].chip = &i8259A_irq_type; - } + for (i = 0; i < 16; i++) + set_irq_chip(i, &i8259A_irq_type); setup_irq(2, &irq2); } diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 650a80c..bcaad66 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c @@ -45,31 +45,6 @@ static inline void unmask_msc_irq(unsigned int irq) } /* - * Enables the IRQ on SOC-it - */ -static void enable_msc_irq(unsigned int irq) -{ - unmask_msc_irq(irq); -} - -/* - * Initialize the IRQ on SOC-it - */ -static unsigned int startup_msc_irq(unsigned int irq) -{ - unmask_msc_irq(irq); - return 0; -} - -/* - * Disables the IRQ on SOC-it - */ -static void disable_msc_irq(unsigned int irq) -{ - mask_msc_irq(irq); -} - -/* * Masks and ACKs an IRQ */ static void level_mask_and_ack_msc_irq(unsigned int irq) @@ -136,25 +111,23 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set) (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF)); } -#define shutdown_msc_irq disable_msc_irq - struct irq_chip msc_levelirq_type = { .typename = "SOC-it-Level", - .startup = startup_msc_irq, - .shutdown = shutdown_msc_irq, - .enable = enable_msc_irq, - .disable = disable_msc_irq, .ack = level_mask_and_ack_msc_irq, + .mask = mask_msc_irq, + .mask_ack = level_mask_and_ack_msc_irq, + .unmask = unmask_msc_irq, + .eoi = unmask_msc_irq, .end = end_msc_irq, }; struct irq_chip msc_edgeirq_type = { .typename = "SOC-it-Edge", - .startup =startup_msc_irq, - .shutdown = shutdown_msc_irq, - .enable = enable_msc_irq, - .disable = disable_msc_irq, .ack = edge_mask_and_ack_msc_irq, + .mask = mask_msc_irq, + .mask_ack = edge_mask_and_ack_msc_irq, + .unmask = unmask_msc_irq, + .eoi = unmask_msc_irq, .end = end_msc_irq, }; @@ -175,14 +148,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq) switch (imp->im_type) { case MSC01_IRQ_EDGE: - irq_desc[base+n].chip = &msc_edgeirq_type; + set_irq_chip(base+n, &msc_edgeirq_type); if (cpu_has_veic) 
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); else MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); break; case MSC01_IRQ_LEVEL: - irq_desc[base+n].chip = &msc_levelirq_type; + set_irq_chip(base+n, &msc_levelirq_type); if (cpu_has_veic) MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); else diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c index 37d1062..6cfb31c 100644 --- a/arch/mips/kernel/irq-mv6434x.c +++ b/arch/mips/kernel/irq-mv6434x.c @@ -67,39 +67,6 @@ static inline void unmask_mv64340_irq(unsigned int irq) } /* - * Enables the IRQ on Marvell Chip - */ -static void enable_mv64340_irq(unsigned int irq) -{ - unmask_mv64340_irq(irq); -} - -/* - * Initialize the IRQ on Marvell Chip - */ -static unsigned int startup_mv64340_irq(unsigned int irq) -{ - unmask_mv64340_irq(irq); - return 0; -} - -/* - * Disables the IRQ on Marvell Chip - */ -static void disable_mv64340_irq(unsigned int irq) -{ - mask_mv64340_irq(irq); -} - -/* - * Masks and ACKs an IRQ - */ -static void mask_and_ack_mv64340_irq(unsigned int irq) -{ - mask_mv64340_irq(irq); -} - -/* * End IRQ processing */ static void end_mv64340_irq(unsigned int irq) @@ -133,15 +100,12 @@ void ll_mv64340_irq(void) do_IRQ(ls1bit32(irq_src_high) + irq_base + 32); } -#define shutdown_mv64340_irq disable_mv64340_irq - struct irq_chip mv64340_irq_type = { .typename = "MV-64340", - .startup = startup_mv64340_irq, - .shutdown = shutdown_mv64340_irq, - .enable = enable_mv64340_irq, - .disable = disable_mv64340_irq, - .ack = mask_and_ack_mv64340_irq, + .ack = mask_mv64340_irq, + .mask = mask_mv64340_irq, + .mask_ack = mask_mv64340_irq, + .unmask = unmask_mv64340_irq, .end = end_mv64340_irq, }; @@ -149,13 +113,9 @@ void __init mv64340_irq_init(unsigned int base) { int i; - /* Reset irq handlers pointers to NULL */ - for (i = base; i < base + 64; i++) { - irq_desc[i].status = IRQ_DISABLED; - irq_desc[i].action = 0; - irq_desc[i].depth = 2; - irq_desc[i].chip = &mv64340_irq_type; - } + for (i = base; i < base + 64; i++) + set_irq_chip_and_handler(i, &mv64340_irq_type, + handle_level_irq); irq_base = base; } diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index 6b54c71..ddcc2a5 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c @@ -29,42 +29,6 @@ static inline void mask_rm7k_irq(unsigned int irq) clear_c0_intcontrol(0x100 << (irq - irq_base)); } -static inline void rm7k_cpu_irq_enable(unsigned int irq) -{ - unsigned long flags; - - local_irq_save(flags); - unmask_rm7k_irq(irq); - local_irq_restore(flags); -} - -static void rm7k_cpu_irq_disable(unsigned int irq) -{ - unsigned long flags; - - local_irq_save(flags); - mask_rm7k_irq(irq); - local_irq_restore(flags); -} - -static unsigned int rm7k_cpu_irq_startup(unsigned int irq) -{ - rm7k_cpu_irq_enable(irq); - - return 0; -} - -#define rm7k_cpu_irq_shutdown rm7k_cpu_irq_disable - -/* - * While we ack the interrupt interrupts are disabled and thus we don't need - * to deal with concurrency issues. Same for rm7k_cpu_irq_end. 
- */ -static void rm7k_cpu_irq_ack(unsigned int irq) -{ - mask_rm7k_irq(irq); -} - static void rm7k_cpu_irq_end(unsigned int irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) @@ -73,11 +37,10 @@ static void rm7k_cpu_irq_end(unsigned int irq) static struct irq_chip rm7k_irq_controller = { .typename = "RM7000", - .startup = rm7k_cpu_irq_startup, - .shutdown = rm7k_cpu_irq_shutdown, - .enable = rm7k_cpu_irq_enable, - .disable = rm7k_cpu_irq_disable, - .ack = rm7k_cpu_irq_ack, + .ack = mask_rm7k_irq, + .mask = mask_rm7k_irq, + .mask_ack = mask_rm7k_irq, + .unmask = unmask_rm7k_irq, .end = rm7k_cpu_irq_end, }; @@ -87,12 +50,9 @@ void __init rm7k_cpu_irq_init(int base) clear_c0_intcontrol(0x00000f00); /* Mask all */ - for (i = base; i < base + 4; i++) { - irq_desc[i].status = IRQ_DISABLED; - irq_desc[i].action = NULL; - irq_desc[i].depth = 1; - irq_desc[i].chip = &rm7k_irq_controller; - } + for (i = base; i < base + 4; i++) + set_irq_chip_and_handler(i, &rm7k_irq_controller, + handle_level_irq); irq_base = base; } diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index 62f011b..ba6440c 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c @@ -48,15 +48,6 @@ static void rm9k_cpu_irq_disable(unsigned int irq) local_irq_restore(flags); } -static unsigned int rm9k_cpu_irq_startup(unsigned int irq) -{ - rm9k_cpu_irq_enable(irq); - - return 0; -} - -#define rm9k_cpu_irq_shutdown rm9k_cpu_irq_disable - /* * Performance counter interrupts are global on all processors. */ @@ -89,16 +80,6 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq) on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1); } - -/* - * While we ack the interrupt interrupts are disabled and thus we don't need - * to deal with concurrency issues. Same for rm9k_cpu_irq_end. 
- */ -static void rm9k_cpu_irq_ack(unsigned int irq) -{ - mask_rm9k_irq(irq); -} - static void rm9k_cpu_irq_end(unsigned int irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) @@ -107,11 +88,10 @@ static void rm9k_cpu_irq_end(unsigned int irq) static struct irq_chip rm9k_irq_controller = { .typename = "RM9000", - .startup = rm9k_cpu_irq_startup, - .shutdown = rm9k_cpu_irq_shutdown, - .enable = rm9k_cpu_irq_enable, - .disable = rm9k_cpu_irq_disable, - .ack = rm9k_cpu_irq_ack, + .ack = mask_rm9k_irq, + .mask = mask_rm9k_irq, + .mask_ack = mask_rm9k_irq, + .unmask = unmask_rm9k_irq, .end = rm9k_cpu_irq_end, }; @@ -119,9 +99,10 @@ static struct irq_chip rm9k_perfcounter_irq = { .typename = "RM9000", .startup = rm9k_perfcounter_irq_startup, .shutdown = rm9k_perfcounter_irq_shutdown, - .enable = rm9k_cpu_irq_enable, - .disable = rm9k_cpu_irq_disable, - .ack = rm9k_cpu_irq_ack, + .ack = mask_rm9k_irq, + .mask = mask_rm9k_irq, + .mask_ack = mask_rm9k_irq, + .unmask = unmask_rm9k_irq, .end = rm9k_cpu_irq_end, }; @@ -135,15 +116,13 @@ void __init rm9k_cpu_irq_init(int base) clear_c0_intcontrol(0x0000f000); /* Mask all */ - for (i = base; i < base + 4; i++) { - irq_desc[i].status = IRQ_DISABLED; - irq_desc[i].action = NULL; - irq_desc[i].depth = 1; - irq_desc[i].chip = &rm9k_irq_controller; - } + for (i = base; i < base + 4; i++) + set_irq_chip_and_handler(i, &rm9k_irq_controller, + handle_level_irq); rm9000_perfcount_irq = base + 1; - irq_desc[rm9000_perfcount_irq].chip = &rm9k_perfcounter_irq; + set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, + handle_level_irq); irq_base = base; } diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 9b0e49d..b339798 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -88,25 +88,6 @@ atomic_t irq_err_count; unsigned long irq_hwmask[NR_IRQS]; #endif /* CONFIG_MIPS_MT_SMTC */ -#undef do_IRQ - -/* - * do_IRQ handles all normal device IRQ's (the special - * SMP cross-CPU interrupts have their own specific - * handlers). 
- */ -asmlinkage unsigned int do_IRQ(unsigned int irq) -{ - irq_enter(); - - __DO_IRQ_SMTC_HOOK(); - __do_IRQ(irq); - - irq_exit(); - - return 1; -} - /* * Generic, controller-independent functions: */ @@ -172,19 +153,6 @@ __setup("nokgdb", nokgdb); void __init init_IRQ(void) { - int i; - - for (i = 0; i < NR_IRQS; i++) { - irq_desc[i].status = IRQ_DISABLED; - irq_desc[i].action = NULL; - irq_desc[i].depth = 1; - irq_desc[i].chip = &no_irq_chip; - spin_lock_init(&irq_desc[i].lock); -#ifdef CONFIG_MIPS_MT_SMTC - irq_hwmask[i] = 0; -#endif /* CONFIG_MIPS_MT_SMTC */ - } - arch_init_irq(); #ifdef CONFIG_KGDB diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 9bb21c7..be5ac23 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -50,44 +50,6 @@ static inline void mask_mips_irq(unsigned int irq) irq_disable_hazard(); } -static inline void mips_cpu_irq_enable(unsigned int irq) -{ - unsigned long flags; - - local_irq_save(flags); - unmask_mips_irq(irq); - back_to_back_c0_hazard(); - local_irq_restore(flags); -} - -static void mips_cpu_irq_disable(unsigned int irq) -{ - unsigned long flags; - - local_irq_save(flags); - mask_mips_irq(irq); - back_to_back_c0_hazard(); - local_irq_restore(flags); -} - -static unsigned int mips_cpu_irq_startup(unsigned int irq) -{ - mips_cpu_irq_enable(irq); - - return 0; -} - -#define mips_cpu_irq_shutdown mips_cpu_irq_disable - -/* - * While we ack the interrupt interrupts are disabled and thus we don't need - * to deal with concurrency issues. Same for mips_cpu_irq_end. - */ -static void mips_cpu_irq_ack(unsigned int irq) -{ - mask_mips_irq(irq); -} - static void mips_cpu_irq_end(unsigned int irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) @@ -96,11 +58,11 @@ static void mips_cpu_irq_end(unsigned int irq) static struct irq_chip mips_cpu_irq_controller = { .typename = "MIPS", - .startup = mips_cpu_irq_startup, - .shutdown = mips_cpu_irq_shutdown, - .enable = mips_cpu_irq_enable, - .disable = mips_cpu_irq_disable, - .ack = mips_cpu_irq_ack, + .ack = mask_mips_irq, + .mask = mask_mips_irq, + .mask_ack = mask_mips_irq, + .unmask = unmask_mips_irq, + .eoi = unmask_mips_irq, .end = mips_cpu_irq_end, }; @@ -110,8 +72,6 @@ static struct irq_chip mips_cpu_irq_controller = { #define unmask_mips_mt_irq unmask_mips_irq #define mask_mips_mt_irq mask_mips_irq -#define mips_mt_cpu_irq_enable mips_cpu_irq_enable -#define mips_mt_cpu_irq_disable mips_cpu_irq_disable static unsigned int mips_mt_cpu_irq_startup(unsigned int irq) { @@ -119,13 +79,11 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq) clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); evpe(vpflags); - mips_mt_cpu_irq_enable(irq); + unmask_mips_mt_irq(irq); return 0; } -#define mips_mt_cpu_irq_shutdown mips_mt_cpu_irq_disable - /* * While we ack the interrupt interrupts are disabled and thus we don't need * to deal with concurrency issues. Same for mips_cpu_irq_end. 
@@ -143,10 +101,11 @@ static void mips_mt_cpu_irq_ack(unsigned int irq) static struct irq_chip mips_mt_cpu_irq_controller = { .typename = "MIPS", .startup = mips_mt_cpu_irq_startup, - .shutdown = mips_mt_cpu_irq_shutdown, - .enable = mips_mt_cpu_irq_enable, - .disable = mips_mt_cpu_irq_disable, .ack = mips_mt_cpu_irq_ack, + .mask = mask_mips_mt_irq, + .mask_ack = mips_mt_cpu_irq_ack, + .unmask = unmask_mips_mt_irq, + .eoi = unmask_mips_mt_irq, .end = mips_mt_cpu_irq_end, }; @@ -163,19 +122,12 @@ void __init mips_cpu_irq_init(int irq_base) * leave them uninitialized for other processors. */ if (cpu_has_mipsmt) - for (i = irq_base; i < irq_base + 2; i++) { - irq_desc[i].status = IRQ_DISABLED; - irq_desc[i].action = NULL; - irq_desc[i].depth = 1; - irq_desc[i].chip = &mips_mt_cpu_irq_controller; - } - - for (i = irq_base + 2; i < irq_base + 8; i++) { - irq_desc[i].status = IRQ_DISABLED; - irq_desc[i].action = NULL; - irq_desc[i].depth = 1; - irq_desc[i].chip = &mips_cpu_irq_controller; - } + for (i = irq_base; i < irq_base + 2; i++) + set_irq_chip(i, &mips_mt_cpu_irq_controller); + + for (i = irq_base + 2; i < irq_base + 8; i++) + set_irq_chip_and_handler(i, &mips_cpu_irq_controller, + handle_level_irq); mips_cpu_irq_base = irq_base; } diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c new file mode 100644 index 0000000..e0ad754 --- /dev/null +++ b/arch/mips/kernel/machine_kexec.c @@ -0,0 +1,85 @@ +/* + * machine_kexec.c for kexec + * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include <linux/kexec.h> +#include <linux/mm.h> +#include <linux/delay.h> + +#include <asm/cacheflush.h> +#include <asm/page.h> + +const extern unsigned char relocate_new_kernel[]; +const extern unsigned int relocate_new_kernel_size; + +extern unsigned long kexec_start_address; +extern unsigned long kexec_indirection_page; + +int +machine_kexec_prepare(struct kimage *kimage) +{ + return 0; +} + +void +machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void +machine_shutdown(void) +{ +} + +void +machine_crash_shutdown(struct pt_regs *regs) +{ +} + +void +machine_kexec(struct kimage *image) +{ + unsigned long reboot_code_buffer; + unsigned long entry; + unsigned long *ptr; + + reboot_code_buffer = + (unsigned long)page_address(image->control_code_page); + + kexec_start_address = image->start; + kexec_indirection_page = phys_to_virt(image->head & PAGE_MASK); + + memcpy((void*)reboot_code_buffer, relocate_new_kernel, + relocate_new_kernel_size); + + /* + * The generic kexec code builds a page list with physical + * addresses. they are directly accessible through KSEG0 (or + * CKSEG0 or XPHYS if on 64bit system), hence the + * pys_to_virt() call. + */ + for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE); + ptr = (entry & IND_INDIRECTION) ? + phys_to_virt(entry & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = phys_to_virt(*ptr); + } + + /* + * we do not want to be bothered. 
+ */ + local_irq_disable(); + + flush_icache_range(reboot_code_buffer, + reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); + + printk("Will call new kernel at %08x\n", image->start); + printk("Bye ...\n"); + flush_cache_all(); + ((void (*)(void))reboot_code_buffer)(); +} diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index d7bf021..cb08014 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -29,6 +29,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> +#include <asm/pgtable.h> /* MODULE_START */ struct mips_hi16 { struct mips_hi16 *next; @@ -43,9 +44,23 @@ static DEFINE_SPINLOCK(dbe_lock); void *module_alloc(unsigned long size) { +#ifdef MODULE_START + struct vm_struct *area; + + size = PAGE_ALIGN(size); + if (!size) + return NULL; + + area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END); + if (!area) + return NULL; + + return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); +#else if (size == 0) return NULL; return vmalloc(size); +#endif } /* Free memory returned from module_alloc */ diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S new file mode 100644 index 0000000..a3f0d00c --- /dev/null +++ b/arch/mips/kernel/relocate_kernel.S @@ -0,0 +1,80 @@ +/* + * relocate_kernel.S for kexec + * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/regdef.h> +#include <asm/page.h> +#include <asm/mipsregs.h> +#include <asm/stackframe.h> +#include <asm/addrspace.h> + + .globl relocate_new_kernel +relocate_new_kernel: + + PTR_L s0, kexec_indirection_page + PTR_L s1, kexec_start_address + +process_entry: + PTR_L s2, (s0) + PTR_ADD s0, s0, SZREG + + /* destination page */ + and s3, s2, 0x1 + beq s3, zero, 1f + and s4, s2, ~0x1 /* store destination addr in s4 */ + move a0, s4 + b process_entry + +1: + /* indirection page, update s0 */ + and s3, s2, 0x2 + beq s3, zero, 1f + and s0, s2, ~0x2 + b process_entry + +1: + /* done page */ + and s3, s2, 0x4 + beq s3, zero, 1f + b done +1: + /* source page */ + and s3, s2, 0x8 + beq s3, zero, process_entry + and s2, s2, ~0x8 + li s6, (1 << PAGE_SHIFT) / SZREG + +copy_word: + /* copy page word by word */ + REG_L s5, (s2) + REG_S s5, (s4) + INT_ADD s4, s4, SZREG + INT_ADD s2, s2, SZREG + INT_SUB s6, s6, 1 + beq s6, zero, process_entry + b copy_word + b process_entry + +done: + /* jump to kexec_start_address */ + j s1 + + .globl kexec_start_address +kexec_start_address: + .long 0x0 + + .globl kexec_indirection_page +kexec_indirection_page: + .long 0x0 + +relocate_new_kernel_end: + + .globl relocate_new_kernel_size +relocate_new_kernel_size: + .long relocate_new_kernel_end - relocate_new_kernel diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index a95f37d..7c0b393 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -653,7 +653,7 @@ einval: li v0, -EINVAL sys sys_move_pages 6 sys sys_set_robust_list 2 sys sys_get_robust_list 3 /* 4310 */ - sys sys_ni_syscall 0 + sys sys_kexec_load 4 sys sys_getcpu 3 sys sys_epoll_pwait 6 .endm diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 8fb0f60..e569b84 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -468,6 +468,6 @@ sys_call_table: PTR sys_move_pages PTR sys_set_robust_list PTR 
sys_get_robust_list - PTR sys_ni_syscall /* 5270 */ + PTR sys_kexec_load /* 5270 */ PTR sys_getcpu PTR sys_epoll_pwait diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 0da5ca2..5b18f26 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -394,6 +394,6 @@ EXPORT(sysn32_call_table) PTR sys_move_pages PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list - PTR sys_ni_syscall + PTR compat_sys_kexec_load PTR sys_getcpu PTR sys_epoll_pwait diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index b9d00ca..e91379c 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -516,7 +516,7 @@ sys_call_table: PTR compat_sys_move_pages PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list /* 4310 */ - PTR sys_ni_syscall + PTR compat_sys_kexec_load PTR sys_getcpu PTR sys_epoll_pwait .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8f6e896..89440a0 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -145,13 +145,12 @@ static int __init rd_start_early(char *p) unsigned long start = memparse(p, &p); #ifdef CONFIG_64BIT - /* HACK: Guess if the sign extension was forgotten */ - if (start > 0x0000000080000000 && start < 0x00000000ffffffff) - start |= 0xffffffff00000000UL; + /* Guess if the sign extension was forgotten by bootloader */ + if (start < XKPHYS) + start = (int)start; #endif initrd_start = start; initrd_end += start; - return 0; } early_param("rd_start", rd_start_early); @@ -159,41 +158,64 @@ early_param("rd_start", rd_start_early); static int __init rd_size_early(char *p) { initrd_end += memparse(p, &p); - return 0; } early_param("rd_size", rd_size_early); +/* it returns the next free pfn after initrd */ static unsigned long __init init_initrd(void) { - unsigned long tmp, end, size; + unsigned long end; u32 *initrd_header; - ROOT_DEV = Root_RAM0; - /* * Board specific code or command line parser should have * already set up initrd_start and initrd_end. In these cases * perfom sanity checks and use them if all looks good. */ - size = initrd_end - initrd_start; - if (initrd_end == 0 || size == 0) { - initrd_start = 0; - initrd_end = 0; - } else - return initrd_end; - - end = (unsigned long)&_end; - tmp = PAGE_ALIGN(end) - sizeof(u32) * 2; - if (tmp < end) - tmp += PAGE_SIZE; - - initrd_header = (u32 *)tmp; - if (initrd_header[0] == 0x494E5244) { - initrd_start = (unsigned long)&initrd_header[2]; - initrd_end = initrd_start + initrd_header[1]; + if (initrd_start && initrd_end > initrd_start) + goto sanitize; + + /* + * See if initrd has been added to the kernel image by + * arch/mips/boot/addinitrd.c. In that case a header is + * prepended to initrd and is made up by 8 bytes. The fisrt + * word is a magic number and the second one is the size of + * initrd. Initrd start must be page aligned in any cases. + */ + initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8; + if (initrd_header[0] != 0x494E5244) + goto disable; + initrd_start = (unsigned long)(initrd_header + 2); + initrd_end = initrd_start + initrd_header[1]; + +sanitize: + if (initrd_start & ~PAGE_MASK) { + printk(KERN_ERR "initrd start must be page aligned\n"); + goto disable; } - return initrd_end; + if (initrd_start < PAGE_OFFSET) { + printk(KERN_ERR "initrd start < PAGE_OFFSET\n"); + goto disable; + } + + /* + * Sanitize initrd addresses. 
For example firmware + * can't guess if they need to pass them through + * 64-bits values if the kernel has been built in pure + * 32-bit. We need also to switch from KSEG0 to XKPHYS + * addresses now, so the code can now safely use __pa(). + */ + end = __pa(initrd_end); + initrd_end = (unsigned long)__va(end); + initrd_start = (unsigned long)__va(__pa(initrd_start)); + + ROOT_DEV = Root_RAM0; + return PFN_UP(end); +disable: + initrd_start = 0; + initrd_end = 0; + return 0; } static void __init finalize_initrd(void) @@ -204,12 +226,12 @@ static void __init finalize_initrd(void) printk(KERN_INFO "Initrd not found or empty"); goto disable; } - if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) { + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { printk("Initrd extends beyond end of memory"); goto disable; } - reserve_bootmem(CPHYSADDR(initrd_start), size); + reserve_bootmem(__pa(initrd_start), size); initrd_below_start_ok = 1; printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n", @@ -259,8 +281,7 @@ static void __init bootmem_init(void) * not selected. Once that done we can determine the low bound * of usable memory. */ - reserved_end = init_initrd(); - reserved_end = PFN_UP(CPHYSADDR(max(reserved_end, (unsigned long)&_end))); + reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end))); /* * Find the highest page frame number we have available. @@ -432,10 +453,10 @@ static void __init resource_init(void) if (UNCAC_BASE != IO_BASE) return; - code_resource.start = virt_to_phys(&_text); - code_resource.end = virt_to_phys(&_etext) - 1; - data_resource.start = virt_to_phys(&_etext); - data_resource.end = virt_to_phys(&_edata) - 1; + code_resource.start = __pa_symbol(&_text); + code_resource.end = __pa_symbol(&_etext) - 1; + data_resource.start = __pa_symbol(&_etext); + data_resource.end = __pa_symbol(&_edata) - 1; /* * Request address space for all standard RAM. 
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 477c533..a67c185 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -17,7 +17,6 @@ */ #include <linux/cache.h> #include <linux/sched.h> -#include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/smp_lock.h> diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 2ac19a6c..1ee689c 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -278,7 +278,9 @@ void __init plat_prepare_cpus(unsigned int max_cpus) /* need to mark IPI's as IRQ_PER_CPU */ irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU; + set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq); irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU; + set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq); } /* diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index db80957..49db516 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -463,28 +463,5 @@ void flush_tlb_one(unsigned long vaddr) smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr); } -static DEFINE_PER_CPU(struct cpu, cpu_devices); - -static int __init topology_init(void) -{ - int i, ret; - -#ifdef CONFIG_NUMA - for_each_online_node(i) - register_one_node(i); -#endif /* CONFIG_NUMA */ - - for_each_present_cpu(i) { - ret = register_cpu(&per_cpu(cpu_devices, i), i); - if (ret) - printk(KERN_WARNING "topology_init: register_cpu %d " - "failed (%d)\n", i, ret); - } - - return 0; -} - -subsys_initcall(topology_init); - EXPORT_SYMBOL(flush_tlb_page); EXPORT_SYMBOL(flush_tlb_one); diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 3b78caf..802febe 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -1009,6 +1009,7 @@ void setup_cross_vpe_interrupts(void) setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU; + set_irq_handler(cpu_ipi_irq, handle_percpu_irq); } /* diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index e535f86..11aab6d 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -11,7 +11,6 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ -#include <linux/clocksource.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> @@ -83,17 +82,11 @@ static void null_timer_ack(void) { /* nothing */ } /* * Null high precision timer functions for systems lacking one. */ -static unsigned int null_hpt_read(void) +static cycle_t null_hpt_read(void) { return 0; } -static void __init null_hpt_init(void) -{ - /* nothing */ -} - - /* * Timer ack for an R4k-compatible timer of a known frequency. */ @@ -118,7 +111,7 @@ static void c0_timer_ack(void) /* * High precision timer functions for a R4k-compatible timer. 
*/ -static unsigned int c0_hpt_read(void) +static cycle_t c0_hpt_read(void) { return read_c0_count(); } @@ -132,9 +125,6 @@ static void __init c0_hpt_timer_init(void) int (*mips_timer_state)(void); void (*mips_timer_ack)(void); -unsigned int (*mips_hpt_read)(void); -void (*mips_hpt_init)(void) __initdata = null_hpt_init; -unsigned int mips_hpt_mask = 0xffffffff; /* last time when xtime and rtc are sync'ed up */ static long last_rtc_update; @@ -276,8 +266,7 @@ static struct irqaction timer_irqaction = { static unsigned int __init calibrate_hpt(void) { - u64 frequency; - u32 hpt_start, hpt_end, hpt_count, hz; + cycle_t frequency, hpt_start, hpt_end, hpt_count, hz; const int loops = HZ / 10; int log_2_loops = 0; @@ -303,28 +292,23 @@ static unsigned int __init calibrate_hpt(void) * during the calculated number of periods between timer * interrupts. */ - hpt_start = mips_hpt_read(); + hpt_start = clocksource_mips.read(); do { while (mips_timer_state()); while (!mips_timer_state()); } while (--i); - hpt_end = mips_hpt_read(); + hpt_end = clocksource_mips.read(); - hpt_count = (hpt_end - hpt_start) & mips_hpt_mask; + hpt_count = (hpt_end - hpt_start) & clocksource_mips.mask; hz = HZ; - frequency = (u64)hpt_count * (u64)hz; + frequency = hpt_count * hz; return frequency >> log_2_loops; } -static cycle_t read_mips_hpt(void) -{ - return (cycle_t)mips_hpt_read(); -} - -static struct clocksource clocksource_mips = { +struct clocksource clocksource_mips = { .name = "MIPS", - .read = read_mips_hpt, + .mask = 0xffffffff, .is_continuous = 1, }; @@ -333,7 +317,7 @@ static void __init init_mips_clocksource(void) u64 temp; u32 shift; - if (!mips_hpt_frequency || mips_hpt_read == null_hpt_read) + if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read) return; /* Calclate a somewhat reasonable rating value */ @@ -347,7 +331,6 @@ static void __init init_mips_clocksource(void) } clocksource_mips.shift = shift; clocksource_mips.mult = (u32)temp; - clocksource_mips.mask = mips_hpt_mask; clocksource_register(&clocksource_mips); } @@ -367,32 +350,36 @@ void __init time_init(void) -xtime.tv_sec, -xtime.tv_nsec); /* Choose appropriate high precision timer routines. */ - if (!cpu_has_counter && !mips_hpt_read) + if (!cpu_has_counter && !clocksource_mips.read) /* No high precision timer -- sorry. */ - mips_hpt_read = null_hpt_read; + clocksource_mips.read = null_hpt_read; else if (!mips_hpt_frequency && !mips_timer_state) { /* A high precision timer of unknown frequency. */ - if (!mips_hpt_read) + if (!clocksource_mips.read) /* No external high precision timer -- use R4k. */ - mips_hpt_read = c0_hpt_read; + clocksource_mips.read = c0_hpt_read; } else { /* We know counter frequency. Or we can get it. */ - if (!mips_hpt_read) { + if (!clocksource_mips.read) { /* No external high precision timer -- use R4k. */ - mips_hpt_read = c0_hpt_read; + clocksource_mips.read = c0_hpt_read; if (!mips_timer_state) { /* No external timer interrupt -- use R4k. */ - mips_hpt_init = c0_hpt_timer_init; mips_timer_ack = c0_timer_ack; + /* Calculate cache parameters. */ + cycles_per_jiffy = + (mips_hpt_frequency + HZ / 2) / HZ; + /* + * This sets up the high precision + * timer for the first interrupt. + */ + c0_hpt_timer_init(); } } if (!mips_hpt_frequency) mips_hpt_frequency = calibrate_hpt(); - /* Calculate cache parameters. */ - cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ; - /* Report the high precision timer rate for a reference. 
*/ printk("Using %u.%03u MHz high precision timer.\n", ((mips_hpt_frequency + 500) / 1000) / 1000, @@ -403,9 +390,6 @@ void __init time_init(void) /* No timer interrupt ack (e.g. i8254). */ mips_timer_ack = null_timer_ack; - /* This sets up the high precision timer for the first interrupt. */ - mips_hpt_init(); - /* * Call board specific timer interrupt setup. * diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c new file mode 100644 index 0000000..660e44e --- /dev/null +++ b/arch/mips/kernel/topology.c @@ -0,0 +1,29 @@ +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/node.h> +#include <linux/nodemask.h> +#include <linux/percpu.h> + +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +static int __init topology_init(void) +{ + int i, ret; + +#ifdef CONFIG_NUMA + for_each_online_node(i) + register_one_node(i); +#endif /* CONFIG_NUMA */ + + for_each_present_cpu(i) { + ret = register_cpu(&per_cpu(cpu_devices, i), i); + if (ret) + printk(KERN_WARNING "topology_init: register_cpu %d " + "failed (%d)\n", i, ret); + } + + return 0; +} + +subsys_initcall(topology_init); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 9fda1b8..2a932cad 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -54,6 +54,8 @@ extern asmlinkage void handle_dbe(void); extern asmlinkage void handle_sys(void); extern asmlinkage void handle_bp(void); extern asmlinkage void handle_ri(void); +extern asmlinkage void handle_ri_rdhwr_vivt(void); +extern asmlinkage void handle_ri_rdhwr(void); extern asmlinkage void handle_cpu(void); extern asmlinkage void handle_ov(void); extern asmlinkage void handle_tr(void); @@ -397,19 +399,6 @@ asmlinkage void do_be(struct pt_regs *regs) force_sig(SIGBUS, current); } -static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) -{ - unsigned int __user *epc; - - epc = (unsigned int __user *) regs->cp0_epc + - ((regs->cp0_cause & CAUSEF_BD) != 0); - if (!get_user(*opcode, epc)) - return 0; - - force_sig(SIGSEGV, current); - return 1; -} - /* * ll/sc emulation */ @@ -544,8 +533,8 @@ static inline int simulate_llsc(struct pt_regs *regs) { unsigned int opcode; - if (unlikely(get_insn_opcode(regs, &opcode))) - return -EFAULT; + if (get_user(opcode, (unsigned int __user *) exception_epc(regs))) + goto out_sigsegv; if ((opcode & OPCODE) == LL) { simulate_ll(regs, opcode); @@ -557,6 +546,10 @@ static inline int simulate_llsc(struct pt_regs *regs) } return -EFAULT; /* Strange things going on ... */ + +out_sigsegv: + force_sig(SIGSEGV, current); + return -EFAULT; } /* @@ -569,8 +562,8 @@ static inline int simulate_rdhwr(struct pt_regs *regs) struct thread_info *ti = task_thread_info(current); unsigned int opcode; - if (unlikely(get_insn_opcode(regs, &opcode))) - return -EFAULT; + if (get_user(opcode, (unsigned int __user *) exception_epc(regs))) + goto out_sigsegv; if (unlikely(compute_return_epc(regs))) return -EFAULT; @@ -589,6 +582,10 @@ static inline int simulate_rdhwr(struct pt_regs *regs) /* Not ours. 
*/ return -EFAULT; + +out_sigsegv: + force_sig(SIGSEGV, current); + return -EFAULT; } asmlinkage void do_ov(struct pt_regs *regs) @@ -672,10 +669,8 @@ asmlinkage void do_bp(struct pt_regs *regs) unsigned int opcode, bcode; siginfo_t info; - die_if_kernel("Break instruction in kernel code", regs); - - if (get_insn_opcode(regs, &opcode)) - return; + if (get_user(opcode, (unsigned int __user *) exception_epc(regs))) + goto out_sigsegv; /* * There is the ancient bug in the MIPS assemblers that the break @@ -696,6 +691,7 @@ asmlinkage void do_bp(struct pt_regs *regs) switch (bcode) { case BRK_OVERFLOW << 10: case BRK_DIVZERO << 10: + die_if_kernel("Break instruction in kernel code", regs); if (bcode == (BRK_DIVZERO << 10)) info.si_code = FPE_INTDIV; else @@ -705,9 +701,16 @@ asmlinkage void do_bp(struct pt_regs *regs) info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); break; + case BRK_BUG: + die("Kernel bug detected", regs); + break; default: + die_if_kernel("Break instruction in kernel code", regs); force_sig(SIGTRAP, current); } + +out_sigsegv: + force_sig(SIGSEGV, current); } asmlinkage void do_tr(struct pt_regs *regs) @@ -715,10 +718,8 @@ asmlinkage void do_tr(struct pt_regs *regs) unsigned int opcode, tcode = 0; siginfo_t info; - die_if_kernel("Trap instruction in kernel code", regs); - - if (get_insn_opcode(regs, &opcode)) - return; + if (get_user(opcode, (unsigned int __user *) exception_epc(regs))) + goto out_sigsegv; /* Immediate versions don't provide a code. */ if (!(opcode & OPCODE)) @@ -733,6 +734,7 @@ asmlinkage void do_tr(struct pt_regs *regs) switch (tcode) { case BRK_OVERFLOW: case BRK_DIVZERO: + die_if_kernel("Trap instruction in kernel code", regs); if (tcode == BRK_DIVZERO) info.si_code = FPE_INTDIV; else @@ -742,9 +744,16 @@ asmlinkage void do_tr(struct pt_regs *regs) info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); break; + case BRK_BUG: + die("Kernel bug detected", regs); + break; default: + die_if_kernel("Trap instruction in kernel code", regs); force_sig(SIGTRAP, current); } + +out_sigsegv: + force_sig(SIGSEGV, current); } asmlinkage void do_ri(struct pt_regs *regs) @@ -1423,6 +1432,15 @@ void __init set_uncached_handler (unsigned long offset, void *addr, unsigned lon memcpy((void *)(uncached_ebase + offset), addr, size); } +static int __initdata rdhwr_noopt; +static int __init set_rdhwr_noopt(char *str) +{ + rdhwr_noopt = 1; + return 1; +} + +__setup("rdhwr_noopt", set_rdhwr_noopt); + void __init trap_init(void) { extern char except_vec3_generic, except_vec3_r4000; @@ -1502,7 +1520,9 @@ void __init trap_init(void) set_except_vector(8, handle_sys); set_except_vector(9, handle_bp); - set_except_vector(10, handle_ri); + set_except_vector(10, rdhwr_noopt ? handle_ri : + (cpu_has_vtag_icache ? + handle_ri_rdhwr_vivt : handle_ri_rdhwr)); set_except_vector(11, handle_cpu); set_except_vector(12, handle_ov); set_except_vector(13, handle_tr); |
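
Editor's note, not part of the patch: a pattern that recurs throughout this diff (i8259.c, irq-mv6434x.c, irq-rm7000.c, irq-rm9000.c, irq_cpu.c, irq-msc01.c) is the conversion from open-coded irq_desc[] initialisation with .startup/.shutdown/.enable/.disable hooks to the generic-irq flow handlers, registering each line with set_irq_chip_and_handler() and handle_level_irq. The sketch below is a minimal illustration of that conversion for a hypothetical controller; the my_* names and MY_IRQ_BASE are placeholders, not identifiers from the patch.

/*
 * Illustrative sketch only: how a controller looks after the conversion
 * shown in this patch.  All "my_*" names are hypothetical.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#define MY_IRQ_BASE	8	/* hypothetical base IRQ for this controller */

static void my_mask_irq(unsigned int irq)
{
	/* write the controller's mask register for (irq - MY_IRQ_BASE) */
}

static void my_unmask_irq(unsigned int irq)
{
	/* write the controller's unmask register for (irq - MY_IRQ_BASE) */
}

/*
 * No .startup/.shutdown/.enable/.disable any more: the generic
 * level-triggered flow only needs ack/mask/mask_ack/unmask.
 */
static struct irq_chip my_irq_chip = {
	.typename	= "MY-CHIP",
	.ack		= my_mask_irq,
	.mask		= my_mask_irq,
	.mask_ack	= my_mask_irq,
	.unmask		= my_unmask_irq,
};

void __init my_irq_init(void)
{
	int i;

	/* replaces the old per-entry irq_desc[i] field assignments */
	for (i = MY_IRQ_BASE; i < MY_IRQ_BASE + 4; i++)
		set_irq_chip_and_handler(i, &my_irq_chip, handle_level_irq);
}

The same idea explains the smaller hunks in smp-mt.c and smtc.c, where per-CPU IPI lines additionally get set_irq_handler(irq, handle_percpu_irq) alongside the IRQ_PER_CPU status flag.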